FreeBSD/Linux Kernel Cross Reference
sys/kern/vfs_subr.c


    1 /*-
    2  * Copyright (c) 1989, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  * (c) UNIX System Laboratories, Inc.
    5  * All or some portions of this file are derived from material licensed
    6  * to the University of California by American Telephone and Telegraph
    7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
    8  * the permission of UNIX System Laboratories, Inc.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  * 4. Neither the name of the University nor the names of its contributors
   19  *    may be used to endorse or promote products derived from this software
   20  *    without specific prior written permission.
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   32  * SUCH DAMAGE.
   33  *
   34  *      @(#)vfs_subr.c  8.31 (Berkeley) 5/26/95
   35  */
   36 
   37 /*
   38  * External virtual filesystem routines
   39  */
   40 
   41 #include <sys/cdefs.h>
   42 __FBSDID("$FreeBSD: releng/6.1/sys/kern/vfs_subr.c 158286 2006-05-04 07:42:52Z scottl $");
   43 
   44 #include "opt_ddb.h"
   45 #include "opt_mac.h"
   46 
   47 #include <sys/param.h>
   48 #include <sys/systm.h>
   49 #include <sys/bio.h>
   50 #include <sys/buf.h>
   51 #include <sys/conf.h>
   52 #include <sys/dirent.h>
   53 #include <sys/event.h>
   54 #include <sys/eventhandler.h>
   55 #include <sys/extattr.h>
   56 #include <sys/file.h>
   57 #include <sys/fcntl.h>
   58 #include <sys/kdb.h>
   59 #include <sys/kernel.h>
   60 #include <sys/kthread.h>
   61 #include <sys/mac.h>
   62 #include <sys/malloc.h>
   63 #include <sys/mount.h>
   64 #include <sys/namei.h>
   65 #include <sys/reboot.h>
   66 #include <sys/sleepqueue.h>
   67 #include <sys/stat.h>
   68 #include <sys/sysctl.h>
   69 #include <sys/syslog.h>
   70 #include <sys/vmmeter.h>
   71 #include <sys/vnode.h>
   72 
   73 #include <machine/stdarg.h>
   74 
   75 #include <vm/vm.h>
   76 #include <vm/vm_object.h>
   77 #include <vm/vm_extern.h>
   78 #include <vm/pmap.h>
   79 #include <vm/vm_map.h>
   80 #include <vm/vm_page.h>
   81 #include <vm/vm_kern.h>
   82 #include <vm/uma.h>
   83 
   84 static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");
   85 
   86 static void     delmntque(struct vnode *vp);
   87 static void     insmntque(struct vnode *vp, struct mount *mp);
   88 static int      flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
   89                     int slpflag, int slptimeo);
   90 static void     syncer_shutdown(void *arg, int howto);
   91 static int      vtryrecycle(struct vnode *vp);
   92 static void     vbusy(struct vnode *vp);
   93 static void     vdropl(struct vnode *vp);
   94 static void     vinactive(struct vnode *, struct thread *);
   95 static void     v_incr_usecount(struct vnode *);
   96 static void     v_decr_usecount(struct vnode *);
   97 static void     v_decr_useonly(struct vnode *);
   98 static void     v_upgrade_usecount(struct vnode *);
   99 static void     vfree(struct vnode *);
  100 static void     vnlru_free(int);
  101 static void     vdestroy(struct vnode *);
  102 static void     vgonel(struct vnode *);
  103 static void     vfs_knllock(void *arg);
  104 static void     vfs_knlunlock(void *arg);
  105 static int      vfs_knllocked(void *arg);
  106 
  107 
  108 /*
  109  * Enable Giant pushdown based on whether or not the vm is mpsafe in this
   110  * build.  Without mpsafevm the buffer cache cannot run Giant-free.
  111  */
  112 #if defined(__alpha__) || defined(__amd64__) || defined(__i386__) || \
  113         defined(__sparc64__)
  114 int mpsafe_vfs = 1;
  115 #else
  116 int mpsafe_vfs;
  117 #endif
  118 TUNABLE_INT("debug.mpsafevfs", &mpsafe_vfs);
  119 SYSCTL_INT(_debug, OID_AUTO, mpsafevfs, CTLFLAG_RD, &mpsafe_vfs, 0,
  120     "MPSAFE VFS");
  121 
  122 /*
  123  * Number of vnodes in existence.  Increased whenever getnewvnode()
  124  * allocates a new vnode, never decreased.
  125  */
  126 static unsigned long    numvnodes;
  127 
  128 SYSCTL_LONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");
  129 
  130 /*
  131  * Conversion tables for conversion from vnode types to inode formats
  132  * and back.
  133  */
  134 enum vtype iftovt_tab[16] = {
  135         VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
  136         VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
  137 };
  138 int vttoif_tab[10] = {
  139         0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
  140         S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
  141 };
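
/*
 * These tables are normally consumed through the IFTOVT() and VTTOIF()
 * macros from sys/vnode.h.  A minimal sketch, assuming the standard
 * macro definitions:
 */
#if 0
	enum vtype vt = IFTOVT(mode);	/* iftovt_tab[(mode & S_IFMT) >> 12] */
	mode_t fmt = VTTOIF(vt);	/* vttoif_tab[(int)vt] */
#endif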
  142 
  143 /*
  144  * List of vnodes that are ready for recycling.
  145  */
  146 static TAILQ_HEAD(freelst, vnode) vnode_free_list;
  147 
  148 /*
  149  * Free vnode target.  Free vnodes may simply be files which have been stat'd
  150  * but not read.  This is somewhat common, and a small cache of such files
  151  * should be kept to avoid recreation costs.
  152  */
  153 static u_long wantfreevnodes;
  154 SYSCTL_LONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
  155 /* Number of vnodes in the free list. */
  156 static u_long freevnodes;
  157 SYSCTL_LONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");
  158 
  159 /*
  160  * Various variables used for debugging the new implementation of
  161  * reassignbuf().
  162  * XXX these are probably of (very) limited utility now.
  163  */
  164 static int reassignbufcalls;
  165 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
  166 
  167 /*
  168  * Cache for the mount type id assigned to NFS.  This is used for
  169  * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
  170  */
  171 int     nfs_mount_type = -1;
  172 
  173 /* To keep more than one thread at a time from running vfs_getnewfsid */
  174 static struct mtx mntid_mtx;
  175 
  176 /*
  177  * Lock for any access to the following:
  178  *      vnode_free_list
  179  *      numvnodes
  180  *      freevnodes
  181  */
  182 static struct mtx vnode_free_list_mtx;
  183 
  184 /* Publicly exported FS */
  185 struct nfs_public nfs_pub;
  186 
  187 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
  188 static uma_zone_t vnode_zone;
  189 static uma_zone_t vnodepoll_zone;
  190 
  191 /* Set to 1 to print out reclaim of active vnodes */
  192 int     prtactive;
  193 
  194 /*
  195  * The workitem queue.
  196  *
  197  * It is useful to delay writes of file data and filesystem metadata
  198  * for tens of seconds so that quickly created and deleted files need
  199  * not waste disk bandwidth being created and removed. To realize this,
  200  * we append vnodes to a "workitem" queue. When running with a soft
  201  * updates implementation, most pending metadata dependencies should
   202  * not wait for more than a few seconds. Thus, metadata updates for
   203  * mounted block devices are delayed only about half the time that file
   204  * data is delayed. Similarly, directory updates are more critical, so
   205  * they are delayed only about a third of that time. Thus, there are
  206  * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
  207  * one each second (driven off the filesystem syncer process). The
  208  * syncer_delayno variable indicates the next queue that is to be processed.
  209  * Items that need to be processed soon are placed in this queue:
  210  *
  211  *      syncer_workitem_pending[syncer_delayno]
  212  *
  213  * A delay of fifteen seconds is done by placing the request fifteen
  214  * entries later in the queue:
  215  *
  216  *      syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
  217  *
  218  */
  219 static int syncer_delayno;
  220 static long syncer_mask;
  221 LIST_HEAD(synclist, bufobj);
  222 static struct synclist *syncer_workitem_pending;
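
/*
 * A sketch of the slot arithmetic described above (the actual insertion
 * is done by vn_syncer_add_to_worklist(); `delay' is in seconds and
 * syncer_mask is one less than the power-of-two bucket count):
 */
#if 0
	slot = (syncer_delayno + delay) & syncer_mask;
	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist);
#endif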
  223 /*
  224  * The sync_mtx protects:
  225  *      bo->bo_synclist
  226  *      sync_vnode_count
  227  *      syncer_delayno
  228  *      syncer_state
  229  *      syncer_workitem_pending
  230  *      syncer_worklist_len
  231  *      rushjob
  232  */
  233 static struct mtx sync_mtx;
  234 
  235 #define SYNCER_MAXDELAY         32
  236 static int syncer_maxdelay = SYNCER_MAXDELAY;   /* maximum delay time */
  237 static int syncdelay = 30;              /* max time to delay syncing data */
  238 static int filedelay = 30;              /* time to delay syncing files */
  239 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
  240 static int dirdelay = 29;               /* time to delay syncing directories */
  241 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
  242 static int metadelay = 28;              /* time to delay syncing metadata */
  243 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
  244 static int rushjob;             /* number of slots to run ASAP */
  245 static int stat_rush_requests;  /* number of times I/O speeded up */
  246 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");
  247 
  248 /*
  249  * When shutting down the syncer, run it at four times normal speed.
  250  */
  251 #define SYNCER_SHUTDOWN_SPEEDUP         4
  252 static int sync_vnode_count;
  253 static int syncer_worklist_len;
  254 static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
  255     syncer_state;
  256 
  257 /*
  258  * Number of vnodes we want to exist at any one time.  This is mostly used
  259  * to size hash tables in vnode-related code.  It is normally not used in
   260  * getnewvnode(), as wantfreevnodes is normally nonzero.
  261  *
  262  * XXX desiredvnodes is historical cruft and should not exist.
  263  */
  264 int desiredvnodes;
  265 SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
  266     &desiredvnodes, 0, "Maximum number of vnodes");
  267 SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
  268     &wantfreevnodes, 0, "Minimum number of vnodes (legacy)");
  269 static int vnlru_nowhere;
  270 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
  271     &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");
  272 
  273 /*
  274  * Macros to control when a vnode is freed and recycled.  All require
  275  * the vnode interlock.
  276  */
  277 #define VCANRECYCLE(vp) (((vp)->v_iflag & VI_FREE) && !(vp)->v_holdcnt)
  278 #define VSHOULDFREE(vp) (!((vp)->v_iflag & VI_FREE) && !(vp)->v_holdcnt)
  279 #define VSHOULDBUSY(vp) (((vp)->v_iflag & VI_FREE) && (vp)->v_holdcnt)
  280 
  281 
  282 /*
  283  * Initialize the vnode management data structures.
  284  */
  285 #ifndef MAXVNODES_MAX
  286 #define MAXVNODES_MAX   100000
  287 #endif
  288 static void
  289 vntblinit(void *dummy __unused)
  290 {
  291 
  292         /*
  293          * Desiredvnodes is a function of the physical memory size and
  294          * the kernel's heap size.  Specifically, desiredvnodes scales
  295          * in proportion to the physical memory size until two fifths
  296          * of the kernel's heap size is consumed by vnodes and vm
  297          * objects.
  298          */
  299         desiredvnodes = min(maxproc + cnt.v_page_count / 4, 2 * vm_kmem_size /
  300             (5 * (sizeof(struct vm_object) + sizeof(struct vnode))));
  301         if (desiredvnodes > MAXVNODES_MAX) {
  302                 if (bootverbose)
  303                         printf("Reducing kern.maxvnodes %d -> %d\n",
  304                             desiredvnodes, MAXVNODES_MAX);
  305                 desiredvnodes = MAXVNODES_MAX;
  306         }
  307         wantfreevnodes = desiredvnodes / 4; 
  308         mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
  309         TAILQ_INIT(&vnode_free_list);
  310         mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
  311         vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
  312             NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
  313         vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
  314               NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
  315         /*
  316          * Initialize the filesystem syncer.
  317          */
  318         syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
  319                 &syncer_mask);
  320         syncer_maxdelay = syncer_mask + 1;
  321         mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
  322 }
  323 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL)
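
/*
 * Note that hashinit(9) sizes the table to a power of two and returns
 * (buckets - 1) through its mask argument; with the default
 * SYNCER_MAXDELAY of 32 this leaves syncer_mask == 31, and the
 * reassignment above makes syncer_maxdelay equal the true bucket count,
 * so (syncer_delayno + delay) & syncer_mask always lands in a valid
 * bucket.
 */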
  324 
  325 
  326 /*
  327  * Mark a mount point as busy. Used to synchronize access and to delay
  328  * unmounting. Interlock is not released on failure.
  329  */
  330 int
  331 vfs_busy(mp, flags, interlkp, td)
  332         struct mount *mp;
  333         int flags;
  334         struct mtx *interlkp;
  335         struct thread *td;
  336 {
  337         int lkflags;
  338 
  339         MNT_ILOCK(mp);
  340         MNT_REF(mp);
  341         if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
  342                 if (flags & LK_NOWAIT) {
  343                         MNT_REL(mp);
  344                         MNT_IUNLOCK(mp);
  345                         return (ENOENT);
  346                 }
  347                 if (interlkp)
  348                         mtx_unlock(interlkp);
  349                 mp->mnt_kern_flag |= MNTK_MWAIT;
  350                 /*
  351                  * Since all busy locks are shared except the exclusive
  352                  * lock granted when unmounting, the only place that a
  353                  * wakeup needs to be done is at the release of the
  354                  * exclusive lock at the end of dounmount.
  355                  */
  356                 msleep(mp, MNT_MTX(mp), PVFS, "vfs_busy", 0);
  357                 MNT_REL(mp);
  358                 MNT_IUNLOCK(mp);
  359                 if (interlkp)
  360                         mtx_lock(interlkp);
  361                 return (ENOENT);
  362         }
  363         if (interlkp)
  364                 mtx_unlock(interlkp);
  365         lkflags = LK_SHARED | LK_INTERLOCK;
  366         if (lockmgr(&mp->mnt_lock, lkflags, MNT_MTX(mp), td))
  367                 panic("vfs_busy: unexpected lock failure");
  368         vfs_rel(mp);
  369         return (0);
  370 }
  371 
  372 /*
  373  * Free a busy filesystem.
  374  */
  375 void
  376 vfs_unbusy(mp, td)
  377         struct mount *mp;
  378         struct thread *td;
  379 {
  380 
  381         lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
  382 }
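
/*
 * A sketch of the canonical caller pattern (mirroring vnlru_proc()
 * below).  On success vfs_busy() drops the interlock passed to it, so
 * the list lock must be retaken before stepping to the next mount:
 */
#if 0
	mtx_lock(&mountlist_mtx);
	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
			nmp = TAILQ_NEXT(mp, mnt_list);
			continue;
		}
		/* ... operate on the busied mount ... */
		mtx_lock(&mountlist_mtx);
		nmp = TAILQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp, td);
	}
	mtx_unlock(&mountlist_mtx);
#endif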
  383 
  384 /*
  385  * Lookup a mount point by filesystem identifier.
  386  */
  387 struct mount *
  388 vfs_getvfs(fsid)
  389         fsid_t *fsid;
  390 {
  391         struct mount *mp;
  392 
  393         mtx_lock(&mountlist_mtx);
  394         TAILQ_FOREACH(mp, &mountlist, mnt_list) {
  395                 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
  396                     mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
  397                         mtx_unlock(&mountlist_mtx);
  398                         return (mp);
  399                 }
  400         }
  401         mtx_unlock(&mountlist_mtx);
  402         return ((struct mount *) 0);
  403 }
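
/*
 * The usual consumer is file-handle translation, e.g. an NFS server
 * turning a client handle back into a mount point (a sketch; fhp is an
 * illustrative struct fhandle pointer):
 */
#if 0
	mp = vfs_getvfs(&fhp->fh_fsid);
	if (mp == NULL)
		return (ESTALE);
#endif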
  404 
  405 /*
   406  * Check if a user can access privileged mount options.
  407  */
  408 int
  409 vfs_suser(struct mount *mp, struct thread *td)
  410 {
  411         int error;
  412 
  413         if ((mp->mnt_flag & MNT_USER) == 0 ||
  414             mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
  415                 if ((error = suser(td)) != 0)
  416                         return (error);
  417         }
  418         return (0);
  419 }
  420 
  421 /*
  422  * Get a new unique fsid.  Try to make its val[0] unique, since this value
   423  * will be used to create fake device numbers for stat().  Also try (but
   424  * not so hard) to make its val[0] unique mod 2^16, since some emulators
   425  * only support 16-bit device numbers.  We end up with unique val[0]'s
   426  * for the first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
  427  *
  428  * Keep in mind that several mounts may be running in parallel.  Starting
  429  * the search one past where the previous search terminated is both a
  430  * micro-optimization and a defense against returning the same fsid to
  431  * different mounts.
  432  */
  433 void
  434 vfs_getnewfsid(mp)
  435         struct mount *mp;
  436 {
  437         static u_int16_t mntid_base;
  438         fsid_t tfsid;
  439         int mtype;
  440 
  441         mtx_lock(&mntid_mtx);
  442         mtype = mp->mnt_vfc->vfc_typenum;
  443         tfsid.val[1] = mtype;
  444         mtype = (mtype & 0xFF) << 24;
  445         for (;;) {
  446                 tfsid.val[0] = makedev(255,
  447                     mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
  448                 mntid_base++;
  449                 if (vfs_getvfs(&tfsid) == NULL)
  450                         break;
  451         }
  452         mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
  453         mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
  454         mtx_unlock(&mntid_mtx);
  455 }
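
/*
 * A worked example of the packing above, with vfc_typenum == 5 and
 * mntid_base == 0x1234:
 *
 *	mtype  = (5 & 0xFF) << 24                  = 0x05000000
 *	minor  = mtype | ((0x1234 & 0xFF00) << 8)
 *	               | (0x1234 & 0xFF)           = 0x05120034
 *	val[0] = makedev(255, 0x05120034)
 *
 * Only the low byte of mntid_base lands in the low 16 bits of the
 * minor number, which is why val[0] is unique mod 2^16 only until that
 * byte wraps after 2^8 calls.
 */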
  456 
  457 /*
  458  * Knob to control the precision of file timestamps:
  459  *
  460  *   0 = seconds only; nanoseconds zeroed.
  461  *   1 = seconds and nanoseconds, accurate within 1/HZ.
  462  *   2 = seconds and nanoseconds, truncated to microseconds.
  463  * >=3 = seconds and nanoseconds, maximum precision.
  464  */
  465 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };
  466 
  467 static int timestamp_precision = TSP_SEC;
  468 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
  469     &timestamp_precision, 0, "");
  470 
  471 /*
  472  * Get a current timestamp.
  473  */
  474 void
  475 vfs_timestamp(tsp)
  476         struct timespec *tsp;
  477 {
  478         struct timeval tv;
  479 
  480         switch (timestamp_precision) {
  481         case TSP_SEC:
  482                 tsp->tv_sec = time_second;
  483                 tsp->tv_nsec = 0;
  484                 break;
  485         case TSP_HZ:
  486                 getnanotime(tsp);
  487                 break;
  488         case TSP_USEC:
  489                 microtime(&tv);
  490                 TIMEVAL_TO_TIMESPEC(&tv, tsp);
  491                 break;
  492         case TSP_NSEC:
  493         default:
  494                 nanotime(tsp);
  495                 break;
  496         }
  497 }
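
/*
 * The knob above is an ordinary read-write sysctl, so the precision can
 * be changed at runtime from userland, e.g.:
 *
 *	sysctl vfs.timestamp_precision=3	(full nanotime(9) precision)
 */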
  498 
  499 /*
  500  * Set vnode attributes to VNOVAL
  501  */
  502 void
  503 vattr_null(vap)
  504         struct vattr *vap;
  505 {
  506 
  507         vap->va_type = VNON;
  508         vap->va_size = VNOVAL;
  509         vap->va_bytes = VNOVAL;
  510         vap->va_mode = VNOVAL;
  511         vap->va_nlink = VNOVAL;
  512         vap->va_uid = VNOVAL;
  513         vap->va_gid = VNOVAL;
  514         vap->va_fsid = VNOVAL;
  515         vap->va_fileid = VNOVAL;
  516         vap->va_blocksize = VNOVAL;
  517         vap->va_rdev = VNOVAL;
  518         vap->va_atime.tv_sec = VNOVAL;
  519         vap->va_atime.tv_nsec = VNOVAL;
  520         vap->va_mtime.tv_sec = VNOVAL;
  521         vap->va_mtime.tv_nsec = VNOVAL;
  522         vap->va_ctime.tv_sec = VNOVAL;
  523         vap->va_ctime.tv_nsec = VNOVAL;
  524         vap->va_birthtime.tv_sec = VNOVAL;
  525         vap->va_birthtime.tv_nsec = VNOVAL;
  526         vap->va_flags = VNOVAL;
  527         vap->va_gen = VNOVAL;
  528         vap->va_vaflags = 0;
  529 }
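
/*
 * Callers normally reach this through the VATTR_NULL() macro and then
 * set only the fields they intend to change; fields left at VNOVAL are
 * ignored by the filesystem's setattr.  A sketch of a truncate request:
 */
#if 0
	struct vattr va;

	VATTR_NULL(&va);
	va.va_size = length;
	error = VOP_SETATTR(vp, &va, cred, td);
#endif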
  530 
  531 /*
  532  * This routine is called when we have too many vnodes.  It attempts
  533  * to free <count> vnodes and will potentially free vnodes that still
  534  * have VM backing store (VM backing store is typically the cause
  535  * of a vnode blowout so we want to do this).  Therefore, this operation
  536  * is not considered cheap.
  537  *
  538  * A number of conditions may prevent a vnode from being reclaimed.
   539  * The buffer cache may have references on the vnode, a directory
   540  * vnode may still have references due to the namei cache representing
   541  * underlying files, or the vnode may be in active use.  It is not
   542  * desirable to reuse such vnodes.  These conditions may cause the
  543  * number of vnodes to reach some minimum value regardless of what
  544  * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
  545  */
  546 static int
  547 vlrureclaim(struct mount *mp)
  548 {
  549         struct thread *td;
  550         struct vnode *vp;
  551         int done;
  552         int trigger;
  553         int usevnodes;
  554         int count;
  555 
  556         /*
   557          * Calculate the trigger point; don't allow user
   558          * screwups to blow us up.  This prevents us from
  559          * recycling vnodes with lots of resident pages.  We
  560          * aren't trying to free memory, we are trying to
  561          * free vnodes.
  562          */
  563         usevnodes = desiredvnodes;
  564         if (usevnodes <= 0)
  565                 usevnodes = 1;
  566         trigger = cnt.v_page_count * 2 / usevnodes;
  567         done = 0;
  568         td = curthread;
  569         vn_start_write(NULL, &mp, V_WAIT);
  570         MNT_ILOCK(mp);
  571         count = mp->mnt_nvnodelistsize / 10 + 1;
  572         while (count != 0) {
  573                 vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
  574                 while (vp != NULL && vp->v_type == VMARKER)
  575                         vp = TAILQ_NEXT(vp, v_nmntvnodes);
  576                 if (vp == NULL)
  577                         break;
  578                 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
  579                 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
  580                 --count;
  581                 if (!VI_TRYLOCK(vp))
  582                         goto next_iter;
  583                 /*
  584                  * If it's been deconstructed already, it's still
  585                  * referenced, or it exceeds the trigger, skip it.
  586                  */
  587                 if (vp->v_usecount || !LIST_EMPTY(&(vp)->v_cache_src) ||
  588                     (vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL &&
  589                     vp->v_object->resident_page_count > trigger)) {
  590                         VI_UNLOCK(vp);
  591                         goto next_iter;
  592                 }
  593                 MNT_IUNLOCK(mp);
  594                 vholdl(vp);
  595                 if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT, td)) {
  596                         vdrop(vp);
  597                         goto next_iter_mntunlocked;
  598                 }
  599                 VI_LOCK(vp);
  600                 /*
  601                  * v_usecount may have been bumped after VOP_LOCK() dropped
  602                  * the vnode interlock and before it was locked again.
  603                  *
  604                  * It is not necessary to recheck VI_DOOMED because it can
  605                  * only be set by another thread that holds both the vnode
  606                  * lock and vnode interlock.  If another thread has the
  607                  * vnode lock before we get to VOP_LOCK() and obtains the
  608                  * vnode interlock after VOP_LOCK() drops the vnode
  609                  * interlock, the other thread will be unable to drop the
  610                  * vnode lock before our VOP_LOCK() call fails.
  611                  */
  612                 if (vp->v_usecount || !LIST_EMPTY(&(vp)->v_cache_src) ||
  613                     (vp->v_object != NULL && 
  614                     vp->v_object->resident_page_count > trigger)) {
  615                         VOP_UNLOCK(vp, LK_INTERLOCK, td);
  616                         goto next_iter_mntunlocked;
  617                 }
  618                 KASSERT((vp->v_iflag & VI_DOOMED) == 0,
  619                     ("VI_DOOMED unexpectedly detected in vlrureclaim()"));
  620                 vgonel(vp);
  621                 VOP_UNLOCK(vp, 0, td);
  622                 vdropl(vp);
  623                 done++;
  624 next_iter_mntunlocked:
  625                 if ((count % 256) != 0)
  626                         goto relock_mnt;
  627                 goto yield;
  628 next_iter:
  629                 if ((count % 256) != 0)
  630                         continue;
  631                 MNT_IUNLOCK(mp);
  632 yield:
  633                 uio_yield();
  634 relock_mnt:
  635                 MNT_ILOCK(mp);
  636         }
  637         MNT_IUNLOCK(mp);
  638         vn_finished_write(mp);
  639         return done;
  640 }
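
/*
 * A worked example of the trigger above: with cnt.v_page_count ==
 * 1000000 pages and desiredvnodes == 100000, trigger == 20, so any
 * vnode whose VM object still holds more than 20 resident pages is
 * skipped; the goal is to reclaim vnodes, not to throw away useful
 * page cache.
 */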
  641 
  642 /*
  643  * Attempt to keep the free list at wantfreevnodes length.
  644  */
  645 static void
  646 vnlru_free(int count)
  647 {
  648         struct vnode *vp;
  649         int vfslocked;
  650 
  651         mtx_assert(&vnode_free_list_mtx, MA_OWNED);
  652         for (; count > 0; count--) {
  653                 vp = TAILQ_FIRST(&vnode_free_list);
  654                 /*
  655                  * The list can be modified while the free_list_mtx
  656                  * has been dropped and vp could be NULL here.
  657                  */
  658                 if (!vp)
  659                         break;
  660                 VNASSERT(vp->v_op != NULL, vp,
  661                     ("vnlru_free: vnode already reclaimed."));
  662                 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
  663                 /*
  664                  * Don't recycle if we can't get the interlock.
  665                  */
  666                 if (!VI_TRYLOCK(vp)) {
  667                         TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
  668                         continue;
  669                 }
  670                 VNASSERT(VCANRECYCLE(vp), vp,
  671                     ("vp inconsistent on freelist"));
  672                 freevnodes--;
  673                 vp->v_iflag &= ~VI_FREE;
  674                 vholdl(vp);
  675                 mtx_unlock(&vnode_free_list_mtx);
  676                 VI_UNLOCK(vp);
  677                 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
  678                 vtryrecycle(vp);
  679                 VFS_UNLOCK_GIANT(vfslocked);
  680                 /*
   681  * If the recycle succeeded, this vdrop will actually free
  682                  * the vnode.  If not it will simply place it back on
  683                  * the free list.
  684                  */
  685                 vdrop(vp);
  686                 mtx_lock(&vnode_free_list_mtx);
  687         }
  688 }
  689 /*
  690  * Attempt to recycle vnodes in a context that is always safe to block.
   691  * Calling vlrureclaim() from the bowels of filesystem code has some
  692  * interesting deadlock problems.
  693  */
  694 static struct proc *vnlruproc;
  695 static int vnlruproc_sig;
  696 
  697 static void
  698 vnlru_proc(void)
  699 {
  700         struct mount *mp, *nmp;
  701         int done;
  702         struct proc *p = vnlruproc;
  703         struct thread *td = FIRST_THREAD_IN_PROC(p);
  704 
  705         mtx_lock(&Giant);
  706 
  707         EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
  708             SHUTDOWN_PRI_FIRST);
  709 
  710         for (;;) {
  711                 kthread_suspend_check(p);
  712                 mtx_lock(&vnode_free_list_mtx);
  713                 if (freevnodes > wantfreevnodes)
  714                         vnlru_free(freevnodes - wantfreevnodes);
  715                 if (numvnodes <= desiredvnodes * 9 / 10) {
  716                         vnlruproc_sig = 0;
  717                         wakeup(&vnlruproc_sig);
  718                         msleep(vnlruproc, &vnode_free_list_mtx,
  719                             PVFS|PDROP, "vlruwt", hz);
  720                         continue;
  721                 }
  722                 mtx_unlock(&vnode_free_list_mtx);
  723                 done = 0;
  724                 mtx_lock(&mountlist_mtx);
  725                 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
  726                         int vfsunlocked;
  727                         if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
  728                                 nmp = TAILQ_NEXT(mp, mnt_list);
  729                                 continue;
  730                         }
  731                         if (!VFS_NEEDSGIANT(mp)) {
  732                                 mtx_unlock(&Giant);
  733                                 vfsunlocked = 1;
  734                         } else
  735                                 vfsunlocked = 0;
  736                         done += vlrureclaim(mp);
  737                         if (vfsunlocked)
  738                                 mtx_lock(&Giant);
  739                         mtx_lock(&mountlist_mtx);
  740                         nmp = TAILQ_NEXT(mp, mnt_list);
  741                         vfs_unbusy(mp, td);
  742                 }
  743                 mtx_unlock(&mountlist_mtx);
  744                 if (done == 0) {
  745 #if 0
  746                         /* These messages are temporary debugging aids */
  747                         if (vnlru_nowhere < 5)
  748                                 printf("vnlru process getting nowhere..\n");
  749                         else if (vnlru_nowhere == 5)
  750                                 printf("vnlru process messages stopped.\n");
  751 #endif
  752                         vnlru_nowhere++;
  753                         tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
  754                 } else 
  755                         uio_yield();
  756         }
  757 }
  758 
  759 static struct kproc_desc vnlru_kp = {
  760         "vnlru",
  761         vnlru_proc,
  762         &vnlruproc
  763 };
  764 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)
  765 
  766 /*
  767  * Routines having to do with the management of the vnode table.
  768  */
  769 
  770 static void
  771 vdestroy(struct vnode *vp)
  772 {
  773         struct bufobj *bo;
  774 
  775         CTR1(KTR_VFS, "vdestroy vp %p", vp);
  776         mtx_lock(&vnode_free_list_mtx);
  777         numvnodes--;
  778         mtx_unlock(&vnode_free_list_mtx);
  779         bo = &vp->v_bufobj;
  780         VNASSERT((vp->v_iflag & VI_FREE) == 0, vp,
  781             ("cleaned vnode still on the free list."));
  782         VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't"));
  783         VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count"));
  784         VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count"));
  785         VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count"));
  786         VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's"));
  787         VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0"));
  788         VNASSERT(bo->bo_clean.bv_root == NULL, vp, ("cleanblkroot not NULL"));
  789         VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0"));
  790         VNASSERT(bo->bo_dirty.bv_root == NULL, vp, ("dirtyblkroot not NULL"));
  791         VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst"));
  792         VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src"));
  793 #ifdef MAC
  794         mac_destroy_vnode(vp);
  795 #endif
  796         if (vp->v_pollinfo != NULL) {
  797                 knlist_destroy(&vp->v_pollinfo->vpi_selinfo.si_note);
  798                 mtx_destroy(&vp->v_pollinfo->vpi_lock);
  799                 uma_zfree(vnodepoll_zone, vp->v_pollinfo);
  800         }
  801 #ifdef INVARIANTS
  802         /* XXX Elsewhere we can detect an already freed vnode via NULL v_op. */
  803         vp->v_op = NULL;
  804 #endif
  805         lockdestroy(vp->v_vnlock);
  806         mtx_destroy(&vp->v_interlock);
  807         uma_zfree(vnode_zone, vp);
  808 }
  809 
  810 /*
  811  * Try to recycle a freed vnode.  We abort if anyone picks up a reference
  812  * before we actually vgone().  This function must be called with the vnode
  813  * held to prevent the vnode from being returned to the free list midway
  814  * through vgone().
  815  */
  816 static int
  817 vtryrecycle(struct vnode *vp)
  818 {
  819         struct thread *td = curthread;
  820         struct mount *vnmp;
  821 
  822         CTR1(KTR_VFS, "vtryrecycle: trying vp %p", vp);
  823         VNASSERT(vp->v_holdcnt, vp,
  824             ("vtryrecycle: Recycling vp %p without a reference.", vp));
  825         /*
   826  * This vnode may be found and locked via some other list; if so we
  827          * can't recycle it yet.
  828          */
  829         if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT, td) != 0)
  830                 return (EWOULDBLOCK);
  831         /*
  832          * Don't recycle if its filesystem is being suspended.
  833          */
  834         if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
  835                 VOP_UNLOCK(vp, 0, td);
  836                 return (EBUSY);
  837         }
  838         /*
  839          * If we got this far, we need to acquire the interlock and see if
  840          * anyone picked up this vnode from another list.  If not, we will
  841          * mark it with DOOMED via vgonel() so that anyone who does find it
  842          * will skip over it.
  843          */
  844         VI_LOCK(vp);
  845         if (vp->v_usecount) {
  846                 VOP_UNLOCK(vp, LK_INTERLOCK, td);
  847                 vn_finished_write(vnmp);
  848                 return (EBUSY);
  849         }
  850         if ((vp->v_iflag & VI_DOOMED) == 0)
  851                 vgonel(vp);
  852         VOP_UNLOCK(vp, LK_INTERLOCK, td);
  853         vn_finished_write(vnmp);
  854         CTR1(KTR_VFS, "vtryrecycle: recycled vp %p", vp);
  855         return (0);
  856 }
  857 
  858 /*
  859  * Return the next vnode from the free list.
  860  */
  861 int
  862 getnewvnode(tag, mp, vops, vpp)
  863         const char *tag;
  864         struct mount *mp;
  865         struct vop_vector *vops;
  866         struct vnode **vpp;
  867 {
  868         struct vnode *vp = NULL;
  869         struct bufobj *bo;
  870 
  871         mtx_lock(&vnode_free_list_mtx);
  872         /*
  873          * Lend our context to reclaim vnodes if they've exceeded the max.
  874          */
  875         if (freevnodes > wantfreevnodes)
  876                 vnlru_free(1);
  877         /*
  878          * Wait for available vnodes.
  879          */
  880         if (numvnodes > desiredvnodes) {
  881                 if (vnlruproc_sig == 0) {
  882                         vnlruproc_sig = 1;      /* avoid unnecessary wakeups */
  883                         wakeup(vnlruproc);
  884                 }
  885                 msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS,
  886                     "vlruwk", hz);
  887 #if 0   /* XXX Not all VFS_VGET/ffs_vget callers check returns. */
  888                 if (numvnodes > desiredvnodes) {
  889                         mtx_unlock(&vnode_free_list_mtx);
  890                         return (ENFILE);
  891                 }
  892 #endif
  893         }
  894         numvnodes++;
  895         mtx_unlock(&vnode_free_list_mtx);
  896         vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK|M_ZERO);
  897         /*
  898          * Setup locks.
  899          */
  900         vp->v_vnlock = &vp->v_lock;
  901         mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
  902         /*
  903          * By default, don't allow shared locks unless filesystems
   904          * opt in.
  905          */
  906         lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOSHARE);
  907         /*
  908          * Initialize bufobj.
  909          */
  910         bo = &vp->v_bufobj;
  911         bo->__bo_vnode = vp;
  912         bo->bo_mtx = &vp->v_interlock;
  913         bo->bo_ops = &buf_ops_bio;
  914         bo->bo_private = vp;
  915         TAILQ_INIT(&bo->bo_clean.bv_hd);
  916         TAILQ_INIT(&bo->bo_dirty.bv_hd);
  917         /*
  918          * Initialize namecache.
  919          */
  920         LIST_INIT(&vp->v_cache_src);
  921         TAILQ_INIT(&vp->v_cache_dst);
  922         /*
  923          * Finalize various vnode identity bits.
  924          */
  925         vp->v_type = VNON;
  926         vp->v_tag = tag;
  927         vp->v_op = vops;
  928         v_incr_usecount(vp);
  929         vp->v_data = 0;
  930 #ifdef MAC
  931         mac_init_vnode(vp);
  932         if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
  933                 mac_associate_vnode_singlelabel(mp, vp);
  934         else if (mp == NULL)
  935                 printf("NULL mp in getnewvnode()\n");
  936 #endif
  937         if (mp != NULL) {
  938                 insmntque(vp, mp);
  939                 bo->bo_bsize = mp->mnt_stat.f_iosize;
  940                 if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0)
  941                         vp->v_vflag |= VV_NOKNOTE;
  942         }
  943 
  944         CTR2(KTR_VFS, "getnewvnode: mp %p vp %p", mp, vp);
  945         *vpp = vp;
  946         return (0);
  947 }
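
/*
 * A sketch of a typical filesystem caller, modeled loosely on
 * ffs_vget(); the identifiers are illustrative:
 */
#if 0
	error = getnewvnode("ufs", mp, &ffs_vnodeops, &vp);
	if (error)
		return (error);
	vp->v_data = ip;	/* attach the per-filesystem inode */
	ip->i_vnode = vp;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
#endif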
  948 
  949 /*
  950  * Delete from old mount point vnode list, if on one.
  951  */
  952 static void
  953 delmntque(struct vnode *vp)
  954 {
  955         struct mount *mp;
  956 
  957         mp = vp->v_mount;
  958         if (mp == NULL)
  959                 return;
  960         MNT_ILOCK(mp);
  961         vp->v_mount = NULL;
  962         VNASSERT(mp->mnt_nvnodelistsize > 0, vp,
  963                 ("bad mount point vnode list size"));
  964         TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
  965         mp->mnt_nvnodelistsize--;
  966         MNT_REL(mp);
  967         MNT_IUNLOCK(mp);
  968 }
  969 
  970 /*
  971  * Insert into list of vnodes for the new mount point, if available.
  972  */
  973 static void
  974 insmntque(struct vnode *vp, struct mount *mp)
  975 {
  976 
  977         vp->v_mount = mp;
  978         VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)"));
  979         MNT_ILOCK(mp);
  980         MNT_REF(mp);
  981         TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
  982         VNASSERT(mp->mnt_nvnodelistsize >= 0, vp,
  983                 ("neg mount point vnode list size"));
  984         mp->mnt_nvnodelistsize++;
  985         MNT_IUNLOCK(mp);
  986 }
  987 
  988 /*
  989  * Flush out and invalidate all buffers associated with a bufobj
  990  * Called with the underlying object locked.
  991  */
  992 int
  993 bufobj_invalbuf(struct bufobj *bo, int flags, struct thread *td, int slpflag, int slptimeo)
  994 {
  995         int error;
  996 
  997         BO_LOCK(bo);
  998         if (flags & V_SAVE) {
  999                 error = bufobj_wwait(bo, slpflag, slptimeo);
 1000                 if (error) {
 1001                         BO_UNLOCK(bo);
 1002                         return (error);
 1003                 }
 1004                 if (bo->bo_dirty.bv_cnt > 0) {
 1005                         BO_UNLOCK(bo);
 1006                         if ((error = BO_SYNC(bo, MNT_WAIT, td)) != 0)
 1007                                 return (error);
 1008                         /*
 1009                          * XXX We could save a lock/unlock if this was only
 1010                          * enabled under INVARIANTS
 1011                          */
 1012                         BO_LOCK(bo);
 1013                         if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)
 1014                                 panic("vinvalbuf: dirty bufs");
 1015                 }
 1016         }
 1017         /*
 1018          * If you alter this loop please notice that interlock is dropped and
 1019          * reacquired in flushbuflist.  Special care is needed to ensure that
 1020          * no race conditions occur from this.
 1021          */
 1022         do {
 1023                 error = flushbuflist(&bo->bo_clean,
 1024                     flags, bo, slpflag, slptimeo);
 1025                 if (error == 0)
 1026                         error = flushbuflist(&bo->bo_dirty,
 1027                             flags, bo, slpflag, slptimeo);
 1028                 if (error != 0 && error != EAGAIN) {
 1029                         BO_UNLOCK(bo);
 1030                         return (error);
 1031                 }
 1032         } while (error != 0);
 1033 
 1034         /*
 1035          * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
 1036          * have write I/O in-progress but if there is a VM object then the
 1037          * VM object can also have read-I/O in-progress.
 1038          */
 1039         do {
 1040                 bufobj_wwait(bo, 0, 0);
 1041                 BO_UNLOCK(bo);
 1042                 if (bo->bo_object != NULL) {
 1043                         VM_OBJECT_LOCK(bo->bo_object);
 1044                         vm_object_pip_wait(bo->bo_object, "bovlbx");
 1045                         VM_OBJECT_UNLOCK(bo->bo_object);
 1046                 }
 1047                 BO_LOCK(bo);
 1048         } while (bo->bo_numoutput > 0);
 1049         BO_UNLOCK(bo);
 1050 
 1051         /*
 1052          * Destroy the copy in the VM cache, too.
 1053          */
 1054         if (bo->bo_object != NULL) {
 1055                 VM_OBJECT_LOCK(bo->bo_object);
 1056                 vm_object_page_remove(bo->bo_object, 0, 0,
 1057                         (flags & V_SAVE) ? TRUE : FALSE);
 1058                 VM_OBJECT_UNLOCK(bo->bo_object);
 1059         }
 1060 
 1061 #ifdef INVARIANTS
 1062         BO_LOCK(bo);
 1063         if ((flags & (V_ALT | V_NORMAL)) == 0 &&
 1064             (bo->bo_dirty.bv_cnt > 0 || bo->bo_clean.bv_cnt > 0))
 1065                 panic("vinvalbuf: flush failed");
 1066         BO_UNLOCK(bo);
 1067 #endif
 1068         return (0);
 1069 }
 1070 
 1071 /*
 1072  * Flush out and invalidate all buffers associated with a vnode.
 1073  * Called with the underlying object locked.
 1074  */
 1075 int
 1076 vinvalbuf(struct vnode *vp, int flags, struct thread *td, int slpflag, int slptimeo)
 1077 {
 1078 
 1079         CTR2(KTR_VFS, "vinvalbuf vp %p flags %d", vp, flags);
 1080         ASSERT_VOP_LOCKED(vp, "vinvalbuf");
 1081         return (bufobj_invalbuf(&vp->v_bufobj, flags, td, slpflag, slptimeo));
 1082 }
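
/*
 * V_SAVE asks for dirty buffers to be written out before everything is
 * invalidated; a flags value of 0 simply discards them.  A common
 * pattern when a vnode's cached data must be flushed (a sketch):
 */
#if 0
	error = vinvalbuf(vp, V_SAVE, td, 0, 0);
#endif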
 1083 
 1084 /*
 1085  * Flush out buffers on the specified list.
 1086  *
 1087  */
 1088 static int
 1089 flushbuflist(bufv, flags, bo, slpflag, slptimeo)
 1090         struct bufv *bufv;
 1091         int flags;
 1092         struct bufobj *bo;
 1093         int slpflag, slptimeo;
 1094 {
 1095         struct buf *bp, *nbp;
 1096         int retval, error;
 1097         daddr_t lblkno;
 1098         b_xflags_t xflags;
 1099 
 1100         ASSERT_BO_LOCKED(bo);
 1101 
 1102         retval = 0;
 1103         TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) {
 1104                 if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
 1105                     ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
 1106                         continue;
 1107                 }
 1108                 lblkno = 0;
 1109                 xflags = 0;
 1110                 if (nbp != NULL) {
 1111                         lblkno = nbp->b_lblkno;
 1112                         xflags = nbp->b_xflags &
 1113                                 (BX_BKGRDMARKER | BX_VNDIRTY | BX_VNCLEAN);
 1114                 }
 1115                 retval = EAGAIN;
 1116                 error = BUF_TIMELOCK(bp,
 1117                     LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_MTX(bo),
 1118                     "flushbuf", slpflag, slptimeo);
 1119                 if (error) {
 1120                         BO_LOCK(bo);
 1121                         return (error != ENOLCK ? error : EAGAIN);
 1122                 }
 1123                 KASSERT(bp->b_bufobj == bo,
 1124                     ("bp %p wrong b_bufobj %p should be %p",
 1125                     bp, bp->b_bufobj, bo));
 1126                 if (bp->b_bufobj != bo) {       /* XXX: necessary ? */
 1127                         BUF_UNLOCK(bp);
 1128                         BO_LOCK(bo);
 1129                         return (EAGAIN);
 1130                 }
 1131                 /*
 1132                  * XXX Since there are no node locks for NFS, I
 1133                  * believe there is a slight chance that a delayed
 1134                  * write will occur while sleeping just above, so
 1135                  * check for it.
 1136                  */
 1137                 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
 1138                     (flags & V_SAVE)) {
 1139                         bremfree(bp);
 1140                         bp->b_flags |= B_ASYNC;
 1141                         bwrite(bp);
 1142                         BO_LOCK(bo);
 1143                         return (EAGAIN);        /* XXX: why not loop ? */
 1144                 }
 1145                 bremfree(bp);
 1146                 bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
 1147                 bp->b_flags &= ~B_ASYNC;
 1148                 brelse(bp);
 1149                 BO_LOCK(bo);
 1150                 if (nbp != NULL &&
 1151                     (nbp->b_bufobj != bo || 
 1152                      nbp->b_lblkno != lblkno ||
 1153                      (nbp->b_xflags &
 1154                       (BX_BKGRDMARKER | BX_VNDIRTY | BX_VNCLEAN)) != xflags))
 1155                         break;                  /* nbp invalid */
 1156         }
 1157         return (retval);
 1158 }
 1159 
 1160 /*
 1161  * Truncate a file's buffer and pages to a specified length.  This
 1162  * is in lieu of the old vinvalbuf mechanism, which performed unneeded
 1163  * sync activity.
 1164  */
 1165 int
 1166 vtruncbuf(struct vnode *vp, struct ucred *cred, struct thread *td, off_t length, int blksize)
 1167 {
 1168         struct buf *bp, *nbp;
 1169         int anyfreed;
 1170         int trunclbn;
 1171         struct bufobj *bo;
 1172 
 1173         CTR2(KTR_VFS, "vtruncbuf vp %p length %jd", vp, length);
 1174         /*
 1175          * Round up to the *next* lbn.
 1176          */
 1177         trunclbn = (length + blksize - 1) / blksize;
 1178 
 1179         ASSERT_VOP_LOCKED(vp, "vtruncbuf");
 1180 restart:
 1181         VI_LOCK(vp);
 1182         bo = &vp->v_bufobj;
 1183         anyfreed = 1;
 1184         for (;anyfreed;) {
 1185                 anyfreed = 0;
 1186                 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) {
 1187                         if (bp->b_lblkno < trunclbn)
 1188                                 continue;
 1189                         if (BUF_LOCK(bp,
 1190                             LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
 1191                             VI_MTX(vp)) == ENOLCK)
 1192                                 goto restart;
 1193 
 1194                         bremfree(bp);
 1195                         bp->b_flags |= (B_INVAL | B_RELBUF);
 1196                         bp->b_flags &= ~B_ASYNC;
 1197                         brelse(bp);
 1198                         anyfreed = 1;
 1199 
 1200                         if (nbp != NULL &&
 1201                             (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
 1202                             (nbp->b_vp != vp) ||
 1203                             (nbp->b_flags & B_DELWRI))) {
 1204                                 goto restart;
 1205                         }
 1206                         VI_LOCK(vp);
 1207                 }
 1208 
 1209                 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
 1210                         if (bp->b_lblkno < trunclbn)
 1211                                 continue;
 1212                         if (BUF_LOCK(bp,
 1213                             LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
 1214                             VI_MTX(vp)) == ENOLCK)
 1215                                 goto restart;
 1216                         bremfree(bp);
 1217                         bp->b_flags |= (B_INVAL | B_RELBUF);
 1218                         bp->b_flags &= ~B_ASYNC;
 1219                         brelse(bp);
 1220                         anyfreed = 1;
 1221                         if (nbp != NULL &&
 1222                             (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
 1223                             (nbp->b_vp != vp) ||
 1224                             (nbp->b_flags & B_DELWRI) == 0)) {
 1225                                 goto restart;
 1226                         }
 1227                         VI_LOCK(vp);
 1228                 }
 1229         }
 1230 
 1231         if (length > 0) {
 1232 restartsync:
 1233                 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
 1234                         if (bp->b_lblkno > 0)
 1235                                 continue;
 1236                         /*
 1237                          * Since we hold the vnode lock this should only
 1238                          * fail if we're racing with the buf daemon.
 1239                          */
 1240                         if (BUF_LOCK(bp,
 1241                             LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
 1242                             VI_MTX(vp)) == ENOLCK) {
 1243                                 goto restart;
 1244                         }
 1245                         VNASSERT((bp->b_flags & B_DELWRI), vp,
 1246                             ("buf(%p) on dirty queue without DELWRI", bp));
 1247 
 1248                         bremfree(bp);
 1249                         bawrite(bp);
 1250                         VI_LOCK(vp);
 1251                         goto restartsync;
 1252                 }
 1253         }
 1254 
 1255         bufobj_wwait(bo, 0, 0);
 1256         VI_UNLOCK(vp);
 1257         vnode_pager_setsize(vp, length);
 1258 
 1259         return (0);
 1260 }
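
/*
 * A worked example of the rounding above: with length == 10000 and
 * blksize == 4096, trunclbn == (10000 + 4095) / 4096 == 3, so logical
 * blocks 0-2 (which still back bytes 0 through 9999) survive and
 * blocks 3 and up are invalidated.
 */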
 1261 
 1262 /*
 1263  * buf_splay() - splay tree core for the clean/dirty list of buffers in
 1264  *               a vnode.
 1265  *
 1266  *      NOTE: We have to deal with the special case of a background bitmap
 1267  *      buffer, a situation where two buffers will have the same logical
 1268  *      block offset.  We want (1) only the foreground buffer to be accessed
 1269  *      in a lookup and (2) must differentiate between the foreground and
 1270  *      background buffer in the splay tree algorithm because the splay
 1271  *      tree cannot normally handle multiple entities with the same 'index'.
 1272  *      We accomplish this by adding differentiating flags to the splay tree's
 1273  *      numerical domain.
 1274  */
 1275 static
 1276 struct buf *
 1277 buf_splay(daddr_t lblkno, b_xflags_t xflags, struct buf *root)
 1278 {
 1279         struct buf dummy;
 1280         struct buf *lefttreemax, *righttreemin, *y;
 1281 
 1282         if (root == NULL)
 1283                 return (NULL);
 1284         lefttreemax = righttreemin = &dummy;
 1285         for (;;) {
 1286                 if (lblkno < root->b_lblkno ||
 1287                     (lblkno == root->b_lblkno &&
 1288                     (xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
 1289                         if ((y = root->b_left) == NULL)
 1290                                 break;
 1291                         if (lblkno < y->b_lblkno) {
 1292                                 /* Rotate right. */
 1293                                 root->b_left = y->b_right;
 1294                                 y->b_right = root;
 1295                                 root = y;
 1296                                 if ((y = root->b_left) == NULL)
 1297                                         break;
 1298                         }
 1299                         /* Link into the new root's right tree. */
 1300                         righttreemin->b_left = root;
 1301                         righttreemin = root;
 1302                 } else if (lblkno > root->b_lblkno ||
 1303                     (lblkno == root->b_lblkno &&
 1304                     (xflags & BX_BKGRDMARKER) > (root->b_xflags & BX_BKGRDMARKER))) {
 1305                         if ((y = root->b_right) == NULL)
 1306                                 break;
 1307                         if (lblkno > y->b_lblkno) {
 1308                                 /* Rotate left. */
 1309                                 root->b_right = y->b_left;
 1310                                 y->b_left = root;
 1311                                 root = y;
 1312                                 if ((y = root->b_right) == NULL)
 1313                                         break;
 1314                         }
 1315                         /* Link into the new root's left tree. */
 1316                         lefttreemax->b_right = root;
 1317                         lefttreemax = root;
 1318                 } else {
 1319                         break;
 1320                 }
 1321                 root = y;
 1322         }
 1323         /* Assemble the new root. */
 1324         lefttreemax->b_right = root->b_left;
 1325         righttreemin->b_left = root->b_right;
 1326         root->b_left = dummy.b_right;
 1327         root->b_right = dummy.b_left;
 1328         return (root);
 1329 }
 1330 
 1331 static void
 1332 buf_vlist_remove(struct buf *bp)
 1333 {
 1334         struct buf *root;
 1335         struct bufv *bv;
 1336 
 1337         KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
 1338         ASSERT_BO_LOCKED(bp->b_bufobj);
 1339         KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) !=
 1340             (BX_VNDIRTY|BX_VNCLEAN),
 1341             ("buf_vlist_remove: Buf %p is on two lists", bp));
 1342         if (bp->b_xflags & BX_VNDIRTY) 
 1343                 bv = &bp->b_bufobj->bo_dirty;
 1344         else
 1345                 bv = &bp->b_bufobj->bo_clean;
 1346         if (bp != bv->bv_root) {
 1347                 root = buf_splay(bp->b_lblkno, bp->b_xflags, bv->bv_root);
 1348                 KASSERT(root == bp, ("splay lookup failed in remove"));
 1349         }
 1350         if (bp->b_left == NULL) {
 1351                 root = bp->b_right;
 1352         } else {
 1353                 root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
 1354                 root->b_right = bp->b_right;
 1355         }
 1356         bv->bv_root = root;
 1357         TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs);
 1358         bv->bv_cnt--;
 1359         bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
 1360 }
 1361 
 1362 /*
 1363  * Add the buffer to the sorted clean or dirty block list using a
 1364  * splay tree algorithm.
 1365  *
 1366  * NOTE: xflags is passed as a constant, optimizing this inline function!
 1367  */
 1368 static void
 1369 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags)
 1370 {
 1371         struct buf *root;
 1372         struct bufv *bv;
 1373 
 1374         ASSERT_BO_LOCKED(bo);
 1375         KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
 1376             ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags));
 1377         bp->b_xflags |= xflags;
 1378         if (xflags & BX_VNDIRTY)
 1379                 bv = &bo->bo_dirty;
 1380         else
 1381                 bv = &bo->bo_clean;
 1382 
 1383         root = buf_splay(bp->b_lblkno, bp->b_xflags, bv->bv_root);
 1384         if (root == NULL) {
 1385                 bp->b_left = NULL;
 1386                 bp->b_right = NULL;
 1387                 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs);
 1388         } else if (bp->b_lblkno < root->b_lblkno ||
 1389             (bp->b_lblkno == root->b_lblkno &&
 1390             (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
 1391                 bp->b_left = root->b_left;
 1392                 bp->b_right = root;
 1393                 root->b_left = NULL;
 1394                 TAILQ_INSERT_BEFORE(root, bp, b_bobufs);
 1395         } else {
 1396                 bp->b_right = root->b_right;
 1397                 bp->b_left = root;
 1398                 root->b_right = NULL;
 1399                 TAILQ_INSERT_AFTER(&bv->bv_hd, root, bp, b_bobufs);
 1400         }
 1401         bv->bv_cnt++;
 1402         bv->bv_root = bp;
 1403 }
 1404 
 1405 /*
 1406  * Lookup a buffer using the splay tree.  Note that we specifically avoid
 1407  * shadow buffers used in background bitmap writes.
 1408  *
 1409  * This code isn't quite as efficient as it could be because we are maintaining
 1410  * two sorted lists and do not know which list the block resides in.
 1411  *
 1412  * During a "make buildworld" the desired buffer is found at one of
 1413  * the roots more than 60% of the time.  Thus, checking both roots
 1414  * before performing either splay eliminates unnecessary splays on the
 1415  * first tree splayed.
 1416  */
 1417 struct buf *
 1418 gbincore(struct bufobj *bo, daddr_t lblkno)
 1419 {
 1420         struct buf *bp;
 1421 
 1422         ASSERT_BO_LOCKED(bo);
 1423         if ((bp = bo->bo_clean.bv_root) != NULL &&
 1424             bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
 1425                 return (bp);
 1426         if ((bp = bo->bo_dirty.bv_root) != NULL &&
 1427             bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
 1428                 return (bp);
 1429         if ((bp = bo->bo_clean.bv_root) != NULL) {
 1430                 bo->bo_clean.bv_root = bp = buf_splay(lblkno, 0, bp);
 1431                 if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
 1432                         return (bp);
 1433         }
 1434         if ((bp = bo->bo_dirty.bv_root) != NULL) {
 1435                 bo->bo_dirty.bv_root = bp = buf_splay(lblkno, 0, bp);
 1436                 if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
 1437                         return (bp);
 1438         }
 1439         return (NULL);
 1440 }
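
/*
 * A hypothetical caller, for illustration: gbincore() asserts that the
 * bufobj lock is held, so a lookup must be bracketed by BO_LOCK() and
 * BO_UNLOCK().  The answer is only stable while the lock remains held;
 * this sketch is not used elsewhere in this file.
 */
static int
block_is_incore(struct bufobj *bo, daddr_t lblkno)
{
        int found;

        BO_LOCK(bo);
        found = (gbincore(bo, lblkno) != NULL);
        BO_UNLOCK(bo);
        return (found);
}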
 1441 
 1442 /*
 1443  * Associate a buffer with a vnode.
 1444  */
 1445 void
 1446 bgetvp(struct vnode *vp, struct buf *bp)
 1447 {
 1448 
 1449         VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free"));
 1450 
 1451         CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags);
 1452         VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp,
 1453             ("bgetvp: bp already attached! %p", bp));
 1454 
 1455         ASSERT_VI_LOCKED(vp, "bgetvp");
 1456         vholdl(vp);
 1457         bp->b_vp = vp;
 1458         bp->b_bufobj = &vp->v_bufobj;
 1459         /*
 1460          * Insert onto list for new vnode.
 1461          */
 1462         buf_vlist_add(bp, &vp->v_bufobj, BX_VNCLEAN);
 1463 }
 1464 
 1465 /*
 1466  * Disassociate a buffer from a vnode.
 1467  */
 1468 void
 1469 brelvp(struct buf *bp)
 1470 {
 1471         struct bufobj *bo;
 1472         struct vnode *vp;
 1473 
 1474         CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
 1475         KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
 1476 
 1477         /*
 1478          * Delete from old vnode list, if on one.
 1479          */
 1480         vp = bp->b_vp;          /* XXX */
 1481         bo = bp->b_bufobj;
 1482         BO_LOCK(bo);
 1483         if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
 1484                 buf_vlist_remove(bp);
 1485         else
 1486                 panic("brelvp: Buffer %p not on queue.", bp);
 1487         if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
 1488                 bo->bo_flag &= ~BO_ONWORKLST;
 1489                 mtx_lock(&sync_mtx);
 1490                 LIST_REMOVE(bo, bo_synclist);
 1491                 syncer_worklist_len--;
 1492                 mtx_unlock(&sync_mtx);
 1493         }
 1494         bp->b_vp = NULL;
 1495         bp->b_bufobj = NULL;
 1496         vdropl(vp);
 1497 }
 1498 
 1499 /*
 1500  * Add an item to the syncer work queue.
 1501  */
 1502 static void
 1503 vn_syncer_add_to_worklist(struct bufobj *bo, int delay)
 1504 {
 1505         int slot;
 1506 
 1507         ASSERT_BO_LOCKED(bo);
 1508 
 1509         mtx_lock(&sync_mtx);
 1510         if (bo->bo_flag & BO_ONWORKLST)
 1511                 LIST_REMOVE(bo, bo_synclist);
 1512         else {
 1513                 bo->bo_flag |= BO_ONWORKLST;
 1514                 syncer_worklist_len++;
 1515         }
 1516 
 1517         if (delay > syncer_maxdelay - 2)
 1518                 delay = syncer_maxdelay - 2;
 1519         slot = (syncer_delayno + delay) & syncer_mask;
 1520 
 1521         LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist);
 1522         mtx_unlock(&sync_mtx);
 1523 }
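
/*
 * A worked example of the slot computation above, assuming a power-of-two
 * number of buckets so that "& syncer_mask" acts as a cheap modulus: with
 * 32 buckets (syncer_mask == 31), syncer_delayno == 30 and delay == 5,
 *
 *      slot = (30 + 5) & 31 = 35 & 31 = 3,
 *
 * i.e. the bufobj wraps around to bucket 3 and will be visited about five
 * seconds from now, since sched_sync() advances one bucket per second.
 */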
 1524 
 1525 static int
 1526 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS)
 1527 {
 1528         int error, len;
 1529 
 1530         mtx_lock(&sync_mtx);
 1531         len = syncer_worklist_len - sync_vnode_count;
 1532         mtx_unlock(&sync_mtx);
 1533         error = SYSCTL_OUT(req, &len, sizeof(len));
 1534         return (error);
 1535 }
 1536 
 1537 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
 1538     sysctl_vfs_worklist_len, "I", "Syncer thread worklist length");
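
/*
 * The handler above exports the value as vfs.worklist_len.  An illustrative
 * userland reader (standard sysctl(3) interface; not part of this file):
 *
 *      #include <sys/types.h>
 *      #include <sys/sysctl.h>
 *      #include <stdio.h>
 *
 *      int
 *      main(void)
 *      {
 *              int len;
 *              size_t size = sizeof(len);
 *
 *              if (sysctlbyname("vfs.worklist_len", &len, &size,
 *                  NULL, 0) == 0)
 *                      printf("syncer worklist length: %d\n", len);
 *              return (0);
 *      }
 */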
 1539 
 1540 static struct proc *updateproc;
 1541 static void sched_sync(void);
 1542 static struct kproc_desc up_kp = {
 1543         "syncer",
 1544         sched_sync,
 1545         &updateproc
 1546 };
 1547 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
 1548 
 1549 static int
 1550 sync_vnode(struct bufobj *bo, struct thread *td)
 1551 {
 1552         struct vnode *vp;
 1553         struct mount *mp;
 1554 
 1555         vp = bo->__bo_vnode;    /* XXX */
 1556         if (VOP_ISLOCKED(vp, NULL) != 0)
 1557                 return (1);
 1558         if (VI_TRYLOCK(vp) == 0)
 1559                 return (1);
 1560         /*
 1561          * We use vhold in case the vnode does not
 1562          * successfully sync.  vhold prevents the vnode from
 1563          * going away when we unlock the sync_mtx so that
 1564          * we can acquire the vnode interlock.
 1565          */
 1566         vholdl(vp);
 1567         mtx_unlock(&sync_mtx);
 1568         VI_UNLOCK(vp);
 1569         if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
 1570                 vdrop(vp);
 1571                 mtx_lock(&sync_mtx);
 1572                 return (1);
 1573         }
 1574         vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
 1575         (void) VOP_FSYNC(vp, MNT_LAZY, td);
 1576         VOP_UNLOCK(vp, 0, td);
 1577         vn_finished_write(mp);
 1578         VI_LOCK(vp);
 1579         if ((bo->bo_flag & BO_ONWORKLST) != 0) {
 1580                 /*
 1581                  * Put us back on the worklist.  The worklist
 1582                  * routine will remove us from our current
 1583                  * position and then add us back in at a later
 1584                  * position.
 1585                  */
 1586                 vn_syncer_add_to_worklist(bo, syncdelay);
 1587         }
 1588         vdropl(vp);
 1589         mtx_lock(&sync_mtx);
 1590         return (0);
 1591 }
 1592 
 1593 /*
 1594  * System filesystem synchronizer daemon.
 1595  */
 1596 static void
 1597 sched_sync(void)
 1598 {
 1599         struct synclist *next;
 1600         struct synclist *slp;
 1601         struct bufobj *bo;
 1602         long starttime;
 1603         struct thread *td = FIRST_THREAD_IN_PROC(updateproc);
 1604         static int dummychan;
 1605         int last_work_seen;
 1606         int net_worklist_len;
 1607         int syncer_final_iter;
 1608         int first_printf;
 1609         int error;
 1610 
 1611         mtx_lock(&Giant);
 1612         last_work_seen = 0;
 1613         syncer_final_iter = 0;
 1614         first_printf = 1;
 1615         syncer_state = SYNCER_RUNNING;
 1616         starttime = time_second;
 1617         td->td_pflags |= TDP_NORUNNINGBUF;
 1618 
 1619         EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc,
 1620             SHUTDOWN_PRI_LAST);
 1621 
 1622         for (;;) {
 1623                 mtx_lock(&sync_mtx);
 1624                 if (syncer_state == SYNCER_FINAL_DELAY &&
 1625                     syncer_final_iter == 0) {
 1626                         mtx_unlock(&sync_mtx);
 1627                         kthread_suspend_check(td->td_proc);
 1628                         mtx_lock(&sync_mtx);
 1629                 }
 1630                 net_worklist_len = syncer_worklist_len - sync_vnode_count;
 1631                 if (syncer_state != SYNCER_RUNNING &&
 1632                     starttime != time_second) {
 1633                         if (first_printf) {
 1634                                 printf("\nSyncing disks, vnodes remaining...");
 1635                                 first_printf = 0;
 1636                         }
 1637                         printf("%d ", net_worklist_len);
 1638                 }
 1639                 starttime = time_second;
 1640 
 1641                 /*
 1642                  * Push files whose dirty time has expired.  Be careful
 1643                  * of interrupt race on slp queue.
 1644                  *
 1645                  * Skip over empty worklist slots when shutting down.
 1646                  */
 1647                 do {
 1648                         slp = &syncer_workitem_pending[syncer_delayno];
 1649                         syncer_delayno += 1;
 1650                         if (syncer_delayno == syncer_maxdelay)
 1651                                 syncer_delayno = 0;
 1652                         next = &syncer_workitem_pending[syncer_delayno];
 1653                         /*
 1654                          * If the worklist has wrapped since it
 1655                          * was emptied of all but syncer vnodes,
 1656                          * switch to the FINAL_DELAY state and run
 1657                          * for one more second.
 1658                          */
 1659                         if (syncer_state == SYNCER_SHUTTING_DOWN &&
 1660                             net_worklist_len == 0 &&
 1661                             last_work_seen == syncer_delayno) {
 1662                                 syncer_state = SYNCER_FINAL_DELAY;
 1663                                 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP;
 1664                         }
 1665                 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) &&
 1666                     syncer_worklist_len > 0);
 1667 
 1668                 /*
 1669                  * Keep track of the last time there was anything
 1670                  * on the worklist other than syncer vnodes.
 1671                  * Return to the SHUTTING_DOWN state if any
 1672                  * new work appears.
 1673                  */
 1674                 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING)
 1675                         last_work_seen = syncer_delayno;
 1676                 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY)
 1677                         syncer_state = SYNCER_SHUTTING_DOWN;
 1678                 while ((bo = LIST_FIRST(slp)) != NULL) {
 1679                         error = sync_vnode(bo, td);
 1680                         if (error == 1) {
 1681                                 LIST_REMOVE(bo, bo_synclist);
 1682                                 LIST_INSERT_HEAD(next, bo, bo_synclist);
 1683                                 continue;
 1684                         }
 1685                 }
 1686                 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0)
 1687                         syncer_final_iter--;
 1688                 mtx_unlock(&sync_mtx);
 1689                 /*
 1690                  * The variable rushjob allows the kernel to speed up the
 1691                  * processing of the filesystem syncer process. A rushjob
 1692                  * value of N tells the filesystem syncer to process the next
 1693                  * N seconds worth of work on its queue ASAP. Currently rushjob
 1694                  * is used by the soft update code to speed up the filesystem
 1695                  * syncer process when the incore state is getting so far
 1696                  * ahead of the disk that the kernel memory pool is being
 1697                  * threatened with exhaustion.
 1698                  */
 1699                 mtx_lock(&sync_mtx);
 1700                 if (rushjob > 0) {
 1701                         rushjob -= 1;
 1702                         mtx_unlock(&sync_mtx);
 1703                         continue;
 1704                 }
 1705                 mtx_unlock(&sync_mtx);
 1706                 /*
 1707                  * Just sleep for a short period of time between
 1708                  * iterations when shutting down to allow some I/O
 1709                  * to happen.
 1710                  *
 1711                  * If it has taken us less than a second to process the
 1712                  * current work, then wait. Otherwise start right over
 1713                  * again. We can still lose time if any single round
 1714                  * takes more than two seconds, but it does not really
 1715                  * matter as we are just trying to generally pace the
 1716                  * filesystem activity.
 1717                  */
 1718                 if (syncer_state != SYNCER_RUNNING)
 1719                         tsleep(&dummychan, PPAUSE, "syncfnl",
 1720                             hz / SYNCER_SHUTDOWN_SPEEDUP);
 1721                 else if (time_second == starttime)
 1722                         tsleep(&lbolt, PPAUSE, "syncer", 0);
 1723         }
 1724 }
 1725 
 1726 /*
 1727  * Request the syncer daemon to speed up its work.
 1728  * We never push it to speed up more than half of its
 1729  * normal turn time; otherwise it could take over the CPU.
 1730  */
 1731 int
 1732 speedup_syncer(void)
 1733 {
 1734         struct thread *td;
 1735         int ret = 0;
 1736 
 1737         td = FIRST_THREAD_IN_PROC(updateproc);
 1738         sleepq_remove(td, &lbolt);
 1739         mtx_lock(&sync_mtx);
 1740         if (rushjob < syncdelay / 2) {
 1741                 rushjob += 1;
 1742                 stat_rush_requests += 1;
 1743                 ret = 1;
 1744         }
 1745         mtx_unlock(&sync_mtx);
 1746         return (ret);
 1747 }
 1748 
 1749 /*
 1750  * Tell the syncer to speed up its work and run through its work
 1751  * list several times, then tell it to shut down.
 1752  */
 1753 static void
 1754 syncer_shutdown(void *arg, int howto)
 1755 {
 1756         struct thread *td;
 1757 
 1758         if (howto & RB_NOSYNC)
 1759                 return;
 1760         td = FIRST_THREAD_IN_PROC(updateproc);
 1761         sleepq_remove(td, &lbolt);
 1762         mtx_lock(&sync_mtx);
 1763         syncer_state = SYNCER_SHUTTING_DOWN;
 1764         rushjob = 0;
 1765         mtx_unlock(&sync_mtx);
 1766         kproc_shutdown(arg, howto);
 1767 }
 1768 
 1769 /*
 1770  * Reassign a buffer from one vnode to another.
 1771  * Used to assign file specific control information
 1772  * (indirect blocks) to the vnode to which they belong.
 1773  */
 1774 void
 1775 reassignbuf(struct buf *bp)
 1776 {
 1777         struct vnode *vp;
 1778         struct bufobj *bo;
 1779         int delay;
 1780 #ifdef INVARIANTS
 1781         struct bufv *bv;
 1782 #endif
 1783 
 1784         vp = bp->b_vp;
 1785         bo = bp->b_bufobj;
 1786         ++reassignbufcalls;
 1787 
 1788         CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X",
 1789             bp, bp->b_vp, bp->b_flags);
 1790         /*
 1791          * B_PAGING flagged buffers cannot be reassigned because their vp
 1792          * is not fully linked in.
 1793          */
 1794         if (bp->b_flags & B_PAGING)
 1795                 panic("cannot reassign paging buffer");
 1796 
 1797         /*
 1798          * Delete from old vnode list, if on one.
 1799          */
 1800         VI_LOCK(vp);
 1801         if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
 1802                 buf_vlist_remove(bp);
 1803         else
 1804                 panic("reassignbuf: Buffer %p not on queue.", bp);
 1805         /*
 1806          * If dirty, put on list of dirty buffers; otherwise insert onto list
 1807          * of clean buffers.
 1808          */
 1809         if (bp->b_flags & B_DELWRI) {
 1810                 if ((bo->bo_flag & BO_ONWORKLST) == 0) {
 1811                         switch (vp->v_type) {
 1812                         case VDIR:
 1813                                 delay = dirdelay;
 1814                                 break;
 1815                         case VCHR:
 1816                                 delay = metadelay;
 1817                                 break;
 1818                         default:
 1819                                 delay = filedelay;
 1820                         }
 1821                         vn_syncer_add_to_worklist(bo, delay);
 1822                 }
 1823                 buf_vlist_add(bp, bo, BX_VNDIRTY);
 1824         } else {
 1825                 buf_vlist_add(bp, bo, BX_VNCLEAN);
 1826 
 1827                 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
 1828                         mtx_lock(&sync_mtx);
 1829                         LIST_REMOVE(bo, bo_synclist);
 1830                         syncer_worklist_len--;
 1831                         mtx_unlock(&sync_mtx);
 1832                         bo->bo_flag &= ~BO_ONWORKLST;
 1833                 }
 1834         }
 1835 #ifdef INVARIANTS
 1836         bv = &bo->bo_clean;
 1837         bp = TAILQ_FIRST(&bv->bv_hd);
 1838         KASSERT(bp == NULL || bp->b_bufobj == bo,
 1839             ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
 1840         bp = TAILQ_LAST(&bv->bv_hd, buflists);
 1841         KASSERT(bp == NULL || bp->b_bufobj == bo,
 1842             ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
 1843         bv = &bo->bo_dirty;
 1844         bp = TAILQ_FIRST(&bv->bv_hd);
 1845         KASSERT(bp == NULL || bp->b_bufobj == bo,
 1846             ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
 1847         bp = TAILQ_LAST(&bv->bv_hd, buflists);
 1848         KASSERT(bp == NULL || bp->b_bufobj == bo,
 1849             ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
 1850 #endif
 1851         VI_UNLOCK(vp);
 1852 }
 1853 
 1854 /*
 1855  * Increment the use and hold counts on the vnode, taking care to reference
 1856  * the driver's usecount if this is a chardev.  The vholdl() will remove
 1857  * the vnode from the free list if it is presently free.  Requires the
 1858  * vnode interlock and returns with it held.
 1859  */
 1860 static void
 1861 v_incr_usecount(struct vnode *vp)
 1862 {
 1863 
 1864         CTR3(KTR_VFS, "v_incr_usecount: vp %p holdcnt %d usecount %d\n",
 1865             vp, vp->v_holdcnt, vp->v_usecount);
 1866         vp->v_usecount++;
 1867         if (vp->v_type == VCHR && vp->v_rdev != NULL) {
 1868                 dev_lock();
 1869                 vp->v_rdev->si_usecount++;
 1870                 dev_unlock();
 1871         }
 1872         vholdl(vp);
 1873 }
 1874 
 1875 /*
 1876  * Turn a holdcnt into a use+holdcnt such that only one call to
 1877  * v_decr_usecount is needed.
 1878  */
 1879 static void
 1880 v_upgrade_usecount(struct vnode *vp)
 1881 {
 1882 
 1883         CTR3(KTR_VFS, "v_upgrade_usecount: vp %p holdcnt %d usecount %d\n",
 1884             vp, vp->v_holdcnt, vp->v_usecount);
 1885         vp->v_usecount++;
 1886         if (vp->v_type == VCHR && vp->v_rdev != NULL) {
 1887                 dev_lock();
 1888                 vp->v_rdev->si_usecount++;
 1889                 dev_unlock();
 1890         }
 1891 }
 1892 
 1893 /*
 1894  * Decrement the vnode use and hold count along with the driver's usecount
 1895  * if this is a chardev.  The vdropl() below releases the vnode interlock
 1896  * as it may free the vnode.
 1897  */
 1898 static void
 1899 v_decr_usecount(struct vnode *vp)
 1900 {
 1901 
 1902         CTR3(KTR_VFS, "v_decr_usecount: vp %p holdcnt %d usecount %d\n",
 1903             vp, vp->v_holdcnt, vp->v_usecount);
 1904         ASSERT_VI_LOCKED(vp, __FUNCTION__);
 1905         VNASSERT(vp->v_usecount > 0, vp,
 1906             ("v_decr_usecount: negative usecount"));
 1907         vp->v_usecount--;
 1908         if (vp->v_type == VCHR && vp->v_rdev != NULL) {
 1909                 dev_lock();
 1910                 vp->v_rdev->si_usecount--;
 1911                 dev_unlock();
 1912         }
 1913         vdropl(vp);
 1914 }
 1915 
 1916 /*
 1917  * Decrement only the use count and driver use count.  This is intended to
 1918  * be paired with a follow on vdropl() to release the remaining hold count.
 1919  * In this way we may vgone() a vnode with a 0 usecount without risk of
 1920  * having it end up on a free list because the hold count is kept above 0.
 1921  */
 1922 static void
 1923 v_decr_useonly(struct vnode *vp)
 1924 {
 1925 
 1926         CTR3(KTR_VFS, "v_decr_useonly: vp %p holdcnt %d usecount %d\n",
 1927             vp, vp->v_holdcnt, vp->v_usecount);
 1928         ASSERT_VI_LOCKED(vp, __FUNCTION__);
 1929         VNASSERT(vp->v_usecount > 0, vp,
 1930             ("v_decr_useonly: negative usecount"));
 1931         vp->v_usecount--;
 1932         if (vp->v_type == VCHR && vp->v_rdev != NULL) {
 1933                 dev_lock();
 1934                 vp->v_rdev->si_usecount--;
 1935                 dev_unlock();
 1936         }
 1937 }
 1938 
 1939 /*
 1940  * Grab a particular vnode from the free list, increment its
 1941  * reference count and lock it. The vnode lock bit is set if the
 1942  * vnode is being eliminated in vgone. The process is awakened
 1943  * when the transition is completed, and an error returned to
 1944  * indicate that the vnode is no longer usable (possibly having
 1945  * been changed to a new filesystem type).
 1946  */
 1947 int
 1948 vget(struct vnode *vp, int flags, struct thread *td)
 1949 {
 1950         int oweinact;
 1951         int oldflags;
 1952         int error;
 1953 
 1954         error = 0;
 1955         oldflags = flags;
 1956         oweinact = 0;
 1957         VFS_ASSERT_GIANT(vp->v_mount);
 1958         if ((flags & LK_INTERLOCK) == 0)
 1959                 VI_LOCK(vp);
 1960         /*
 1961          * If the inactive call was deferred because vput() was called
 1962          * with a shared lock, we have to do it here before another thread
 1963          * gets a reference to data that should be dead.
 1964          */
 1965         if (vp->v_iflag & VI_OWEINACT) {
 1966                 if (flags & LK_NOWAIT) {
 1967                         VI_UNLOCK(vp);
 1968                         return (EBUSY);
 1969                 }
 1970                 flags &= ~LK_TYPE_MASK;
 1971                 flags |= LK_EXCLUSIVE;
 1972                 oweinact = 1;
 1973         }
 1974         vholdl(vp);
 1975         if ((error = vn_lock(vp, flags | LK_INTERLOCK, td)) != 0) {
 1976                 vdrop(vp);
 1977                 return (error);
 1978         }
 1979         VI_LOCK(vp);
 1980         /* Upgrade our holdcnt to a usecount. */
 1981         v_upgrade_usecount(vp);
 1982         if (vp->v_iflag & VI_DOOMED && (flags & LK_RETRY) == 0)
 1983                 panic("vget: vn_lock failed to return ENOENT\n");
 1984         if (oweinact) {
 1985                 if (vp->v_iflag & VI_OWEINACT)
 1986                         vinactive(vp, td);
 1987                 VI_UNLOCK(vp);
 1988                 if ((oldflags & LK_TYPE_MASK) == 0)
 1989                         VOP_UNLOCK(vp, 0, td);
 1990         } else
 1991                 VI_UNLOCK(vp);
 1992         return (0);
 1993 }
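
/*
 * A common calling pattern, for illustration (hypothetical snippet; the
 * same shape appears in vfs_msync() below): take the interlock, obtain a
 * locked and referenced vnode, operate on it, then release both the lock
 * and the reference with vput().
 *
 *      VI_LOCK(vp);
 *      if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) == 0) {
 *              ... operate on the exclusively locked vnode ...
 *              vput(vp);
 *      }
 */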
 1994 
 1995 /*
 1996  * Increase the reference count of a vnode.
 1997  */
 1998 void
 1999 vref(struct vnode *vp)
 2000 {
 2001 
 2002         VI_LOCK(vp);
 2003         v_incr_usecount(vp);
 2004         VI_UNLOCK(vp);
 2005 }
 2006 
 2007 /*
 2008  * Return reference count of a vnode.
 2009  *
 2010  * The results of this call are only guaranteed when some mechanism other
 2011  * than the VI lock is used to stop other processes from gaining references
 2012  * to the vnode.  This may be the case if the caller holds the only reference.
 2013  * This is also useful when stale data is acceptable as race conditions may
 2014  * be accounted for by some other means.
 2015  */
 2016 int
 2017 vrefcnt(struct vnode *vp)
 2018 {
 2019         int usecnt;
 2020 
 2021         VI_LOCK(vp);
 2022         usecnt = vp->v_usecount;
 2023         VI_UNLOCK(vp);
 2024 
 2025         return (usecnt);
 2026 }
 2027 
 2028 
 2029 /*
 2030  * Vnode put/release.
 2031  * If count drops to zero, call inactive routine and return to freelist.
 2032  */
 2033 void
 2034 vrele(struct vnode *vp)
 2035 {
 2037         struct thread *td = curthread;  /* XXX */
 2038 
 2039         KASSERT(vp != NULL, ("vrele: null vp"));
 2040         VFS_ASSERT_GIANT(vp->v_mount);
 2041 
 2042         VI_LOCK(vp);
 2043 
 2044         /* Skip this v_writecount check if we're going to panic below. */
 2045         VNASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1, vp,
 2046             ("vrele: missed vn_close"));
 2047 
 2048         if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
 2049             vp->v_usecount == 1)) {
 2050                 v_decr_usecount(vp);
 2051                 return;
 2052         }
 2053         if (vp->v_usecount != 1) {
 2054 #ifdef DIAGNOSTIC
 2055                 vprint("vrele: negative ref count", vp);
 2056 #endif
 2057                 VI_UNLOCK(vp);
 2058                 panic("vrele: negative ref cnt");
 2059         }
 2060         /*
 2061          * We want to hold the vnode until the inactive finishes to
 2062          * prevent vgone() races.  We drop the use count here and the
 2063          * hold count below when we're done.
 2064          */
 2065         v_decr_useonly(vp);
 2066         /*
 2067          * We must call VOP_INACTIVE with the node locked. Mark
 2068          * as VI_DOINGINACT to avoid recursion.
 2069          */
 2070         vp->v_iflag |= VI_OWEINACT;
 2071         if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) == 0) {
 2072                 VI_LOCK(vp);
 2073                 if (vp->v_usecount > 0)
 2074                         vp->v_iflag &= ~VI_OWEINACT;
 2075                 if (vp->v_iflag & VI_OWEINACT)
 2076                         vinactive(vp, td);
 2077                 VOP_UNLOCK(vp, 0, td);
 2078         } else {
 2079                 VI_LOCK(vp);
 2080                 if (vp->v_usecount > 0)
 2081                         vp->v_iflag &= ~VI_OWEINACT;
 2082         }
 2083         vdropl(vp);
 2084 }
 2085 
 2086 /*
 2087  * Release an already locked vnode.  This gives the same effect as
 2088  * unlock+vrele(), but takes less time and avoids releasing and
 2089  * re-acquiring the lock (as vrele() acquires the lock internally).
 2090  */
 2091 void
 2092 vput(struct vnode *vp)
 2093 {
 2095         struct thread *td = curthread;  /* XXX */
 2096         int error;
 2097 
 2098         KASSERT(vp != NULL, ("vput: null vp"));
 2099         ASSERT_VOP_LOCKED(vp, "vput");
 2100         VFS_ASSERT_GIANT(vp->v_mount);
 2101         VI_LOCK(vp);
 2102         /* Skip this v_writecount check if we're going to panic below. */
 2103         VNASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1, vp,
 2104             ("vput: missed vn_close"));
 2105         error = 0;
 2106 
 2107         if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
 2108             vp->v_usecount == 1)) {
 2109                 VOP_UNLOCK(vp, 0, td);
 2110                 v_decr_usecount(vp);
 2111                 return;
 2112         }
 2113 
 2114         if (vp->v_usecount != 1) {
 2115 #ifdef DIAGNOSTIC
 2116                 vprint("vput: negative ref count", vp);
 2117 #endif
 2118                 panic("vput: negative ref cnt");
 2119         }
 2120         /*
 2121          * We want to hold the vnode until the inactive finishes to
 2122          * prevent vgone() races.  We drop the use count here and the
 2123          * hold count below when we're done.
 2124          */
 2125         v_decr_useonly(vp);
 2126         vp->v_iflag |= VI_OWEINACT;
 2127         if (VOP_ISLOCKED(vp, NULL) != LK_EXCLUSIVE) {
 2128                 error = VOP_LOCK(vp, LK_EXCLUPGRADE|LK_INTERLOCK|LK_NOWAIT, td);
 2129                 VI_LOCK(vp);
 2130                 if (error) {
 2131                         if (vp->v_usecount > 0)
 2132                                 vp->v_iflag &= ~VI_OWEINACT;
 2133                         goto done;
 2134                 }
 2135         }
 2136         if (vp->v_usecount > 0)
 2137                 vp->v_iflag &= ~VI_OWEINACT;
 2138         if (vp->v_iflag & VI_OWEINACT)
 2139                 vinactive(vp, td);
 2140         VOP_UNLOCK(vp, 0, td);
 2141 done:
 2142         vdropl(vp);
 2143 }
 2144 
 2145 /*
 2146  * Somebody doesn't want the vnode recycled.
 2147  */
 2148 void
 2149 vhold(struct vnode *vp)
 2150 {
 2151 
 2152         VI_LOCK(vp);
 2153         vholdl(vp);
 2154         VI_UNLOCK(vp);
 2155 }
 2156 
 2157 void
 2158 vholdl(struct vnode *vp)
 2159 {
 2160 
 2161         vp->v_holdcnt++;
 2162         if (VSHOULDBUSY(vp))
 2163                 vbusy(vp);
 2164 }
 2165 
 2166 /*
 2167  * Note that there is one less who cares about this vnode.  vdrop() is the
 2168  * opposite of vhold().
 2169  */
 2170 void
 2171 vdrop(struct vnode *vp)
 2172 {
 2173 
 2174         VI_LOCK(vp);
 2175         vdropl(vp);
 2176 }
 2177 
 2178 /*
 2179  * Drop the hold count of the vnode.  If this is the last reference to
 2180  * the vnode, we free it if it has been vgone'd; otherwise it is
 2181  * placed on the free list.
 2182  */
 2183 static void
 2184 vdropl(struct vnode *vp)
 2185 {
 2186 
 2187         if (vp->v_holdcnt <= 0)
 2188                 panic("vdrop: holdcnt %d", vp->v_holdcnt);
 2189         vp->v_holdcnt--;
 2190         if (vp->v_holdcnt == 0) {
 2191                 if (vp->v_iflag & VI_DOOMED) {
 2192                         vdestroy(vp);
 2193                         return;
 2194                 } else
 2195                         vfree(vp);
 2196         }
 2197         VI_UNLOCK(vp);
 2198 }
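
/*
 * For illustration, the vhold()/vdrop() pairing used when a caller must
 * drop the interlock yet keep the vnode from being freed or recycled
 * underneath it (compare sync_vnode() above):
 *
 *      VI_LOCK(vp);
 *      vholdl(vp);             pin the vnode
 *      VI_UNLOCK(vp);
 *      ... work without the interlock held ...
 *      vdrop(vp);              may return the vnode to the free list
 */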
 2199 
 2200 /*
 2201  * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT
 2202  * flags.  DOINGINACT prevents us from recursing in calls to vinactive.
 2203  * OWEINACT tracks whether a vnode missed a call to inactive due to a
 2204  * failed lock upgrade.
 2205  */
 2206 static void
 2207 vinactive(struct vnode *vp, struct thread *td)
 2208 {
 2209 
 2210         ASSERT_VOP_LOCKED(vp, "vinactive");
 2211         ASSERT_VI_LOCKED(vp, "vinactive");
 2212         VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp,
 2213             ("vinactive: recursed on VI_DOINGINACT"));
 2214         vp->v_iflag |= VI_DOINGINACT;
 2215         vp->v_iflag &= ~VI_OWEINACT;
 2216         VI_UNLOCK(vp);
 2217         VOP_INACTIVE(vp, td);
 2218         VI_LOCK(vp);
 2219         VNASSERT(vp->v_iflag & VI_DOINGINACT, vp,
 2220             ("vinactive: lost VI_DOINGINACT"));
 2221         vp->v_iflag &= ~VI_DOINGINACT;
 2222 }
 2223 
 2224 /*
 2225  * Remove any vnodes in the vnode table belonging to mount point mp.
 2226  *
 2227  * If FORCECLOSE is not specified, there should not be any active vnodes;
 2228  * an error is returned if any are found (nb: this is a user error, not a
 2229  * system error). If FORCECLOSE is specified, detach any active vnodes
 2230  * that are found.
 2231  *
 2232  * If WRITECLOSE is set, only flush out regular file vnodes open for
 2233  * writing.
 2234  *
 2235  * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
 2236  *
 2237  * `rootrefs' specifies the base reference count for the root vnode
 2238  * of this filesystem. The root vnode is considered busy if its
 2239  * v_usecount exceeds this value. On a successful return, vflush()
 2240  * will call vrele() on the root vnode exactly rootrefs times.
 2241  * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 2242  * be zero.
 2243  */
 2244 #ifdef DIAGNOSTIC
 2245 static int busyprt = 0;         /* print out busy vnodes */
 2246 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
 2247 #endif
 2248 
 2249 int
 2250 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td)
 2251 {
 2256         struct vnode *vp, *mvp, *rootvp = NULL;
 2257         struct vattr vattr;
 2258         int busy = 0, error;
 2259 
 2260         CTR1(KTR_VFS, "vflush: mp %p", mp);
 2261         if (rootrefs > 0) {
 2262                 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
 2263                     ("vflush: bad args"));
 2264                 /*
 2265                  * Get the filesystem root vnode. We can vput() it
 2266                  * immediately, since with rootrefs > 0, it won't go away.
 2267                  */
 2268                 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp, td)) != 0)
 2269                         return (error);
 2270                 vput(rootvp);
 2271 
 2272         }
 2273         MNT_ILOCK(mp);
 2274 loop:
 2275         MNT_VNODE_FOREACH(vp, mp, mvp) {
 2276 
 2277                 VI_LOCK(vp);
 2278                 vholdl(vp);
 2279                 MNT_IUNLOCK(mp);
 2280                 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE, td);
 2281                 if (error) {
 2282                         vdrop(vp);
 2283                         MNT_ILOCK(mp);
 2284                         MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
 2285                         goto loop;
 2286                 }
 2287                 /*
 2288                  * Skip over any vnodes marked VV_SYSTEM.
 2289                  */
 2290                 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
 2291                         VOP_UNLOCK(vp, 0, td);
 2292                         vdrop(vp);
 2293                         MNT_ILOCK(mp);
 2294                         continue;
 2295                 }
 2296                 /*
 2297                  * If WRITECLOSE is set, flush out unlinked but still open
 2298                  * files (even if open only for reading) and regular file
 2299                  * vnodes open for writing.
 2300                  */
 2301                 if (flags & WRITECLOSE) {
 2302                         error = VOP_GETATTR(vp, &vattr, td->td_ucred, td);
 2303                         VI_LOCK(vp);
 2304 
 2305                         if ((vp->v_type == VNON ||
 2306                             (error == 0 && vattr.va_nlink > 0)) &&
 2307                             (vp->v_writecount == 0 || vp->v_type != VREG)) {
 2308                                 VOP_UNLOCK(vp, 0, td);
 2309                                 vdropl(vp);
 2310                                 MNT_ILOCK(mp);
 2311                                 continue;
 2312                         }
 2313                 } else
 2314                         VI_LOCK(vp);
 2315                 /*
 2316                  * With v_usecount == 0, all we need to do is clear out the
 2317                  * vnode data structures and we are done.
 2318                  *
 2319                  * If FORCECLOSE is set, forcibly close the vnode.
 2320                  */
 2321                 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) {
 2322                         VNASSERT(vp->v_usecount == 0 ||
 2323                             (vp->v_type != VCHR && vp->v_type != VBLK), vp,
 2324                             ("device VNODE %p is FORCECLOSED", vp));
 2325                         vgonel(vp);
 2326                 } else {
 2327                         busy++;
 2328 #ifdef DIAGNOSTIC
 2329                         if (busyprt)
 2330                                 vprint("vflush: busy vnode", vp);
 2331 #endif
 2332                 }
 2333                 VOP_UNLOCK(vp, 0, td);
 2334                 vdropl(vp);
 2335                 MNT_ILOCK(mp);
 2336         }
 2337         MNT_IUNLOCK(mp);
 2338         if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
 2339                 /*
 2340                  * If just the root vnode is busy, and if its refcount
 2341                  * is equal to `rootrefs', then go ahead and kill it.
 2342                  */
 2343                 VI_LOCK(rootvp);
 2344                 KASSERT(busy > 0, ("vflush: not busy"));
 2345                 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp,
 2346                     ("vflush: usecount %d < rootrefs %d",
 2347                      rootvp->v_usecount, rootrefs));
 2348                 if (busy == 1 && rootvp->v_usecount == rootrefs) {
 2349                         VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK, td);
 2350                         vgone(rootvp);
 2351                         VOP_UNLOCK(rootvp, 0, td);
 2352                         busy = 0;
 2353                 } else
 2354                         VI_UNLOCK(rootvp);
 2355         }
 2356         if (busy)
 2357                 return (EBUSY);
 2358         for (; rootrefs > 0; rootrefs--)
 2359                 vrele(rootvp);
 2360         return (0);
 2361 }
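
/*
 * An illustrative (hypothetical) use of vflush() from a filesystem's
 * unmount path, flushing everything while holding one reference on the
 * root vnode; "mntflags" is assumed to carry the caller's unmount flags:
 *
 *      error = vflush(mp, 1, (mntflags & MNT_FORCE) ? FORCECLOSE : 0, td);
 *      if (error != 0)
 *              return (error);         (EBUSY: active vnodes remain)
 */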
 2362 
 2363 /*
 2364  * Recycle an unused vnode to the front of the free list.
 2365  */
 2366 int
 2367 vrecycle(struct vnode *vp, struct thread *td)
 2368 {
 2369         int recycled;
 2370 
 2371         ASSERT_VOP_LOCKED(vp, "vrecycle");
 2372         recycled = 0;
 2373         VI_LOCK(vp);
 2374         if (vp->v_usecount == 0) {
 2375                 recycled = 1;
 2376                 vgonel(vp);
 2377         }
 2378         VI_UNLOCK(vp);
 2379         return (recycled);
 2380 }
 2381 
 2382 /*
 2383  * Eliminate all activity associated with a vnode
 2384  * in preparation for reuse.
 2385  */
 2386 void
 2387 vgone(struct vnode *vp)
 2388 {
 2389         VI_LOCK(vp);
 2390         vgonel(vp);
 2391         VI_UNLOCK(vp);
 2392 }
 2393 
 2394 /*
 2395  * vgone, with the vp interlock held.
 2396  */
 2397 void
 2398 vgonel(struct vnode *vp)
 2399 {
 2400         struct thread *td;
 2401         int oweinact;
 2402         int active;
 2403         struct mount *mp;
 2404 
 2405         CTR1(KTR_VFS, "vgonel: vp %p", vp);
 2406         ASSERT_VOP_LOCKED(vp, "vgonel");
 2407         ASSERT_VI_LOCKED(vp, "vgonel");
 2408 #if 0
 2409         /* XXX Need to fix ttyvp before I enable this. */
 2410         VNASSERT(vp->v_holdcnt, vp,
 2411             ("vgonel: vp %p has no reference.", vp));
 2412 #endif
 2413         td = curthread;
 2414 
 2415         /*
 2416          * Don't vgonel if we're already doomed.
 2417          */
 2418         if (vp->v_iflag & VI_DOOMED)
 2419                 return;
 2420         vp->v_iflag |= VI_DOOMED;
 2421         /*
 2422          * Check to see if the vnode is in use.  If so, we have to call
 2423          * VOP_CLOSE() and VOP_INACTIVE().
 2424          */
 2425         active = vp->v_usecount;
 2426         oweinact = (vp->v_iflag & VI_OWEINACT);
 2427         VI_UNLOCK(vp);
 2428         /*
 2429          * Clean out any buffers associated with the vnode.
 2430          * If the flush fails, just toss the buffers.
 2431          */
 2432         mp = NULL;
 2433         if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd))
 2434                 (void) vn_start_secondary_write(vp, &mp, V_WAIT);
 2435         if (vinvalbuf(vp, V_SAVE, td, 0, 0) != 0)
 2436                 vinvalbuf(vp, 0, td, 0, 0);
 2437 
 2438         /*
 2439          * If purging an active vnode, it must be closed and
 2440          * deactivated before being reclaimed.
 2441          */
 2442         if (active)
 2443                 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
 2444         if (oweinact || active) {
 2445                 VI_LOCK(vp);
 2446                 if ((vp->v_iflag & VI_DOINGINACT) == 0)
 2447                         vinactive(vp, td);
 2448                 VI_UNLOCK(vp);
 2449         }
 2450         /*
 2451          * Reclaim the vnode.
 2452          */
 2453         if (VOP_RECLAIM(vp, td))
 2454                 panic("vgone: cannot reclaim");
 2455         if (mp != NULL)
 2456                 vn_finished_secondary_write(mp);
 2457         VNASSERT(vp->v_object == NULL, vp,
 2458             ("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag));
 2459         /*
 2460          * Delete from old mount point vnode list.
 2461          */
 2462         delmntque(vp);
 2463         cache_purge(vp);
 2464         /*
 2465          * Done with purge, reset to the standard lock and invalidate
 2466          * the vnode.
 2467          */
 2468         VI_LOCK(vp);
 2469         vp->v_vnlock = &vp->v_lock;
 2470         vp->v_op = &dead_vnodeops;
 2471         vp->v_tag = "none";
 2472         vp->v_type = VBAD;
 2473 }
 2474 
 2475 /*
 2476  * Calculate the total number of references to a special device.
 2477  */
 2478 int
 2479 vcount(struct vnode *vp)
 2480 {
 2482         int count;
 2483 
 2484         dev_lock();
 2485         count = vp->v_rdev->si_usecount;
 2486         dev_unlock();
 2487         return (count);
 2488 }
 2489 
 2490 /*
 2491  * Same as above, but using the struct cdev * as the argument.
 2492  */
 2493 int
 2494 count_dev(struct cdev *dev)
 2495 {
 2497         int count;
 2498 
 2499         dev_lock();
 2500         count = dev->si_usecount;
 2501         dev_unlock();
 2502         return (count);
 2503 }
 2504 
 2505 /*
 2506  * Print out a description of a vnode.
 2507  */
 2508 static char *typename[] =
 2509 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD",
 2510  "VMARKER"};
 2511 
 2512 void
 2513 vn_printf(struct vnode *vp, const char *fmt, ...)
 2514 {
 2515         va_list ap;
 2516         char buf[96];
 2517 
 2518         va_start(ap, fmt);
 2519         vprintf(fmt, ap);
 2520         va_end(ap);
 2521         printf("%p: ", (void *)vp);
 2522         printf("tag %s, type %s\n", vp->v_tag, typename[vp->v_type]);
 2523         printf("    usecount %d, writecount %d, refcount %d mountedhere %p\n",
 2524             vp->v_usecount, vp->v_writecount, vp->v_holdcnt, vp->v_mountedhere);
 2525         buf[0] = '\0';
 2526         buf[1] = '\0';
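        /*
         * Each flag is appended as "|NAME"; printing from buf + 1 below
         * skips the leading '|'.
         */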
 2527         if (vp->v_vflag & VV_ROOT)
 2528                 strcat(buf, "|VV_ROOT");
 2529         if (vp->v_vflag & VV_TEXT)
 2530                 strcat(buf, "|VV_TEXT");
 2531         if (vp->v_vflag & VV_SYSTEM)
 2532                 strcat(buf, "|VV_SYSTEM");
 2533         if (vp->v_iflag & VI_DOOMED)
 2534                 strcat(buf, "|VI_DOOMED");
 2535         if (vp->v_iflag & VI_FREE)
 2536                 strcat(buf, "|VI_FREE");
 2537         printf("    flags (%s)\n", buf + 1);
 2538         if (mtx_owned(VI_MTX(vp)))
 2539                 printf(" VI_LOCKed");
 2540         if (vp->v_object != NULL)
 2541                 printf("    v_object %p ref %d pages %d\n",
 2542                     vp->v_object, vp->v_object->ref_count,
 2543                     vp->v_object->resident_page_count);
 2544         printf("    ");
 2545         lockmgr_printinfo(vp->v_vnlock);
 2546         printf("\n");
 2547         if (vp->v_data != NULL)
 2548                 VOP_PRINT(vp);
 2549 }
 2550 
 2551 #ifdef DDB
 2552 #include <ddb/ddb.h>
 2553 /*
 2554  * List all of the locked vnodes in the system.
 2555  * Called when debugging the kernel.
 2556  */
 2557 DB_SHOW_COMMAND(lockedvnods, lockedvnodes)
 2558 {
 2559         struct mount *mp, *nmp;
 2560         struct vnode *vp;
 2561 
 2562         /*
 2563          * Note: because this is DDB, we can't obey the locking semantics
 2564          * for these structures, which means we could catch an inconsistent
 2565          * state and dereference a nasty pointer.  Not much to be done
 2566          * about that.
 2567          */
 2568         printf("Locked vnodes\n");
 2569         for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
 2570                 nmp = TAILQ_NEXT(mp, mnt_list);
 2571                 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
 2572                         if (vp->v_type != VMARKER && VOP_ISLOCKED(vp, NULL))
 2573                                 vprint("", vp);
 2574                 }
 2576         }
 2577 }
 2578 #endif
 2579 
 2580 /*
 2581  * Fill in a struct xvfsconf based on a struct vfsconf.
 2582  */
 2583 static void
 2584 vfsconf2x(struct vfsconf *vfsp, struct xvfsconf *xvfsp)
 2585 {
 2586 
 2587         strcpy(xvfsp->vfc_name, vfsp->vfc_name);
 2588         xvfsp->vfc_typenum = vfsp->vfc_typenum;
 2589         xvfsp->vfc_refcount = vfsp->vfc_refcount;
 2590         xvfsp->vfc_flags = vfsp->vfc_flags;
 2591         /*
 2592          * These are unused in userland, we keep them
 2593          * to not break binary compatibility.
 2594          */
 2595         xvfsp->vfc_vfsops = NULL;
 2596         xvfsp->vfc_next = NULL;
 2597 }
 2598 
 2599 /*
 2600  * Top level filesystem related information gathering.
 2601  */
 2602 static int
 2603 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS)
 2604 {
 2605         struct vfsconf *vfsp;
 2606         struct xvfsconf xvfsp;
 2607         int error;
 2608 
 2609         error = 0;
 2610         TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
 2611                 bzero(&xvfsp, sizeof(xvfsp));
 2612                 vfsconf2x(vfsp, &xvfsp);
 2613                 error = SYSCTL_OUT(req, &xvfsp, sizeof xvfsp);
 2614                 if (error)
 2615                         break;
 2616         }
 2617         return (error);
 2618 }
 2619 
 2620 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLFLAG_RD, NULL, 0, sysctl_vfs_conflist,
 2621     "S,xvfsconf", "List of all configured filesystems");
 2622 
 2623 #ifndef BURN_BRIDGES
 2624 static int      sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);
 2625 
 2626 static int
 2627 vfs_sysctl(SYSCTL_HANDLER_ARGS)
 2628 {
 2629         int *name = (int *)arg1 - 1;    /* XXX */
 2630         u_int namelen = arg2 + 1;       /* XXX */
 2631         struct vfsconf *vfsp;
 2632         struct xvfsconf xvfsp;
 2633 
 2634         printf("WARNING: userland calling deprecated sysctl, "
 2635             "please rebuild world\n");
 2636 
 2637 #if 1 || defined(COMPAT_PRELITE2)
 2638         /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
 2639         if (namelen == 1)
 2640                 return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
 2641 #endif
 2642 
 2643         switch (name[1]) {
 2644         case VFS_MAXTYPENUM:
 2645                 if (namelen != 2)
 2646                         return (ENOTDIR);
 2647                 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
 2648         case VFS_CONF:
 2649                 if (namelen != 3)
 2650                         return (ENOTDIR);       /* overloaded */
 2651                 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list)
 2652                         if (vfsp->vfc_typenum == name[2])
 2653                                 break;
 2654                 if (vfsp == NULL)
 2655                         return (EOPNOTSUPP);
 2656                 bzero(&xvfsp, sizeof(xvfsp));
 2657                 vfsconf2x(vfsp, &xvfsp);
 2658                 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
 2659         }
 2660         return (EOPNOTSUPP);
 2661 }
 2662 
 2663 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP,
 2664         vfs_sysctl, "Generic filesystem");
 2665 
 2666 #if 1 || defined(COMPAT_PRELITE2)
 2667 
 2668 static int
 2669 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
 2670 {
 2671         int error;
 2672         struct vfsconf *vfsp;
 2673         struct ovfsconf ovfs;
 2674 
 2675         TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
 2676                 bzero(&ovfs, sizeof(ovfs));
 2677                 ovfs.vfc_vfsops = vfsp->vfc_vfsops;     /* XXX used as flag */
 2678                 strcpy(ovfs.vfc_name, vfsp->vfc_name);
 2679                 ovfs.vfc_index = vfsp->vfc_typenum;
 2680                 ovfs.vfc_refcount = vfsp->vfc_refcount;
 2681                 ovfs.vfc_flags = vfsp->vfc_flags;
 2682                 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
 2683                 if (error)
 2684                         return (error);
 2685         }
 2686         return (0);
 2687 }
 2688 
 2689 #endif /* 1 || COMPAT_PRELITE2 */
 2690 #endif /* !BURN_BRIDGES */
 2691 
 2692 #define KINFO_VNODESLOP         10
 2693 #ifdef notyet
 2694 /*
 2695  * Dump vnode list (via sysctl).
 2696  */
 2697 /* ARGSUSED */
 2698 static int
 2699 sysctl_vnode(SYSCTL_HANDLER_ARGS)
 2700 {
 2701         struct xvnode *xvn;
 2702         struct thread *td = req->td;
 2703         struct mount *mp;
 2704         struct vnode *vp;
 2705         int error, len, n;
 2706 
 2707         /*
 2708          * Stale numvnodes access is not fatal here.
 2709          */
 2710         req->lock = 0;
 2711         len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn;
 2712         if (!req->oldptr)
 2713                 /* Make an estimate */
 2714                 return (SYSCTL_OUT(req, 0, len));
 2715 
 2716         error = sysctl_wire_old_buffer(req, 0);
 2717         if (error != 0)
 2718                 return (error);
 2719         xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK);
 2720         n = 0;
 2721         mtx_lock(&mountlist_mtx);
 2722         TAILQ_FOREACH(mp, &mountlist, mnt_list) {
 2723                 if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td))
 2724                         continue;
 2725                 MNT_ILOCK(mp);
 2726                 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
 2727                         if (n == len)
 2728                                 break;
 2729                         vref(vp);
 2730                         xvn[n].xv_size = sizeof *xvn;
 2731                         xvn[n].xv_vnode = vp;
 2732                         xvn[n].xv_id = 0;       /* XXX compat */
 2733 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field
 2734                         XV_COPY(usecount);
 2735                         XV_COPY(writecount);
 2736                         XV_COPY(holdcnt);
 2737                         XV_COPY(mount);
 2738                         XV_COPY(numoutput);
 2739                         XV_COPY(type);
 2740 #undef XV_COPY
 2741                         xvn[n].xv_flag = vp->v_vflag;
 2742 
 2743                         switch (vp->v_type) {
 2744                         case VREG:
 2745                         case VDIR:
 2746                         case VLNK:
 2747                                 break;
 2748                         case VBLK:
 2749                         case VCHR:
 2750                                 if (vp->v_rdev == NULL) {
 2751                                         vrele(vp);
 2752                                         continue;
 2753                                 }
 2754                                 xvn[n].xv_dev = dev2udev(vp->v_rdev);
 2755                                 break;
 2756                         case VSOCK:
 2757                                 xvn[n].xv_socket = vp->v_socket;
 2758                                 break;
 2759                         case VFIFO:
 2760                                 xvn[n].xv_fifo = vp->v_fifoinfo;
 2761                                 break;
 2762                         case VNON:
 2763                         case VBAD:
 2764                         default:
 2765                                 /* shouldn't happen? */
 2766                                 vrele(vp);
 2767                                 continue;
 2768                         }
 2769                         vrele(vp);
 2770                         ++n;
 2771                 }
 2772                 MNT_IUNLOCK(mp);
 2773                 mtx_lock(&mountlist_mtx);
 2774                 vfs_unbusy(mp, td);
 2775                 if (n == len)
 2776                         break;
 2777         }
 2778         mtx_unlock(&mountlist_mtx);
 2779 
 2780         error = SYSCTL_OUT(req, xvn, n * sizeof *xvn);
 2781         free(xvn, M_TEMP);
 2782         return (error);
 2783 }
 2784 
 2785 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
 2786         0, 0, sysctl_vnode, "S,xvnode", "");
 2787 #endif
 2788 
 2789 /*
 2790  * Unmount all filesystems. The list is traversed in reverse order
 2791  * of mounting to avoid dependencies.
 2792  */
 2793 void
 2794 vfs_unmountall(void)
 2795 {
 2796         struct mount *mp;
 2797         struct thread *td;
 2798         int error;
 2799 
 2800         KASSERT(curthread != NULL, ("vfs_unmountall: NULL curthread"));
 2801         td = curthread;
 2802         /*
 2803          * Since this only runs when rebooting, it is not interlocked.
 2804          */
 2805         while (!TAILQ_EMPTY(&mountlist)) {
 2806                 mp = TAILQ_LAST(&mountlist, mntlist);
 2807                 error = dounmount(mp, MNT_FORCE, td);
 2808                 if (error) {
 2809                         TAILQ_REMOVE(&mountlist, mp, mnt_list);
 2810                         /*
 2811                          * XXX: Due to the way in which we mount the root
 2812                          * file system off of devfs, devfs will generate a
 2813                          * "busy" warning when we try to unmount it before
 2814                          * the root.  Don't print a warning as a result in
 2815                          * order to avoid false positive errors that may
 2816                          * cause needless upset.
 2817                          */
 2818                         if (strcmp(mp->mnt_vfc->vfc_name, "devfs") != 0) {
 2819                                 printf("unmount of %s failed (",
 2820                                     mp->mnt_stat.f_mntonname);
 2821                                 if (error == EBUSY)
 2822                                         printf("BUSY)\n");
 2823                                 else
 2824                                         printf("%d)\n", error);
 2825                         }
 2826                 } else {
 2827                         /* The unmount has removed mp from the mountlist */
 2828                 }
 2829         }
 2830 }
 2831 
 2832 /*
 2833  * Perform msync on all vnodes under a mount point.
 2834  * The mount point must be locked.
 2835  */
 2836 void
 2837 vfs_msync(struct mount *mp, int flags)
 2838 {
 2839         struct vnode *vp, *mvp;
 2840         struct vm_object *obj;
 2841 
 2842         (void) vn_start_write(NULL, &mp, V_WAIT);
 2843         MNT_ILOCK(mp);
 2844         MNT_VNODE_FOREACH(vp, mp, mvp) {
 2845                 VI_LOCK(vp);
 2846                 if ((vp->v_iflag & VI_OBJDIRTY) &&
 2847                     (flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) {
 2848                         MNT_IUNLOCK(mp);
 2849                         if (!vget(vp,
 2850                             LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
 2851                             curthread)) {
 2852                                 if (vp->v_vflag & VV_NOSYNC) {  /* unlinked */
 2853                                         vput(vp);
 2854                                         MNT_ILOCK(mp);
 2855                                         continue;
 2856                                 }
 2857 
 2858                                 obj = vp->v_object;
 2859                                 if (obj != NULL) {
 2860                                         VM_OBJECT_LOCK(obj);
 2861                                         vm_object_page_clean(obj, 0, 0,
 2862                                             flags == MNT_WAIT ?
 2863                                             OBJPC_SYNC : OBJPC_NOSYNC);
 2864                                         VM_OBJECT_UNLOCK(obj);
 2865                                 }
 2866                                 vput(vp);
 2867                         }
 2868                         MNT_ILOCK(mp);
 2869                 } else
 2870                         VI_UNLOCK(vp);
 2871         }
 2872         MNT_IUNLOCK(mp);
 2873         vn_finished_write(mp);
 2874 }
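
/*
 * Editor's note (a hedged sketch, not in the original source): the
 * MNT_NOWAIT case above is what the periodic syncer uses (see sync_fsync()
 * below); it skips vnodes that are currently locked instead of stalling
 * the pass.  Unmount-style callers pass MNT_WAIT so that every dirty
 * mmap()ed page is written synchronously:
 *
 *	vfs_msync(mp, MNT_WAIT);
 */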
 2875 
 2876 /*
 2877  * Mark a vnode as free, putting it up for recycling.
 2878  */
 2879 static void
 2880 vfree(struct vnode *vp)
 2881 {
 2882 
 2883         CTR1(KTR_VFS, "vfree vp %p", vp);
 2884         ASSERT_VI_LOCKED(vp, "vfree");
 2885         mtx_lock(&vnode_free_list_mtx);
 2886         VNASSERT(vp->v_op != NULL, vp, ("vfree: vnode already reclaimed."));
 2887         VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, ("vnode already free"));
 2888         VNASSERT(VSHOULDFREE(vp), vp, ("vfree: freeing when we shouldn't"));
 2889         VNASSERT((vp->v_iflag & VI_DOOMED) == 0, vp,
 2890             ("vfree: Freeing doomed vnode"));
 2891         if (vp->v_iflag & VI_AGE) {
 2892                 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
 2893         } else {
 2894                 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
 2895         }
 2896         freevnodes++;
 2897         vp->v_iflag &= ~VI_AGE;
 2898         vp->v_iflag |= VI_FREE;
 2899         mtx_unlock(&vnode_free_list_mtx);
 2900 }
 2901 
 2902 /*
 2903  * Opposite of vfree() - mark a vnode as in use.
 2904  */
 2905 static void
 2906 vbusy(struct vnode *vp)
 2907 {
 2908         CTR1(KTR_VFS, "vbusy vp %p", vp);
 2909         ASSERT_VI_LOCKED(vp, "vbusy");
 2910         VNASSERT((vp->v_iflag & VI_FREE) != 0, vp, ("vnode not free"));
 2911         VNASSERT(vp->v_op != NULL, vp, ("vbusy: vnode already reclaimed."));
 2912 
 2913         mtx_lock(&vnode_free_list_mtx);
 2914         TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
 2915         freevnodes--;
 2916         vp->v_iflag &= ~(VI_FREE|VI_AGE);
 2917         mtx_unlock(&vnode_free_list_mtx);
 2918 }
 2919 
 2920 /*
 2921  * Initialize per-vnode helper structure to hold poll-related state.
 2922  */
 2923 void
 2924 v_addpollinfo(struct vnode *vp)
 2925 {
 2926         struct vpollinfo *vi;
 2927 
 2928         vi = uma_zalloc(vnodepoll_zone, M_WAITOK);
 2929         if (vp->v_pollinfo != NULL) {
 2930                 uma_zfree(vnodepoll_zone, vi);
 2931                 return;
 2932         }
 2933         vp->v_pollinfo = vi;
 2934         mtx_init(&vp->v_pollinfo->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
 2935         knlist_init(&vp->v_pollinfo->vpi_selinfo.si_note, vp, vfs_knllock,
 2936             vfs_knlunlock, vfs_knllocked);
 2937 }
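
/*
 * Editor's note (not in the original source): the vpollinfo is allocated
 * before v_pollinfo is tested because uma_zalloc(..., M_WAITOK) may sleep;
 * if another thread installed a vpollinfo while this one slept, the spare
 * is freed and the existing structure is kept.  The check-then-assign
 * itself is not interlocked here.
 */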
 2938 
 2939 /*
 2940  * Record a process's interest in events which might happen to
 2941  * a vnode.  Because poll uses the historic select-style interface
 2942  * internally, this routine serves as both the ``check for any
 2943  * pending events'' and the ``record my interest in future events''
 2944  * functions.  (These are done together, while the lock is held,
 2945  * to avoid race conditions.)
 2946  */
 2947 int
 2948 vn_pollrecord(vp, td, events)
 2949         struct vnode *vp;
 2950         struct thread *td;
 2951         short events;
 2952 {
 2953 
 2954         if (vp->v_pollinfo == NULL)
 2955                 v_addpollinfo(vp);
 2956         mtx_lock(&vp->v_pollinfo->vpi_lock);
 2957         if (vp->v_pollinfo->vpi_revents & events) {
 2958                 /*
 2959                  * This leaves events we are not interested
 2960                  * in available for the other process which
 2961                  * presumably had requested them
 2962                  * (otherwise they would never have been
 2963                  * recorded).
 2964                  */
 2965                 events &= vp->v_pollinfo->vpi_revents;
 2966                 vp->v_pollinfo->vpi_revents &= ~events;
 2967 
 2968                 mtx_unlock(&vp->v_pollinfo->vpi_lock);
 2969                 return events;
 2970         }
 2971         vp->v_pollinfo->vpi_events |= events;
 2972         selrecord(td, &vp->v_pollinfo->vpi_selinfo);
 2973         mtx_unlock(&vp->v_pollinfo->vpi_lock);
 2974         return 0;
 2975 }
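
/*
 * Editor's sketch (an assumed caller, not in the original source): a
 * VOP_POLL implementation typically defers to vn_pollrecord() for events
 * it cannot answer synchronously, roughly:
 *
 *	static int
 *	xxx_poll(struct vop_poll_args *ap)
 *	{
 *		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
 *	}
 *
 * vop_stdpoll() in vfs_default.c follows this pattern for non-standard
 * events.
 */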
 2976 
 2977 /*
 2978  * Routine to create and manage a filesystem syncer vnode.
 2979  */
 2980 #define sync_close ((int (*)(struct  vop_close_args *))nullop)
 2981 static int      sync_fsync(struct  vop_fsync_args *);
 2982 static int      sync_inactive(struct  vop_inactive_args *);
 2983 static int      sync_reclaim(struct  vop_reclaim_args *);
 2984 
 2985 static struct vop_vector sync_vnodeops = {
 2986         .vop_bypass =   VOP_EOPNOTSUPP,
 2987         .vop_close =    sync_close,             /* close */
 2988         .vop_fsync =    sync_fsync,             /* fsync */
 2989         .vop_inactive = sync_inactive,  /* inactive */
 2990         .vop_reclaim =  sync_reclaim,   /* reclaim */
 2991         .vop_lock =     vop_stdlock,    /* lock */
 2992         .vop_unlock =   vop_stdunlock,  /* unlock */
 2993         .vop_islocked = vop_stdislocked,        /* islocked */
 2994 };
 2995 
 2996 /*
 2997  * Create a new filesystem syncer vnode for the specified mount point.
 2998  */
 2999 int
 3000 vfs_allocate_syncvnode(mp)
 3001         struct mount *mp;
 3002 {
 3003         struct vnode *vp;
 3004         static long start, incr, next;
 3005         int error;
 3006 
 3007         /* Allocate a new vnode */
 3008         if ((error = getnewvnode("syncer", mp, &sync_vnodeops, &vp)) != 0) {
 3009                 mp->mnt_syncer = NULL;
 3010                 return (error);
 3011         }
 3012         vp->v_type = VNON;
 3013         /*
 3014          * Place the vnode onto the syncer worklist. We attempt to
 3015          * scatter them about on the list so that they will go off
 3016          * at evenly distributed times even if all the filesystems
 3017          * are mounted at once.
 3018          */
 3019         next += incr;
 3020         if (next == 0 || next > syncer_maxdelay) {
 3021                 start /= 2;
 3022                 incr /= 2;
 3023                 if (start == 0) {
 3024                         start = syncer_maxdelay / 2;
 3025                         incr = syncer_maxdelay;
 3026                 }
 3027                 next = start;
 3028         }
 3029         VI_LOCK(vp);
 3030         vn_syncer_add_to_worklist(&vp->v_bufobj,
 3031             syncdelay > 0 ? next % syncdelay : 0);
 3032         /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */
 3033         mtx_lock(&sync_mtx);
 3034         sync_vnode_count++;
 3035         mtx_unlock(&sync_mtx);
 3036         VI_UNLOCK(vp);
 3037         mp->mnt_syncer = vp;
 3038         return (0);
 3039 }
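
/*
 * Editor's worked example (not in the original source), assuming the
 * default syncer_maxdelay of 32: successive calls compute next values of
 * 16, 8, 24, 4, 12, 20, 28, 2, 6, ... so the per-mount slots
 * (next % syncdelay) land on the syncer wheel in a bit-reversal-like
 * order instead of clustering, even when many filesystems are mounted
 * at once.
 */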
 3040 
 3041 /*
 3042  * Do a lazy sync of the filesystem.
 3043  */
 3044 static int
 3045 sync_fsync(ap)
 3046         struct vop_fsync_args /* {
 3047                 struct vnode *a_vp;
 3048                 struct ucred *a_cred;
 3049                 int a_waitfor;
 3050                 struct thread *a_td;
 3051         } */ *ap;
 3052 {
 3053         struct vnode *syncvp = ap->a_vp;
 3054         struct mount *mp = syncvp->v_mount;
 3055         struct thread *td = ap->a_td;
 3056         int error, asyncflag;
 3057         struct bufobj *bo;
 3058 
 3059         /*
 3060          * We only need to do something if this is a lazy evaluation.
 3061          */
 3062         if (ap->a_waitfor != MNT_LAZY)
 3063                 return (0);
 3064 
 3065         /*
 3066          * Move ourselves to the back of the sync list.
 3067          */
 3068         bo = &syncvp->v_bufobj;
 3069         BO_LOCK(bo);
 3070         vn_syncer_add_to_worklist(bo, syncdelay);
 3071         BO_UNLOCK(bo);
 3072 
 3073         /*
 3074          * Walk the list of vnodes pushing all that are dirty and
 3075          * not already on the sync list.
 3076          */
 3077         mtx_lock(&mountlist_mtx);
 3078         if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, td) != 0) {
 3079                 mtx_unlock(&mountlist_mtx);
 3080                 return (0);
 3081         }
 3082         if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
 3083                 vfs_unbusy(mp, td);
 3084                 return (0);
 3085         }
 3086         asyncflag = mp->mnt_flag & MNT_ASYNC;
 3087         mp->mnt_flag &= ~MNT_ASYNC;
 3088         vfs_msync(mp, MNT_NOWAIT);
 3089         error = VFS_SYNC(mp, MNT_LAZY, td);
 3090         if (asyncflag)
 3091                 mp->mnt_flag |= MNT_ASYNC;
 3092         vn_finished_write(mp);
 3093         vfs_unbusy(mp, td);
 3094         return (error);
 3095 }
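
/*
 * Editor's note (not in the original source): the syncer thread fsyncs
 * each mount's syncer vnode with MNT_LAZY, and the routine above expands
 * that single VOP_FSYNC into a mount-wide pass: vfs_msync() for dirty
 * mmap()ed pages followed by VFS_SYNC(..., MNT_LAZY, ...), with MNT_ASYNC
 * cleared for the duration of the sync.
 */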
 3096 
 3097 /*
 3098  * The syncer vnode is no longer referenced.
 3099  */
 3100 static int
 3101 sync_inactive(ap)
 3102         struct vop_inactive_args /* {
 3103                 struct vnode *a_vp;
 3104                 struct thread *a_td;
 3105         } */ *ap;
 3106 {
 3107 
 3108         vgone(ap->a_vp);
 3109         return (0);
 3110 }
 3111 
 3112 /*
 3113  * The syncer vnode is no longer needed and is being decommissioned.
 3114  *
 3115  * Modifications to the worklist must be protected by sync_mtx.
 3116  */
 3117 static int
 3118 sync_reclaim(ap)
 3119         struct vop_reclaim_args /* {
 3120                 struct vnode *a_vp;
 3121         } */ *ap;
 3122 {
 3123         struct vnode *vp = ap->a_vp;
 3124         struct bufobj *bo;
 3125 
 3126         VI_LOCK(vp);
 3127         bo = &vp->v_bufobj;
 3128         vp->v_mount->mnt_syncer = NULL;
 3129         if (bo->bo_flag & BO_ONWORKLST) {
 3130                 mtx_lock(&sync_mtx);
 3131                 LIST_REMOVE(bo, bo_synclist);
 3132                 syncer_worklist_len--;
 3133                 sync_vnode_count--;
 3134                 mtx_unlock(&sync_mtx);
 3135                 bo->bo_flag &= ~BO_ONWORKLST;
 3136         }
 3137         VI_UNLOCK(vp);
 3138 
 3139         return (0);
 3140 }
 3141 
 3142 /*
 3143  * Check if vnode represents a disk device
 3144  */
 3145 int
 3146 vn_isdisk(vp, errp)
 3147         struct vnode *vp;
 3148         int *errp;
 3149 {
 3150         int error;
 3151 
 3152         error = 0;
 3153         dev_lock();
 3154         if (vp->v_type != VCHR)
 3155                 error = ENOTBLK;
 3156         else if (vp->v_rdev == NULL)
 3157                 error = ENXIO;
 3158         else if (vp->v_rdev->si_devsw == NULL)
 3159                 error = ENXIO;
 3160         else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK))
 3161                 error = ENOTBLK;
 3162         dev_unlock();
 3163         if (errp != NULL)
 3164                 *errp = error;
 3165         return (error == 0);
 3166 }
 3167 
 3168 /*
 3169  * Common filesystem object access control check routine.  Accepts a
 3170  * vnode's type, "mode", uid and gid, requested access mode, credentials,
 3171  * and optional call-by-reference privused argument allowing vaccess()
 3172  * to indicate to the caller whether privilege was used to satisfy the
 3173  * request (obsoleted).  Returns 0 on success, or an errno on failure.
 3174  */
 3175 int
 3176 vaccess(type, file_mode, file_uid, file_gid, acc_mode, cred, privused)
 3177         enum vtype type;
 3178         mode_t file_mode;
 3179         uid_t file_uid;
 3180         gid_t file_gid;
 3181         mode_t acc_mode;
 3182         struct ucred *cred;
 3183         int *privused;
 3184 {
 3185         mode_t dac_granted;
 3186 #ifdef CAPABILITIES
 3187         mode_t cap_granted;
 3188 #endif
 3189 
 3190         /*
 3191          * Look for a normal, non-privileged way to access the file/directory
 3192          * as requested.  If it exists, go with that.
 3193          */
 3194 
 3195         if (privused != NULL)
 3196                 *privused = 0;
 3197 
 3198         dac_granted = 0;
 3199 
 3200         /* Check the owner. */
 3201         if (cred->cr_uid == file_uid) {
 3202                 dac_granted |= VADMIN;
 3203                 if (file_mode & S_IXUSR)
 3204                         dac_granted |= VEXEC;
 3205                 if (file_mode & S_IRUSR)
 3206                         dac_granted |= VREAD;
 3207                 if (file_mode & S_IWUSR)
 3208                         dac_granted |= (VWRITE | VAPPEND);
 3209 
 3210                 if ((acc_mode & dac_granted) == acc_mode)
 3211                         return (0);
 3212 
 3213                 goto privcheck;
 3214         }
 3215 
 3216         /* Otherwise, check the groups (first match) */
 3217         if (groupmember(file_gid, cred)) {
 3218                 if (file_mode & S_IXGRP)
 3219                         dac_granted |= VEXEC;
 3220                 if (file_mode & S_IRGRP)
 3221                         dac_granted |= VREAD;
 3222                 if (file_mode & S_IWGRP)
 3223                         dac_granted |= (VWRITE | VAPPEND);
 3224 
 3225                 if ((acc_mode & dac_granted) == acc_mode)
 3226                         return (0);
 3227 
 3228                 goto privcheck;
 3229         }
 3230 
 3231         /* Otherwise, check everyone else. */
 3232         if (file_mode & S_IXOTH)
 3233                 dac_granted |= VEXEC;
 3234         if (file_mode & S_IROTH)
 3235                 dac_granted |= VREAD;
 3236         if (file_mode & S_IWOTH)
 3237                 dac_granted |= (VWRITE | VAPPEND);
 3238         if ((acc_mode & dac_granted) == acc_mode)
 3239                 return (0);
 3240 
 3241 privcheck:
 3242         if (!suser_cred(cred, SUSER_ALLOWJAIL)) {
 3243                 /* XXX audit: privilege used */
 3244                 if (privused != NULL)
 3245                         *privused = 1;
 3246                 return (0);
 3247         }
 3248 
 3249 #ifdef CAPABILITIES
 3250         /*
 3251          * Build a capability mask to determine if the set of capabilities
 3252          * satisfies the requirements when combined with the granted mask
 3253          * from above.
 3254          * For each capability, if the capability is required, bitwise
 3255          * or the request type onto the cap_granted mask.
 3256          */
 3257         cap_granted = 0;
 3258 
 3259         if (type == VDIR) {
 3260                 /*
 3261                  * For directories, use CAP_DAC_READ_SEARCH to satisfy
 3262                  * VEXEC requests, instead of CAP_DAC_EXECUTE.
 3263                  */
 3264                 if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
 3265                     !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, SUSER_ALLOWJAIL))
 3266                         cap_granted |= VEXEC;
 3267         } else {
 3268                 if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
 3269                     !cap_check(cred, NULL, CAP_DAC_EXECUTE, SUSER_ALLOWJAIL))
 3270                         cap_granted |= VEXEC;
 3271         }
 3272 
 3273         if ((acc_mode & VREAD) && ((dac_granted & VREAD) == 0) &&
 3274             !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, SUSER_ALLOWJAIL))
 3275                 cap_granted |= VREAD;
 3276 
 3277         if ((acc_mode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
 3278             !cap_check(cred, NULL, CAP_DAC_WRITE, SUSER_ALLOWJAIL))
 3279                 cap_granted |= (VWRITE | VAPPEND);
 3280 
 3281         if ((acc_mode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
 3282             !cap_check(cred, NULL, CAP_FOWNER, SUSER_ALLOWJAIL))
 3283                 cap_granted |= VADMIN;
 3284 
 3285         if ((acc_mode & (cap_granted | dac_granted)) == acc_mode) {
 3286                 /* XXX audit: privilege used */
 3287                 if (privused != NULL)
 3288                         *privused = 1;
 3289                 return (0);
 3290         }
 3291 #endif
 3292 
 3293         return ((acc_mode & VADMIN) ? EPERM : EACCES);
 3294 }
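
/*
 * Editor's worked example (not in the original source): for a file with
 * mode 0640, owner uid 100 and group gid 10, a request of VREAD|VWRITE by
 * a non-owner credential that is a member of group 10 takes the
 * groupmember() branch and collects dac_granted = VREAD only.  Since
 * (acc_mode & dac_granted) != acc_mode, control falls through to
 * privcheck, where the request succeeds only for a superuser credential
 * (or, with CAPABILITIES, a holder of CAP_DAC_WRITE).
 */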
 3295 
 3296 /*
 3297  * Credential check based on process requesting service, and per-attribute
 3298  * permissions.
 3299  */
 3300 int
 3301 extattr_check_cred(struct vnode *vp, int attrnamespace,
 3302     struct ucred *cred, struct thread *td, int access)
 3303 {
 3304 
 3305         /*
 3306          * Kernel-invoked requests always succeed.
 3307          */
 3308         if (cred == NOCRED)
 3309                 return (0);
 3310 
 3311         /*
 3312          * Do not allow privileged processes in jail to directly
 3313          * manipulate system attributes.
 3314          *
 3315          * XXX What capability should apply here?
 3316          * Probably CAP_SYS_SETFFLAG.
 3317          */
 3318         switch (attrnamespace) {
 3319         case EXTATTR_NAMESPACE_SYSTEM:
 3320                 /* Potentially should be: return (EPERM); */
 3321                 return (suser_cred(cred, 0));
 3322         case EXTATTR_NAMESPACE_USER:
 3323                 return (VOP_ACCESS(vp, access, cred, td));
 3324         default:
 3325                 return (EPERM);
 3326         }
 3327 }
 3328 
 3329 #ifdef DEBUG_VFS_LOCKS
 3330 /*
 3331  * This only exists to suppress warnings from unlocked specfs accesses.  It is
 3332  * no longer ok to have an unlocked VFS.
 3333  */
 3334 #define IGNORE_LOCK(vp) ((vp)->v_type == VCHR || (vp)->v_type == VBAD)
 3335 
 3336 int vfs_badlock_ddb = 1;        /* Drop into debugger on violation. */
 3337 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, "");
 3338 
 3339 int vfs_badlock_mutex = 1;      /* Check for interlock across VOPs. */
 3340 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 0, "");
 3341 
 3342 int vfs_badlock_print = 1;      /* Print lock violations. */
 3343 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 0, "");
 3344 
 3345 #ifdef KDB
 3346 int vfs_badlock_backtrace = 1;  /* Print backtrace at lock violations. */
 3347 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, &vfs_badlock_backtrace, 0, "");
 3348 #endif
 3349 
 3350 static void
 3351 vfs_badlock(const char *msg, const char *str, struct vnode *vp)
 3352 {
 3353 
 3354 #ifdef KDB
 3355         if (vfs_badlock_backtrace)
 3356                 kdb_backtrace();
 3357 #endif
 3358         if (vfs_badlock_print)
 3359                 printf("%s: %p %s\n", str, (void *)vp, msg);
 3360         if (vfs_badlock_ddb)
 3361                 kdb_enter("lock violation");
 3362 }
 3363 
 3364 void
 3365 assert_vi_locked(struct vnode *vp, const char *str)
 3366 {
 3367 
 3368         if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
 3369                 vfs_badlock("interlock is not locked but should be", str, vp);
 3370 }
 3371 
 3372 void
 3373 assert_vi_unlocked(struct vnode *vp, const char *str)
 3374 {
 3375 
 3376         if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
 3377                 vfs_badlock("interlock is locked but should not be", str, vp);
 3378 }
 3379 
 3380 void
 3381 assert_vop_locked(struct vnode *vp, const char *str)
 3382 {
 3383 
 3384         if (vp && !IGNORE_LOCK(vp) && VOP_ISLOCKED(vp, NULL) == 0)
 3385                 vfs_badlock("is not locked but should be", str, vp);
 3386 }
 3387 
 3388 void
 3389 assert_vop_unlocked(struct vnode *vp, const char *str)
 3390 {
 3391 
 3392         if (vp && !IGNORE_LOCK(vp) &&
 3393             VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE)
 3394                 vfs_badlock("is locked but should not be", str, vp);
 3395 }
 3396 
 3397 void
 3398 assert_vop_elocked(struct vnode *vp, const char *str)
 3399 {
 3400 
 3401         if (vp && !IGNORE_LOCK(vp) &&
 3402             VOP_ISLOCKED(vp, curthread) != LK_EXCLUSIVE)
 3403                 vfs_badlock("is not exclusive locked but should be", str, vp);
 3404 }
 3405 
 3406 #if 0
 3407 void
 3408 assert_vop_elocked_other(struct vnode *vp, const char *str)
 3409 {
 3410 
 3411         if (vp && !IGNORE_LOCK(vp) &&
 3412             VOP_ISLOCKED(vp, curthread) != LK_EXCLOTHER)
 3413                 vfs_badlock("is not exclusive locked by another thread",
 3414                     str, vp);
 3415 }
 3416 
 3417 void
 3418 assert_vop_slocked(struct vnode *vp, const char *str)
 3419 {
 3420 
 3421         if (vp && !IGNORE_LOCK(vp) &&
 3422             VOP_ISLOCKED(vp, curthread) != LK_SHARED)
 3423                 vfs_badlock("is not locked shared but should be", str, vp);
 3424 }
 3425 #endif /* 0 */
 3426 #endif /* DEBUG_VFS_LOCKS */
 3427 
 3428 void
 3429 vop_rename_pre(void *ap)
 3430 {
 3431         struct vop_rename_args *a = ap;
 3432 
 3433 #ifdef DEBUG_VFS_LOCKS
 3434         if (a->a_tvp)
 3435                 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME");
 3436         ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME");
 3437         ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME");
 3438         ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME");
 3439 
 3440         /* Check the source (from). */
 3441         if (a->a_tdvp != a->a_fdvp)
 3442                 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked");
 3443         if (a->a_tvp != a->a_fvp)
 3444                 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked");
 3445 
 3446         /* Check the target. */
 3447         if (a->a_tvp)
 3448                 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked");
 3449         ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked");
 3450 #endif
 3451         if (a->a_tdvp != a->a_fdvp)
 3452                 vhold(a->a_fdvp);
 3453         if (a->a_tvp != a->a_fvp)
 3454                 vhold(a->a_fvp);
 3455         vhold(a->a_tdvp);
 3456         if (a->a_tvp)
 3457                 vhold(a->a_tvp);
 3458 }
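
/*
 * Editor's note (not in the original source): the vhold() calls above take
 * hold references on every vnode involved in the rename, since VOP_RENAME
 * implementations release their own references and locks.  The holds keep
 * the vnodes from being recycled until vop_rename_post() vdrop()s them
 * after posting the kevent notifications.
 */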
 3459 
 3460 void
 3461 vop_strategy_pre(void *ap)
 3462 {
 3463 #ifdef DEBUG_VFS_LOCKS
 3464         struct vop_strategy_args *a;
 3465         struct buf *bp;
 3466 
 3467         a = ap;
 3468         bp = a->a_bp;
 3469 
 3470         /*
 3471          * Cluster ops lock their component buffers but not the IO container.
 3472          */
 3473         if ((bp->b_flags & B_CLUSTER) != 0)
 3474                 return;
 3475 
 3476         if (BUF_REFCNT(bp) < 1) {
 3477                 if (vfs_badlock_print)
 3478                         printf(
 3479                             "VOP_STRATEGY: bp is not locked but should be\n");
 3480                 if (vfs_badlock_ddb)
 3481                         kdb_enter("lock violation");
 3482         }
 3483 #endif
 3484 }
 3485 
 3486 void
 3487 vop_lookup_pre(void *ap)
 3488 {
 3489 #ifdef DEBUG_VFS_LOCKS
 3490         struct vop_lookup_args *a;
 3491         struct vnode *dvp;
 3492 
 3493         a = ap;
 3494         dvp = a->a_dvp;
 3495         ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
 3496         ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP");
 3497 #endif
 3498 }
 3499 
 3500 void
 3501 vop_lookup_post(void *ap, int rc)
 3502 {
 3503 #ifdef DEBUG_VFS_LOCKS
 3504         struct vop_lookup_args *a;
 3505         struct vnode *dvp;
 3506         struct vnode *vp;
 3507 
 3508         a = ap;
 3509         dvp = a->a_dvp;
 3510         vp = *(a->a_vpp);
 3511 
 3512         ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
 3513         ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP");
 3514 
 3515         if (!rc)
 3516                 ASSERT_VOP_LOCKED(vp, "VOP_LOOKUP (child)");
 3517 #endif
 3518 }
 3519 
 3520 void
 3521 vop_lock_pre(void *ap)
 3522 {
 3523 #ifdef DEBUG_VFS_LOCKS
 3524         struct vop_lock_args *a = ap;
 3525 
 3526         if ((a->a_flags & LK_INTERLOCK) == 0)
 3527                 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
 3528         else
 3529                 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK");
 3530 #endif
 3531 }
 3532 
 3533 void
 3534 vop_lock_post(void *ap, int rc)
 3535 {
 3536 #ifdef DEBUG_VFS_LOCKS
 3537         struct vop_lock_args *a = ap;
 3538 
 3539         ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
 3540         if (rc == 0)
 3541                 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK");
 3542 #endif
 3543 }
 3544 
 3545 void
 3546 vop_unlock_pre(void *ap)
 3547 {
 3548 #ifdef DEBUG_VFS_LOCKS
 3549         struct vop_unlock_args *a = ap;
 3550 
 3551         if (a->a_flags & LK_INTERLOCK)
 3552                 ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK");
 3553         ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK");
 3554 #endif
 3555 }
 3556 
 3557 void
 3558 vop_unlock_post(void *ap, int rc)
 3559 {
 3560 #ifdef DEBUG_VFS_LOCKS
 3561         struct vop_unlock_args *a = ap;
 3562 
 3563         if (a->a_flags & LK_INTERLOCK)
 3564                 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK");
 3565 #endif
 3566 }
 3567 
 3568 void
 3569 vop_create_post(void *ap, int rc)
 3570 {
 3571         struct vop_create_args *a = ap;
 3572 
 3573         if (!rc)
 3574                 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 
 3575 }
 3576 
 3577 void
 3578 vop_link_post(void *ap, int rc)
 3579 {
 3580         struct vop_link_args *a = ap;
 3581         
 3582         if (!rc) {
 3583                 VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK); 
 3584                 VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE);
 3585         }
 3586 }
 3587 
 3588 void
 3589 vop_mkdir_post(void *ap, int rc)
 3590 {
 3591         struct vop_mkdir_args *a = ap;
 3592 
 3593         if (!rc)
 3594                 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK);
 3595 }
 3596 
 3597 void
 3598 vop_mknod_post(void *ap, int rc)
 3599 {
 3600         struct vop_mknod_args *a = ap;
 3601 
 3602         if (!rc)
 3603                 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
 3604 }
 3605 
 3606 void
 3607 vop_remove_post(void *ap, int rc)
 3608 {
 3609         struct vop_remove_args *a = ap;
 3610 
 3611         if (!rc) {
 3612                 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
 3613                 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE);
 3614         }
 3615 }
 3616 
 3617 void
 3618 vop_rename_post(void *ap, int rc)
 3619 {
 3620         struct vop_rename_args *a = ap;
 3621 
 3622         if (!rc) {
 3623                 VFS_KNOTE_UNLOCKED(a->a_fdvp, NOTE_WRITE);
 3624                 VFS_KNOTE_UNLOCKED(a->a_tdvp, NOTE_WRITE);
 3625                 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME);
 3626                 if (a->a_tvp)
 3627                         VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE);
 3628         }
 3629         if (a->a_tdvp != a->a_fdvp)
 3630                 vdrop(a->a_fdvp);
 3631         if (a->a_tvp != a->a_fvp)
 3632                 vdrop(a->a_fvp);
 3633         vdrop(a->a_tdvp);
 3634         if (a->a_tvp)
 3635                 vdrop(a->a_tvp);
 3636 }
 3637 
 3638 void
 3639 vop_rmdir_post(void *ap, int rc)
 3640 {
 3641         struct vop_rmdir_args *a = ap;
 3642 
 3643         if (!rc) {
 3644                 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK);
 3645                 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE);
 3646         }
 3647 }
 3648 
 3649 void
 3650 vop_setattr_post(void *ap, int rc)
 3651 {
 3652         struct vop_setattr_args *a = ap;
 3653 
 3654         if (!rc)
 3655                 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB);
 3656 }
 3657 
 3658 void
 3659 vop_symlink_post(void *ap, int rc)
 3660 {
 3661         struct vop_symlink_args *a = ap;
 3662         
 3663         if (!rc)
 3664                 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
 3665 }
 3666 
 3667 static struct knlist fs_knlist;
 3668 
 3669 static void
 3670 vfs_event_init(void *arg)
 3671 {
 3672         knlist_init(&fs_knlist, NULL, NULL, NULL, NULL);
 3673 }
 3674 /* XXX - correct order? */
 3675 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL);
 3676 
 3677 void
 3678 vfs_event_signal(fsid_t *fsid, u_int32_t event, intptr_t data __unused)
 3679 {
 3680 
 3681         KNOTE_UNLOCKED(&fs_knlist, event);
 3682 }
 3683 
 3684 static int      filt_fsattach(struct knote *kn);
 3685 static void     filt_fsdetach(struct knote *kn);
 3686 static int      filt_fsevent(struct knote *kn, long hint);
 3687 
 3688 struct filterops fs_filtops =
 3689         { 0, filt_fsattach, filt_fsdetach, filt_fsevent };
 3690 
 3691 static int
 3692 filt_fsattach(struct knote *kn)
 3693 {
 3694 
 3695         kn->kn_flags |= EV_CLEAR;
 3696         knlist_add(&fs_knlist, kn, 0);
 3697         return (0);
 3698 }
 3699 
 3700 static void
 3701 filt_fsdetach(struct knote *kn)
 3702 {
 3703 
 3704         knlist_remove(&fs_knlist, kn, 0);
 3705 }
 3706 
 3707 static int
 3708 filt_fsevent(struct knote *kn, long hint)
 3709 {
 3710 
 3711         kn->kn_fflags |= hint;
 3712         return (kn->kn_fflags != 0);
 3713 }
 3714 
 3715 static int
 3716 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS)
 3717 {
 3718         struct vfsidctl vc;
 3719         int error;
 3720         struct mount *mp;
 3721 
 3722         error = SYSCTL_IN(req, &vc, sizeof(vc));
 3723         if (error)
 3724                 return (error);
 3725         if (vc.vc_vers != VFS_CTL_VERS1)
 3726                 return (EINVAL);
 3727         mp = vfs_getvfs(&vc.vc_fsid);
 3728         if (mp == NULL)
 3729                 return (ENOENT);
 3730         /* ensure that a specific sysctl goes to the right filesystem. */
 3731         if (strcmp(vc.vc_fstypename, "*") != 0 &&
 3732             strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) {
 3733                 return (EINVAL);
 3734         }
 3735         VCTLTOREQ(&vc, req);
 3736         return (VFS_SYSCTL(mp, vc.vc_op, req));
 3737 }
 3738 
 3739 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLFLAG_WR,
 3740         NULL, 0, sysctl_vfs_ctl, "", "Sysctl by fsid");
 3741 
 3742 /*
 3743  * Function to initialize a va_filerev field sensibly.
 3744  * XXX: Wouldn't a random number make a lot more sense ??
 3745  */
 3746 u_quad_t
 3747 init_va_filerev(void)
 3748 {
 3749         struct bintime bt;
 3750 
 3751         getbinuptime(&bt);
 3752         return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL));
 3753 }
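
/*
 * Editor's note (not in the original source): the returned value packs the
 * boot-relative seconds into the high 32 bits and the top 32 bits of the
 * binary fraction into the low 32 bits, giving a monotonically increasing
 * value with roughly 2^-32 second resolution.  For example:
 *
 *	bt.sec = 5, bt.frac = 0x8000000000000000 (half a second)
 *	returns 0x0000000580000000
 */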
 3754 
 3755 static int      filt_vfsread(struct knote *kn, long hint);
 3756 static int      filt_vfswrite(struct knote *kn, long hint);
 3757 static int      filt_vfsvnode(struct knote *kn, long hint);
 3758 static void     filt_vfsdetach(struct knote *kn);
 3759 static struct filterops vfsread_filtops =
 3760         { 1, NULL, filt_vfsdetach, filt_vfsread };
 3761 static struct filterops vfswrite_filtops =
 3762         { 1, NULL, filt_vfsdetach, filt_vfswrite };
 3763 static struct filterops vfsvnode_filtops =
 3764         { 1, NULL, filt_vfsdetach, filt_vfsvnode };
 3765 
 3766 static void
 3767 vfs_knllock(void *arg)
 3768 {
 3769         struct vnode *vp = arg;
 3770 
 3771         vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
 3772 }
 3773 
 3774 static void
 3775 vfs_knlunlock(void *arg)
 3776 {
 3777         struct vnode *vp = arg;
 3778 
 3779         VOP_UNLOCK(vp, 0, curthread);
 3780 }
 3781 
 3782 static int
 3783 vfs_knllocked(void *arg)
 3784 {
 3785         struct vnode *vp = arg;
 3786 
 3787         return (VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE);
 3788 }
 3789 
 3790 int
 3791 vfs_kqfilter(struct vop_kqfilter_args *ap)
 3792 {
 3793         struct vnode *vp = ap->a_vp;
 3794         struct knote *kn = ap->a_kn;
 3795         struct knlist *knl; 
 3796 
 3797         switch (kn->kn_filter) {
 3798         case EVFILT_READ:
 3799                 kn->kn_fop = &vfsread_filtops;
 3800                 break;
 3801         case EVFILT_WRITE:
 3802                 kn->kn_fop = &vfswrite_filtops;
 3803                 break;
 3804         case EVFILT_VNODE:
 3805                 kn->kn_fop = &vfsvnode_filtops;
 3806                 break;
 3807         default:
 3808                 return (EINVAL);
 3809         }
 3810 
 3811         kn->kn_hook = (caddr_t)vp;
 3812 
 3813         if (vp->v_pollinfo == NULL)
 3814                 v_addpollinfo(vp);
 3815         if (vp->v_pollinfo == NULL)
 3816                 return (ENOMEM);
 3817         knl = &vp->v_pollinfo->vpi_selinfo.si_note;
 3818         knlist_add(knl, kn, 0);
 3819 
 3820         return (0);
 3821 }
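
/*
 * Editor's sketch (userland usage, not in the original source): this
 * routine backs kqueue(2) vnode filters, which are registered roughly as:
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
 *	    NOTE_WRITE | NOTE_DELETE, 0, 0);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * after which writes to, and deletion of, the file behind fd are reported
 * through kevent(2).
 */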
 3822 
 3823 /*
 3824  * Detach knote from vnode
 3825  */
 3826 static void
 3827 filt_vfsdetach(struct knote *kn)
 3828 {
 3829         struct vnode *vp = (struct vnode *)kn->kn_hook;
 3830 
 3831         KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo"));
 3832         knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0);
 3833 }
 3834 
 3835 /*ARGSUSED*/
 3836 static int
 3837 filt_vfsread(struct knote *kn, long hint)
 3838 {
 3839         struct vnode *vp = (struct vnode *)kn->kn_hook;
 3840         struct vattr va;
 3841 
 3842         /*
 3843          * filesystem is gone, so set the EOF flag and schedule
 3844          * the knote for deletion.
 3845          */
 3846         if (hint == NOTE_REVOKE) {
 3847                 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
 3848                 return (1);
 3849         }
 3850 
 3851         if (VOP_GETATTR(vp, &va, curthread->td_ucred, curthread)) 
 3852                 return (0);
 3853 
 3854         kn->kn_data = va.va_size - kn->kn_fp->f_offset;
 3855         return (kn->kn_data != 0);
 3856 }
 3857 
 3858 /*ARGSUSED*/
 3859 static int
 3860 filt_vfswrite(struct knote *kn, long hint)
 3861 {
 3862         /*
 3863          * filesystem is gone, so set the EOF flag and schedule
 3864          * the knote for deletion.
 3865          */
 3866         if (hint == NOTE_REVOKE)
 3867                 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
 3868 
 3869         kn->kn_data = 0;
 3870         return (1);
 3871 }
 3872 
 3873 static int
 3874 filt_vfsvnode(struct knote *kn, long hint)
 3875 {
 3876         if (kn->kn_sfflags & hint)
 3877                 kn->kn_fflags |= hint;
 3878         if (hint == NOTE_REVOKE) {
 3879                 kn->kn_flags |= EV_EOF;
 3880                 return (1);
 3881         }
 3882         return (kn->kn_fflags != 0);
 3883 }
 3884 
 3885 int
 3886 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off)
 3887 {
 3888         int error;
 3889 
 3890         if (dp->d_reclen > ap->a_uio->uio_resid)
 3891                 return (ENAMETOOLONG);
 3892         error = uiomove(dp, dp->d_reclen, ap->a_uio);
 3893         if (error) {
 3894                 if (ap->a_ncookies != NULL) {
 3895                         if (ap->a_cookies != NULL)
 3896                                 free(ap->a_cookies, M_TEMP);
 3897                         ap->a_cookies = NULL;
 3898                         *ap->a_ncookies = 0;
 3899                 }
 3900                 return (error);
 3901         }
 3902         if (ap->a_ncookies == NULL)
 3903                 return (0);
 3904 
 3905         KASSERT(ap->a_cookies,
 3906             ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!"));
 3907 
 3908         *ap->a_cookies = realloc(*ap->a_cookies,
 3909             (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO);
 3910         (*ap->a_cookies)[*ap->a_ncookies] = off;
 3911         return (0);
 3912 }
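
/*
 * Editor's sketch (an assumed caller, not in the original source): a
 * filesystem's VOP_READDIR loop can use this helper to copy out one
 * entry at a time while keeping the NFS cookie array in sync, roughly:
 *
 *	error = vfs_read_dirent(ap, dp, off);
 *	if (error)
 *		break;	(uio exhausted, or copyout failed)
 *
 * where dp points at the next struct dirent and off is the directory
 * offset at which the following entry begins.
 */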
 3913 
 3914 /*
 3915  * Mark for update the access time of the file if the filesystem
 3916  * supports VA_MARK_ATIME.  This functionality is used by execve
 3917  * and mmap, so we want to avoid the synchronous I/O implied by
 3918  * directly setting va_atime for the sake of efficiency.
 3919  */
 3920 void
 3921 vfs_mark_atime(struct vnode *vp, struct thread *td)
 3922 {
 3923         struct vattr atimeattr;
 3924 
 3925         if ((vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) {
 3926                 VATTR_NULL(&atimeattr);
 3927                 atimeattr.va_vaflags |= VA_MARK_ATIME;
 3928                 (void)VOP_SETATTR(vp, &atimeattr, td->td_ucred, td);
 3929         }
 3930 }
