FreeBSD/Linux Kernel Cross Reference
sys/kern/vfs_subr.c

    1 /*
    2  * Copyright (c) 1989, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  * (c) UNIX System Laboratories, Inc.
    5  * All or some portions of this file are derived from material licensed
    6  * to the University of California by American Telephone and Telegraph
    7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
    8  * the permission of UNIX System Laboratories, Inc.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  * 3. All advertising materials mentioning features or use of this software
   19  *    must display the following acknowledgement:
   20  *      This product includes software developed by the University of
   21  *      California, Berkeley and its contributors.
   22  * 4. Neither the name of the University nor the names of its contributors
   23  *    may be used to endorse or promote products derived from this software
   24  *    without specific prior written permission.
   25  *
   26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   36  * SUCH DAMAGE.
   37  *
   38  *      @(#)vfs_subr.c  8.31 (Berkeley) 5/26/95
   39  */
   40 
   41 /*
   42  * External virtual filesystem routines
   43  */
   44 
   45 #include <sys/cdefs.h>
   46 __FBSDID("$FreeBSD: releng/5.2/sys/kern/vfs_subr.c 123639 2003-12-18 16:22:16Z jeff $");
   47 
   48 #include "opt_ddb.h"
   49 #include "opt_mac.h"
   50 
   51 #include <sys/param.h>
   52 #include <sys/systm.h>
   53 #include <sys/bio.h>
   54 #include <sys/buf.h>
   55 #include <sys/conf.h>
   56 #include <sys/eventhandler.h>
   57 #include <sys/extattr.h>
   58 #include <sys/fcntl.h>
   59 #include <sys/kernel.h>
   60 #include <sys/kthread.h>
   61 #include <sys/mac.h>
   62 #include <sys/malloc.h>
   63 #include <sys/mount.h>
   64 #include <sys/namei.h>
   65 #include <sys/stat.h>
   66 #include <sys/sysctl.h>
   67 #include <sys/syslog.h>
   68 #include <sys/vmmeter.h>
   69 #include <sys/vnode.h>
   70 
   71 #include <vm/vm.h>
   72 #include <vm/vm_object.h>
   73 #include <vm/vm_extern.h>
   74 #include <vm/pmap.h>
   75 #include <vm/vm_map.h>
   76 #include <vm/vm_page.h>
   77 #include <vm/vm_kern.h>
   78 #include <vm/uma.h>
   79 
   80 static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");
   81 
   82 static void     addalias(struct vnode *vp, dev_t nvp_rdev);
   83 static void     insmntque(struct vnode *vp, struct mount *mp);
   84 static void     vclean(struct vnode *vp, int flags, struct thread *td);
   85 static void     vlruvp(struct vnode *vp);
   86 static int      flushbuflist(struct buf *blist, int flags, struct vnode *vp,
   87                     int slpflag, int slptimeo, int *errorp);
   88 static int      vtryrecycle(struct vnode *vp);
   89 static void     vx_lock(struct vnode *vp);
   90 static void     vx_unlock(struct vnode *vp);
   91 static void     vgonechrl(struct vnode *vp, struct thread *td);
   92 
   93 
   94 /*
   95  * Number of vnodes in existence.  Increased whenever getnewvnode()
   96  * allocates a new vnode, never decreased.
   97  */
   98 static unsigned long    numvnodes;
   99 
  100 SYSCTL_LONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");
  101 
  102 /*
  103  * Conversion tables for conversion from vnode types to inode formats
  104  * and back.
  105  */
  106 enum vtype iftovt_tab[16] = {
  107         VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
  108         VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
  109 };
  110 int vttoif_tab[9] = {
  111         0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
  112         S_IFSOCK, S_IFIFO, S_IFMT,
  113 };
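
/*
 * Editor's sketch, not part of the original file: the tables above are
 * normally consumed through the IFTOVT() and VTTOIF() macros from
 * <sys/vnode.h>.  The hypothetical helpers below spell out the same
 * indexing, relying on the headers already included by this file.
 */
static __inline enum vtype
iftovt_sketch(mode_t mode)
{
        /* The file-type bits live in the top nibble of the 16-bit mode. */
        return (iftovt_tab[(mode & S_IFMT) >> 12]);
}

static __inline int
vttoif_sketch(enum vtype type)
{
        /* The reverse direction is a direct lookup by vnode type. */
        return (vttoif_tab[(int)type]);
}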
  114 
  115 /*
  116  * List of vnodes that are ready for recycling.
  117  */
  118 static TAILQ_HEAD(freelst, vnode) vnode_free_list;
  119 
  120 /*
   121  * Minimum number of free vnodes.  If there are fewer free vnodes than this,
  122  * getnewvnode() will return a newly allocated vnode.
  123  */
  124 static u_long wantfreevnodes = 25;
  125 SYSCTL_LONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
  126 /* Number of vnodes in the free list. */
  127 static u_long freevnodes;
  128 SYSCTL_LONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");
  129 
  130 /*
  131  * Various variables used for debugging the new implementation of
  132  * reassignbuf().
  133  * XXX these are probably of (very) limited utility now.
  134  */
  135 static int reassignbufcalls;
  136 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
  137 static int nameileafonly;
  138 SYSCTL_INT(_vfs, OID_AUTO, nameileafonly, CTLFLAG_RW, &nameileafonly, 0, "");
  139 
  140 /*
  141  * Cache for the mount type id assigned to NFS.  This is used for
  142  * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
  143  */
  144 int     nfs_mount_type = -1;
  145 
  146 /* To keep more than one thread at a time from running vfs_getnewfsid */
  147 static struct mtx mntid_mtx;
  148 
  149 /*
  150  * Lock for any access to the following:
  151  *      vnode_free_list
  152  *      numvnodes
  153  *      freevnodes
  154  */
  155 static struct mtx vnode_free_list_mtx;
  156 
  157 /*
  158  * For any iteration/modification of dev->si_hlist (linked through
  159  * v_specnext)
  160  */
  161 static struct mtx spechash_mtx;
  162 
  163 /* Publicly exported FS */
  164 struct nfs_public nfs_pub;
  165 
  166 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
  167 static uma_zone_t vnode_zone;
  168 static uma_zone_t vnodepoll_zone;
  169 
  170 /* Set to 1 to print out reclaim of active vnodes */
  171 int     prtactive;
  172 
  173 /*
  174  * The workitem queue.
  175  *
  176  * It is useful to delay writes of file data and filesystem metadata
  177  * for tens of seconds so that quickly created and deleted files need
  178  * not waste disk bandwidth being created and removed. To realize this,
  179  * we append vnodes to a "workitem" queue. When running with a soft
  180  * updates implementation, most pending metadata dependencies should
   181  * not wait for more than a few seconds.  Thus, filesystem metadata is
   182  * delayed only about half as long as file data.  Similarly, directory
   183  * updates are more critical, so they are delayed only about a third as
   184  * long as file data.  Thus, there are
  185  * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
  186  * one each second (driven off the filesystem syncer process). The
  187  * syncer_delayno variable indicates the next queue that is to be processed.
  188  * Items that need to be processed soon are placed in this queue:
  189  *
  190  *      syncer_workitem_pending[syncer_delayno]
  191  *
  192  * A delay of fifteen seconds is done by placing the request fifteen
  193  * entries later in the queue:
  194  *
  195  *      syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
  196  *
  197  */
  198 static int syncer_delayno;
  199 static long syncer_mask;
  200 LIST_HEAD(synclist, vnode);
  201 static struct synclist *syncer_workitem_pending;
  202 /*
  203  * The sync_mtx protects:
  204  *      vp->v_synclist
  205  *      syncer_delayno
  206  *      syncer_workitem_pending
  207  *      rushjob
  208  */
  209 static struct mtx sync_mtx;
  210 
  211 #define SYNCER_MAXDELAY         32
  212 static int syncer_maxdelay = SYNCER_MAXDELAY;   /* maximum delay time */
  213 static int syncdelay = 30;              /* max time to delay syncing data */
  214 static int filedelay = 30;              /* time to delay syncing files */
  215 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
  216 static int dirdelay = 29;               /* time to delay syncing directories */
  217 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
  218 static int metadelay = 28;              /* time to delay syncing metadata */
  219 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
  220 static int rushjob;             /* number of slots to run ASAP */
  221 static int stat_rush_requests;  /* number of times I/O speeded up */
  222 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");
  223 
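/*
 * Editor's sketch, not part of the original file: the bucket arithmetic
 * described by the workitem-queue comment above, in the style of
 * vn_syncer_add_to_worklist() (defined later in this file).  This
 * hypothetical stub only shows the delay-to-slot mapping; the real routine
 * also maintains VI_ONWORKLST and removes the vnode from any slot it
 * already occupies.
 */
static __inline void
syncer_bucket_sketch(struct vnode *vp, int delay)
{
        int slot;

        mtx_lock(&sync_mtx);
        /* Clamp so the slot stays within the ring of delay queues. */
        if (delay > syncer_maxdelay - 2)
                delay = syncer_maxdelay - 2;
        /* A delay of N seconds lands N entries past the current queue. */
        slot = (syncer_delayno + delay) & syncer_mask;
        LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
        mtx_unlock(&sync_mtx);
}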
  224 /*
  225  * Number of vnodes we want to exist at any one time.  This is mostly used
  226  * to size hash tables in vnode-related code.  It is normally not used in
   227  * getnewvnode(), as wantfreevnodes is normally nonzero.
  228  *
  229  * XXX desiredvnodes is historical cruft and should not exist.
  230  */
  231 int desiredvnodes;
  232 SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
  233     &desiredvnodes, 0, "Maximum number of vnodes");
  234 static int minvnodes;
  235 SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
  236     &minvnodes, 0, "Minimum number of vnodes");
  237 static int vnlru_nowhere;
  238 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, &vnlru_nowhere, 0,
  239     "Number of times the vnlru process ran without success");
  240 
  241 /* Hook for calling soft updates */
  242 int (*softdep_process_worklist_hook)(struct mount *);
  243 
  244 /*
   245  * This only exists to suppress warnings from unlocked specfs accesses.  It is
  246  * no longer ok to have an unlocked VFS.
  247  */
  248 #define IGNORE_LOCK(vp) ((vp)->v_type == VCHR || (vp)->v_type == VBAD)
  249 
  250 /* Print lock violations */
  251 int vfs_badlock_print = 1;
  252 
  253 /* Panic on violation */
  254 int vfs_badlock_panic = 1;
  255 
  256 /* Check for interlock across VOPs */
  257 int vfs_badlock_mutex = 1;
  258 
  259 static void
  260 vfs_badlock(const char *msg, const char *str, struct vnode *vp)
  261 {
  262         if (vfs_badlock_print)
  263                 printf("%s: %p %s\n", str, vp, msg);
  264         if (vfs_badlock_panic)
  265                 Debugger("Lock violation.\n");
  266 }
  267 
  268 void
  269 assert_vi_unlocked(struct vnode *vp, const char *str)
  270 { 
  271         if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
  272                 vfs_badlock("interlock is locked but should not be", str, vp);
  273 }
  274 
  275 void
  276 assert_vi_locked(struct vnode *vp, const char *str)
  277 {
  278         if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
  279                 vfs_badlock("interlock is not locked but should be", str, vp);
  280 }
  281 
  282 void
  283 assert_vop_locked(struct vnode *vp, const char *str)
  284 {
  285         if (vp && !IGNORE_LOCK(vp) && !VOP_ISLOCKED(vp, NULL))
  286                 vfs_badlock("is not locked but should be", str, vp);
  287 }
  288 
  289 void
  290 assert_vop_unlocked(struct vnode *vp, const char *str)
  291 {
  292         if (vp && !IGNORE_LOCK(vp) &&
  293             VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE)
  294                 vfs_badlock("is locked but should not be", str, vp);
  295 }
  296 
  297 void
  298 assert_vop_elocked(struct vnode *vp, const char *str)
  299 {
  300         if (vp && !IGNORE_LOCK(vp) &&
  301             VOP_ISLOCKED(vp, curthread) != LK_EXCLUSIVE)
  302                 vfs_badlock("is not exclusive locked but should be", str, vp);
  303 }
  304 
  305 void
  306 assert_vop_elocked_other(struct vnode *vp, const char *str)
  307 {
  308         if (vp && !IGNORE_LOCK(vp) &&
  309             VOP_ISLOCKED(vp, curthread) != LK_EXCLOTHER)
  310                 vfs_badlock("is not exclusive locked by another thread",
  311                     str, vp);
  312 }
  313 
  314 void
  315 assert_vop_slocked(struct vnode *vp, const char *str)
  316 {
  317         if (vp && !IGNORE_LOCK(vp) &&
  318             VOP_ISLOCKED(vp, curthread) != LK_SHARED)
  319                 vfs_badlock("is not locked shared but should be", str, vp);
  320 }
  321 
  322 void
  323 vop_rename_pre(void *ap)
  324 {
  325         struct vop_rename_args *a = ap;
  326 
  327         if (a->a_tvp)
  328                 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME");
  329         ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME");
  330         ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME");
  331         ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME");
  332 
  333         /* Check the source (from) */
  334         if (a->a_tdvp != a->a_fdvp)
  335                 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked.\n");
  336         if (a->a_tvp != a->a_fvp)
   337                 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked.\n");
  338 
  339         /* Check the target */
  340         if (a->a_tvp)
  341                 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked.\n");
  342 
  343         ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked.\n");
  344 }
  345 
  346 void
  347 vop_strategy_pre(void *ap)
  348 {
  349         struct vop_strategy_args *a = ap;
  350         struct buf *bp;
  351 
  352         bp = a->a_bp;
  353 
  354         /*
  355          * Cluster ops lock their component buffers but not the IO container.
  356          */
  357         if ((bp->b_flags & B_CLUSTER) != 0)
  358                 return;
  359 
  360         if (BUF_REFCNT(bp) < 1) {
  361                 if (vfs_badlock_print)
  362                         printf("VOP_STRATEGY: bp is not locked but should be.\n");
  363                 if (vfs_badlock_panic)
  364                         Debugger("Lock violation.\n");
  365         }
  366 }
  367 
  368 void
  369 vop_lookup_pre(void *ap)
  370 {
  371         struct vop_lookup_args *a = ap;
  372         struct vnode *dvp;
  373 
  374         dvp = a->a_dvp;
  375 
  376         ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
  377         ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP");
  378 }
  379 
  380 void
  381 vop_lookup_post(void *ap, int rc)
  382 {
  383         struct vop_lookup_args *a = ap;
  384         struct componentname *cnp;
  385         struct vnode *dvp;
  386         struct vnode *vp;
  387         int flags;
  388 
  389         dvp = a->a_dvp;
  390         cnp = a->a_cnp;
  391         vp = *(a->a_vpp);
  392         flags = cnp->cn_flags;
  393 
  394 
  395         ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
  396         /*
   397          * If this is the last path component for this lookup and LOCKPARENT
   398          * is set, or if there is an error, the directory has to be locked.
  399          */
  400         if ((flags & LOCKPARENT) && (flags & ISLASTCN))
  401                 ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (LOCKPARENT)");
  402         else if (rc != 0)
  403                 ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (error)");
  404         else if (dvp != vp)
  405                 ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (dvp)");
  406 
  407         if (flags & PDIRUNLOCK)
  408                 ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (PDIRUNLOCK)");
  409 }
  410 
  411 void
  412 vop_unlock_pre(void *ap)
  413 {
  414         struct vop_unlock_args *a = ap;
  415 
  416         if (a->a_flags & LK_INTERLOCK)
  417                 ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK");
  418 
  419         ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK");
  420 }
  421 
  422 void
  423 vop_unlock_post(void *ap, int rc)
  424 {
  425         struct vop_unlock_args *a = ap;
  426 
  427         if (a->a_flags & LK_INTERLOCK)
  428                 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK");
  429 }
  430 
  431 void
  432 vop_lock_pre(void *ap)
  433 {
  434         struct vop_lock_args *a = ap;
  435 
  436         if ((a->a_flags & LK_INTERLOCK) == 0)
  437                 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
  438         else
  439                 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK");
  440 }
  441 
  442 void
  443 vop_lock_post(void *ap, int rc)
  444 {
  445         struct vop_lock_args *a;
  446 
  447         a = ap;
  448 
  449         ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
  450         if (rc == 0)
  451                 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK");
  452 }
  453 
  454 void
  455 v_addpollinfo(struct vnode *vp)
  456 {
  457         vp->v_pollinfo = uma_zalloc(vnodepoll_zone, M_WAITOK);
  458         mtx_init(&vp->v_pollinfo->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
  459 }
  460 
  461 /*
  462  * Initialize the vnode management data structures.
  463  */
  464 static void
  465 vntblinit(void *dummy __unused)
  466 {
  467 
  468         /*
  469          * Desiredvnodes is a function of the physical memory size and
  470          * the kernel's heap size.  Specifically, desiredvnodes scales
  471          * in proportion to the physical memory size until two fifths
  472          * of the kernel's heap size is consumed by vnodes and vm
  473          * objects.  
  474          */
  475         desiredvnodes = min(maxproc + cnt.v_page_count / 4, 2 * vm_kmem_size /
  476             (5 * (sizeof(struct vm_object) + sizeof(struct vnode))));
  477         minvnodes = desiredvnodes / 4;
  478         mtx_init(&mountlist_mtx, "mountlist", NULL, MTX_DEF);
  479         mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
  480         mtx_init(&spechash_mtx, "spechash", NULL, MTX_DEF);
  481         TAILQ_INIT(&vnode_free_list);
  482         mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
  483         vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
  484             NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
  485         vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
  486               NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
  487         /*
  488          * Initialize the filesystem syncer.
  489          */
  490         syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
  491                 &syncer_mask);
  492         syncer_maxdelay = syncer_mask + 1;
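        /*
         * Editor's note: hashinit() sizes the table to a power of two and
         * returns size - 1 in syncer_mask, so with the default
         * SYNCER_MAXDELAY of 32 the mask is 31 and the reassignment above
         * leaves syncer_maxdelay at 32.
         */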
  493         mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
  494 }
  495 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL)
  496 
  497 
  498 /*
  499  * Mark a mount point as busy. Used to synchronize access and to delay
  500  * unmounting. Interlock is not released on failure.
  501  */
  502 int
  503 vfs_busy(mp, flags, interlkp, td)
  504         struct mount *mp;
  505         int flags;
  506         struct mtx *interlkp;
  507         struct thread *td;
  508 {
  509         int lkflags;
  510 
  511         if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
  512                 if (flags & LK_NOWAIT)
  513                         return (ENOENT);
  514                 mp->mnt_kern_flag |= MNTK_MWAIT;
  515                 /*
  516                  * Since all busy locks are shared except the exclusive
  517                  * lock granted when unmounting, the only place that a
  518                  * wakeup needs to be done is at the release of the
  519                  * exclusive lock at the end of dounmount.
  520                  */
  521                 msleep(mp, interlkp, PVFS, "vfs_busy", 0);
  522                 return (ENOENT);
  523         }
  524         lkflags = LK_SHARED | LK_NOPAUSE;
  525         if (interlkp)
  526                 lkflags |= LK_INTERLOCK;
  527         if (lockmgr(&mp->mnt_lock, lkflags, interlkp, td))
  528                 panic("vfs_busy: unexpected lock failure");
  529         return (0);
  530 }
  531 
  532 /*
  533  * Free a busy filesystem.
  534  */
  535 void
  536 vfs_unbusy(mp, td)
  537         struct mount *mp;
  538         struct thread *td;
  539 {
  540 
  541         lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
  542 }
  543 
  544 /*
  545  * Lookup a mount point by filesystem identifier.
  546  */
  547 struct mount *
  548 vfs_getvfs(fsid)
  549         fsid_t *fsid;
  550 {
  551         register struct mount *mp;
  552 
  553         mtx_lock(&mountlist_mtx);
  554         TAILQ_FOREACH(mp, &mountlist, mnt_list) {
  555                 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
  556                     mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
  557                         mtx_unlock(&mountlist_mtx);
  558                         return (mp);
  559                 }
  560         }
  561         mtx_unlock(&mountlist_mtx);
  562         return ((struct mount *) 0);
  563 }
  564 
  565 /*
  566  * Get a new unique fsid.  Try to make its val[0] unique, since this value
  567  * will be used to create fake device numbers for stat().  Also try (but
   568  * not so hard) to make its val[0] unique mod 2^16, since some emulators only
  569  * support 16-bit device numbers.  We end up with unique val[0]'s for the
  570  * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
  571  *
  572  * Keep in mind that several mounts may be running in parallel.  Starting
  573  * the search one past where the previous search terminated is both a
  574  * micro-optimization and a defense against returning the same fsid to
  575  * different mounts.
  576  */
  577 void
  578 vfs_getnewfsid(mp)
  579         struct mount *mp;
  580 {
  581         static u_int16_t mntid_base;
  582         fsid_t tfsid;
  583         int mtype;
  584 
  585         mtx_lock(&mntid_mtx);
  586         mtype = mp->mnt_vfc->vfc_typenum;
  587         tfsid.val[1] = mtype;
  588         mtype = (mtype & 0xFF) << 24;
  589         for (;;) {
  590                 tfsid.val[0] = makeudev(255,
  591                     mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
  592                 mntid_base++;
  593                 if (vfs_getvfs(&tfsid) == NULL)
  594                         break;
  595         }
  596         mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
  597         mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
  598         mtx_unlock(&mntid_mtx);
  599 }
  600 
  601 /*
  602  * Knob to control the precision of file timestamps:
  603  *
  604  *   0 = seconds only; nanoseconds zeroed.
  605  *   1 = seconds and nanoseconds, accurate within 1/HZ.
  606  *   2 = seconds and nanoseconds, truncated to microseconds.
  607  * >=3 = seconds and nanoseconds, maximum precision.
  608  */
  609 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };
  610 
  611 static int timestamp_precision = TSP_SEC;
  612 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
  613     &timestamp_precision, 0, "");
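
/*
 * Editor's usage note: the knob above is exposed as vfs.timestamp_precision,
 * so for example "sysctl vfs.timestamp_precision=3" selects full nanosecond
 * resolution at the cost of a more expensive timecounter read per timestamp.
 */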
  614 
  615 /*
  616  * Get a current timestamp.
  617  */
  618 void
  619 vfs_timestamp(tsp)
  620         struct timespec *tsp;
  621 {
  622         struct timeval tv;
  623 
  624         switch (timestamp_precision) {
  625         case TSP_SEC:
  626                 tsp->tv_sec = time_second;
  627                 tsp->tv_nsec = 0;
  628                 break;
  629         case TSP_HZ:
  630                 getnanotime(tsp);
  631                 break;
  632         case TSP_USEC:
  633                 microtime(&tv);
  634                 TIMEVAL_TO_TIMESPEC(&tv, tsp);
  635                 break;
  636         case TSP_NSEC:
  637         default:
  638                 nanotime(tsp);
  639                 break;
  640         }
  641 }
  642 
  643 /*
  644  * Set vnode attributes to VNOVAL
  645  */
  646 void
  647 vattr_null(vap)
  648         register struct vattr *vap;
  649 {
  650 
  651         vap->va_type = VNON;
  652         vap->va_size = VNOVAL;
  653         vap->va_bytes = VNOVAL;
  654         vap->va_mode = VNOVAL;
  655         vap->va_nlink = VNOVAL;
  656         vap->va_uid = VNOVAL;
  657         vap->va_gid = VNOVAL;
  658         vap->va_fsid = VNOVAL;
  659         vap->va_fileid = VNOVAL;
  660         vap->va_blocksize = VNOVAL;
  661         vap->va_rdev = VNOVAL;
  662         vap->va_atime.tv_sec = VNOVAL;
  663         vap->va_atime.tv_nsec = VNOVAL;
  664         vap->va_mtime.tv_sec = VNOVAL;
  665         vap->va_mtime.tv_nsec = VNOVAL;
  666         vap->va_ctime.tv_sec = VNOVAL;
  667         vap->va_ctime.tv_nsec = VNOVAL;
  668         vap->va_birthtime.tv_sec = VNOVAL;
  669         vap->va_birthtime.tv_nsec = VNOVAL;
  670         vap->va_flags = VNOVAL;
  671         vap->va_gen = VNOVAL;
  672         vap->va_vaflags = 0;
  673 }
  674 
  675 /*
  676  * This routine is called when we have too many vnodes.  It attempts
  677  * to free <count> vnodes and will potentially free vnodes that still
  678  * have VM backing store (VM backing store is typically the cause
  679  * of a vnode blowout so we want to do this).  Therefore, this operation
  680  * is not considered cheap.
  681  *
   682  * A number of conditions may prevent a vnode from being reclaimed:
   683  * the buffer cache may have references on the vnode, a directory
   684  * vnode may still have references due to the namei cache representing
   685  * underlying files, or the vnode may be in active use.  It is not
   686  * desirable to reuse such vnodes.  These conditions may cause the
  687  * number of vnodes to reach some minimum value regardless of what
  688  * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
  689  */
  690 static int
  691 vlrureclaim(struct mount *mp)
  692 {
  693         struct vnode *vp;
  694         int done;
  695         int trigger;
  696         int usevnodes;
  697         int count;
  698 
  699         /*
  700          * Calculate the trigger point, don't allow user
  701          * screwups to blow us up.   This prevents us from
  702          * recycling vnodes with lots of resident pages.  We
  703          * aren't trying to free memory, we are trying to
  704          * free vnodes.
  705          */
  706         usevnodes = desiredvnodes;
  707         if (usevnodes <= 0)
  708                 usevnodes = 1;
  709         trigger = cnt.v_page_count * 2 / usevnodes;
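        /*
         * Editor's note, a worked example with assumed numbers: on a machine
         * with 1GB of RAM (about 262144 4K pages) and desiredvnodes = 70000,
         * trigger = 262144 * 2 / 70000 = 7, so only vnodes caching fewer
         * than 7 resident pages are eligible for recycling below.
         */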
  710 
  711         done = 0;
  712         MNT_ILOCK(mp);
  713         count = mp->mnt_nvnodelistsize / 10 + 1;
  714         while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
  715                 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
  716                 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
  717 
  718                 if (vp->v_type != VNON &&
  719                     vp->v_type != VBAD &&
  720                     VI_TRYLOCK(vp)) {
  721                         if (VMIGHTFREE(vp) &&           /* critical path opt */
  722                             (vp->v_object == NULL ||
  723                             vp->v_object->resident_page_count < trigger)) {
  724                                 MNT_IUNLOCK(mp);
  725                                 vgonel(vp, curthread);
  726                                 done++;
  727                                 MNT_ILOCK(mp);
  728                         } else
  729                                 VI_UNLOCK(vp);
  730                 }
  731                 --count;
  732         }
  733         MNT_IUNLOCK(mp);
  734         return done;
  735 }
  736 
  737 /*
  738  * Attempt to recycle vnodes in a context that is always safe to block.
   739  * Calling vlrureclaim() from the bowels of filesystem code has some
  740  * interesting deadlock problems.
  741  */
  742 static struct proc *vnlruproc;
  743 static int vnlruproc_sig;
  744 
  745 static void
  746 vnlru_proc(void)
  747 {
  748         struct mount *mp, *nmp;
  749         int done;
  750         struct proc *p = vnlruproc;
  751         struct thread *td = FIRST_THREAD_IN_PROC(p);    /* XXXKSE */
  752 
  753         mtx_lock(&Giant);
  754 
  755         EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
  756             SHUTDOWN_PRI_FIRST);
  757 
  758         for (;;) {
  759                 kthread_suspend_check(p);
  760                 mtx_lock(&vnode_free_list_mtx);
  761                 if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
  762                         mtx_unlock(&vnode_free_list_mtx);
  763                         vnlruproc_sig = 0;
  764                         wakeup(&vnlruproc_sig);
  765                         tsleep(vnlruproc, PVFS, "vlruwt", hz);
  766                         continue;
  767                 }
  768                 mtx_unlock(&vnode_free_list_mtx);
  769                 done = 0;
  770                 mtx_lock(&mountlist_mtx);
  771                 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
  772                         if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
  773                                 nmp = TAILQ_NEXT(mp, mnt_list);
  774                                 continue;
  775                         }
  776                         done += vlrureclaim(mp);
  777                         mtx_lock(&mountlist_mtx);
  778                         nmp = TAILQ_NEXT(mp, mnt_list);
  779                         vfs_unbusy(mp, td);
  780                 }
  781                 mtx_unlock(&mountlist_mtx);
  782                 if (done == 0) {
  783 #if 0
  784                         /* These messages are temporary debugging aids */
  785                         if (vnlru_nowhere < 5)
  786                                 printf("vnlru process getting nowhere..\n");
  787                         else if (vnlru_nowhere == 5)
  788                                 printf("vnlru process messages stopped.\n");
  789 #endif
  790                         vnlru_nowhere++;
  791                         tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
  792                 }
  793         }
  794 }
  795 
  796 static struct kproc_desc vnlru_kp = {
  797         "vnlru",
  798         vnlru_proc,
  799         &vnlruproc
  800 };
  801 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)
  802 
  803 
  804 /*
  805  * Routines having to do with the management of the vnode table.
  806  */
  807 
  808 /*
  809  * Check to see if a free vnode can be recycled. If it can,
  810  * recycle it and return it with the vnode interlock held.
  811  */
  812 static int
  813 vtryrecycle(struct vnode *vp)
  814 {
  815         struct thread *td = curthread;
  816         vm_object_t object;
  817         struct mount *vnmp;
  818         int error;
  819 
  820         /* Don't recycle if we can't get the interlock */
  821         if (!VI_TRYLOCK(vp))
  822                 return (EWOULDBLOCK);
  823         /*
   824          * This vnode may be found and locked via some other list; if so, we
  825          * can't recycle it yet.
  826          */
  827         if (vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE | LK_NOWAIT, td) != 0)
  828                 return (EWOULDBLOCK);
  829         /*
  830          * Don't recycle if its filesystem is being suspended.
  831          */
  832         if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
  833                 error = EBUSY;
  834                 goto done;
  835         }
  836 
  837         /*
  838          * Don't recycle if we still have cached pages.
  839          */
  840         if (VOP_GETVOBJECT(vp, &object) == 0) {
  841                 VM_OBJECT_LOCK(object);
  842                 if (object->resident_page_count ||
  843                     object->ref_count) {
  844                         VM_OBJECT_UNLOCK(object);
  845                         error = EBUSY;
  846                         goto done;
  847                 }
  848                 VM_OBJECT_UNLOCK(object);
  849         }
  850         if (LIST_FIRST(&vp->v_cache_src)) {
  851                 /*
  852                  * note: nameileafonly sysctl is temporary,
  853                  * for debugging only, and will eventually be
  854                  * removed.
  855                  */
  856                 if (nameileafonly > 0) {
  857                         /*
  858                          * Do not reuse namei-cached directory
  859                          * vnodes that have cached
  860                          * subdirectories.
  861                          */
  862                         if (cache_leaf_test(vp) < 0) {
  863                                 error = EISDIR;
  864                                 goto done;
  865                         }
  866                 } else if (nameileafonly < 0 ||
  867                             vmiodirenable == 0) {
  868                         /*
  869                          * Do not reuse namei-cached directory
  870                          * vnodes if nameileafonly is -1 or
  871                          * if VMIO backing for directories is
  872                          * turned off (otherwise we reuse them
  873                          * too quickly).
  874                          */
  875                         error = EBUSY;
  876                         goto done;
  877                 }
  878         }
  879         /*
  880          * If we got this far, we need to acquire the interlock and see if
  881          * anyone picked up this vnode from another list.  If not, we will
  882          * mark it with XLOCK via vgonel() so that anyone who does find it
  883          * will skip over it.
  884          */
  885         VI_LOCK(vp);
  886         if (VSHOULDBUSY(vp) && (vp->v_iflag & VI_XLOCK) == 0) {
  887                 VI_UNLOCK(vp);
  888                 error = EBUSY;
  889                 goto done;
  890         }
  891         mtx_lock(&vnode_free_list_mtx);
  892         TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
  893         vp->v_iflag &= ~VI_FREE;
  894         mtx_unlock(&vnode_free_list_mtx);
  895         vp->v_iflag |= VI_DOOMED;
  896         if (vp->v_type != VBAD) {
  897                 VOP_UNLOCK(vp, 0, td);
  898                 vgonel(vp, td);
  899                 VI_LOCK(vp);
  900         } else
  901                 VOP_UNLOCK(vp, 0, td);
  902         vn_finished_write(vnmp);
  903         return (0);
  904 done:
  905         VOP_UNLOCK(vp, 0, td);
  906         return (error);
  907 }
  908 
  909 /*
  910  * Return the next vnode from the free list.
  911  */
  912 int
  913 getnewvnode(tag, mp, vops, vpp)
  914         const char *tag;
  915         struct mount *mp;
  916         vop_t **vops;
  917         struct vnode **vpp;
  918 {
  919         struct vnode *vp = NULL;
  920         struct vpollinfo *pollinfo = NULL;
  921 
  922         mtx_lock(&vnode_free_list_mtx);
  923 
  924         /*
  925          * Try to reuse vnodes if we hit the max.  This situation only
   926          * occurs in certain large-memory (2G+) configurations.  We cannot
  927          * attempt to directly reclaim vnodes due to nasty recursion
  928          * problems.
  929          */
  930         while (numvnodes - freevnodes > desiredvnodes) {
  931                 if (vnlruproc_sig == 0) {
  932                         vnlruproc_sig = 1;      /* avoid unnecessary wakeups */
  933                         wakeup(vnlruproc);
  934                 }
  935                 mtx_unlock(&vnode_free_list_mtx);
  936                 tsleep(&vnlruproc_sig, PVFS, "vlruwk", hz);
  937                 mtx_lock(&vnode_free_list_mtx);
  938         }
  939 
  940         /*
  941          * Attempt to reuse a vnode already on the free list, allocating
   942          * a new vnode if we can't find one or if we have not yet reached
   943          * the minimum needed for good LRU performance.
  944          */
  945 
  946         if (freevnodes >= wantfreevnodes && numvnodes >= minvnodes) {
  947                 int error;
  948                 int count;
  949 
  950                 for (count = 0; count < freevnodes; count++) {
  951                         vp = TAILQ_FIRST(&vnode_free_list);
  952 
  953                         KASSERT(vp->v_usecount == 0 &&
  954                             (vp->v_iflag & VI_DOINGINACT) == 0,
  955                             ("getnewvnode: free vnode isn't"));
  956 
  957                         TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
  958                         TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
  959                         mtx_unlock(&vnode_free_list_mtx);
  960                         error = vtryrecycle(vp);
  961                         mtx_lock(&vnode_free_list_mtx);
  962                         if (error == 0)
  963                                 break;
  964                         vp = NULL;
  965                 }
  966         }
  967         if (vp) {
  968                 freevnodes--;
  969                 mtx_unlock(&vnode_free_list_mtx);
  970 
  971 #ifdef INVARIANTS
  972                 {
  973                         if (vp->v_data)
  974                                 panic("cleaned vnode isn't");
  975                         if (vp->v_numoutput)
  976                                 panic("Clean vnode has pending I/O's");
  977                         if (vp->v_writecount != 0)
  978                                 panic("Non-zero write count");
  979                 }
  980 #endif
  981                 if ((pollinfo = vp->v_pollinfo) != NULL) {
  982                         /*
  983                          * To avoid lock order reversals, the call to
  984                          * uma_zfree() must be delayed until the vnode
  985                          * interlock is released.   
  986                          */
  987                         vp->v_pollinfo = NULL;
  988                 }
  989 #ifdef MAC
  990                 mac_destroy_vnode(vp);
  991 #endif
  992                 vp->v_iflag = 0;
  993                 vp->v_vflag = 0;
  994                 vp->v_lastw = 0;
  995                 vp->v_lasta = 0;
  996                 vp->v_cstart = 0;
  997                 vp->v_clen = 0;
  998                 vp->v_socket = 0;
  999                 lockdestroy(vp->v_vnlock);
 1000                 lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
 1001                 KASSERT(vp->v_cleanbufcnt == 0, ("cleanbufcnt not 0"));
 1002                 KASSERT(vp->v_cleanblkroot == NULL, ("cleanblkroot not NULL"));
 1003                 KASSERT(vp->v_dirtybufcnt == 0, ("dirtybufcnt not 0"));
 1004                 KASSERT(vp->v_dirtyblkroot == NULL, ("dirtyblkroot not NULL"));
 1005         } else {
 1006                 numvnodes++;
 1007                 mtx_unlock(&vnode_free_list_mtx);
 1008 
 1009                 vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK|M_ZERO);
 1010                 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
 1011                 VI_LOCK(vp);
 1012                 vp->v_dd = vp;
 1013                 vp->v_vnlock = &vp->v_lock;
 1014                 lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
 1015                 cache_purge(vp);                /* Sets up v_id. */
 1016                 LIST_INIT(&vp->v_cache_src);
 1017                 TAILQ_INIT(&vp->v_cache_dst);
 1018         }
 1019 
 1020         TAILQ_INIT(&vp->v_cleanblkhd);
 1021         TAILQ_INIT(&vp->v_dirtyblkhd);
 1022         vp->v_type = VNON;
 1023         vp->v_tag = tag;
 1024         vp->v_op = vops;
 1025         *vpp = vp;
 1026         vp->v_usecount = 1;
 1027         vp->v_data = 0;
 1028         vp->v_cachedid = -1;
 1029         VI_UNLOCK(vp);
 1030         if (pollinfo != NULL) {
 1031                 mtx_destroy(&pollinfo->vpi_lock);
 1032                 uma_zfree(vnodepoll_zone, pollinfo);
 1033         }
 1034 #ifdef MAC
 1035         mac_init_vnode(vp);
 1036         if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
 1037                 mac_associate_vnode_singlelabel(mp, vp);
 1038 #endif
 1039         insmntque(vp, mp);
 1040 
 1041         return (0);
 1042 }
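/*
 * Editor's usage note, a sketch of the typical caller: a filesystem's
 * vget routine obtains a fresh vnode with its own tag and vnode-op vector,
 * along the lines of what ffs_vget() does:
 *
 *      error = getnewvnode("ufs", mp, ffs_vnodeop_p, &vp);
 *      if (error)
 *              return (error);
 *
 * The vnode comes back with v_usecount = 1 and VNON type; the caller then
 * hangs its private data off v_data and sets v_type.
 */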
 1043 
 1044 /*
 1045  * Move a vnode from one mount queue to another.
 1046  */
 1047 static void
 1048 insmntque(vp, mp)
 1049         register struct vnode *vp;
 1050         register struct mount *mp;
 1051 {
 1052 
 1053         /*
 1054          * Delete from old mount point vnode list, if on one.
 1055          */
 1056         if (vp->v_mount != NULL) {
 1057                 MNT_ILOCK(vp->v_mount);
 1058                 KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
 1059                         ("bad mount point vnode list size"));
 1060                 TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
 1061                 vp->v_mount->mnt_nvnodelistsize--;
 1062                 MNT_IUNLOCK(vp->v_mount);
 1063         }
 1064         /*
 1065          * Insert into list of vnodes for the new mount point, if available.
 1066          */
 1067         if ((vp->v_mount = mp) != NULL) {
 1068                 MNT_ILOCK(vp->v_mount);
 1069                 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
 1070                 mp->mnt_nvnodelistsize++;
 1071                 MNT_IUNLOCK(vp->v_mount);
 1072         }
 1073 }
 1074 
 1075 /*
 1076  * Update outstanding I/O count and do wakeup if requested.
 1077  */
 1078 void
 1079 vwakeup(bp)
 1080         register struct buf *bp;
 1081 {
 1082         register struct vnode *vp;
 1083 
 1084         bp->b_flags &= ~B_WRITEINPROG;
 1085         if ((vp = bp->b_vp)) {
 1086                 VI_LOCK(vp);
 1087                 vp->v_numoutput--;
 1088                 if (vp->v_numoutput < 0)
 1089                         panic("vwakeup: neg numoutput");
 1090                 if ((vp->v_numoutput == 0) && (vp->v_iflag & VI_BWAIT)) {
 1091                         vp->v_iflag &= ~VI_BWAIT;
 1092                         wakeup(&vp->v_numoutput);
 1093                 }
 1094                 VI_UNLOCK(vp);
 1095         }
 1096 }
 1097 
 1098 /*
 1099  * Flush out and invalidate all buffers associated with a vnode.
 1100  * Called with the underlying object locked.
 1101  */
 1102 int
 1103 vinvalbuf(vp, flags, cred, td, slpflag, slptimeo)
 1104         struct vnode *vp;
 1105         int flags;
 1106         struct ucred *cred;
 1107         struct thread *td;
 1108         int slpflag, slptimeo;
 1109 {
 1110         struct buf *blist;
 1111         int error;
 1112         vm_object_t object;
 1113 
 1114         GIANT_REQUIRED;
 1115 
 1116         ASSERT_VOP_LOCKED(vp, "vinvalbuf");
 1117 
 1118         VI_LOCK(vp);
 1119         if (flags & V_SAVE) {
 1120                 while (vp->v_numoutput) {
 1121                         vp->v_iflag |= VI_BWAIT;
 1122                         error = msleep(&vp->v_numoutput, VI_MTX(vp),
 1123                             slpflag | (PRIBIO + 1), "vinvlbuf", slptimeo);
 1124                         if (error) {
 1125                                 VI_UNLOCK(vp);
 1126                                 return (error);
 1127                         }
 1128                 }
 1129                 if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
 1130                         VI_UNLOCK(vp);
 1131                         if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, td)) != 0)
 1132                                 return (error);
 1133                         /*
 1134                          * XXX We could save a lock/unlock if this was only
 1135                          * enabled under INVARIANTS
 1136                          */
 1137                         VI_LOCK(vp);
 1138                         if (vp->v_numoutput > 0 ||
 1139                             !TAILQ_EMPTY(&vp->v_dirtyblkhd))
 1140                                 panic("vinvalbuf: dirty bufs");
 1141                 }
 1142         }
 1143         /*
 1144          * If you alter this loop please notice that interlock is dropped and
 1145          * reacquired in flushbuflist.  Special care is needed to ensure that
 1146          * no race conditions occur from this.
 1147          */
 1148         for (error = 0;;) {
 1149                 if ((blist = TAILQ_FIRST(&vp->v_cleanblkhd)) != 0 &&
 1150                     flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
 1151                         if (error)
 1152                                 break;
 1153                         continue;
 1154                 }
 1155                 if ((blist = TAILQ_FIRST(&vp->v_dirtyblkhd)) != 0 &&
 1156                     flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
 1157                         if (error)
 1158                                 break;
 1159                         continue;
 1160                 }
 1161                 break;
 1162         }
 1163         if (error) {
 1164                 VI_UNLOCK(vp);
 1165                 return (error);
 1166         }
 1167 
 1168         /*
 1169          * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
 1170          * have write I/O in-progress but if there is a VM object then the
 1171          * VM object can also have read-I/O in-progress.
 1172          */
 1173         do {
 1174                 while (vp->v_numoutput > 0) {
 1175                         vp->v_iflag |= VI_BWAIT;
 1176                         msleep(&vp->v_numoutput, VI_MTX(vp), PVM, "vnvlbv", 0);
 1177                 }
 1178                 VI_UNLOCK(vp);
 1179                 if (VOP_GETVOBJECT(vp, &object) == 0) {
 1180                         VM_OBJECT_LOCK(object);
 1181                         vm_object_pip_wait(object, "vnvlbx");
 1182                         VM_OBJECT_UNLOCK(object);
 1183                 }
 1184                 VI_LOCK(vp);
 1185         } while (vp->v_numoutput > 0);
 1186         VI_UNLOCK(vp);
 1187 
 1188         /*
 1189          * Destroy the copy in the VM cache, too.
 1190          */
 1191         if (VOP_GETVOBJECT(vp, &object) == 0) {
 1192                 VM_OBJECT_LOCK(object);
 1193                 vm_object_page_remove(object, 0, 0,
 1194                         (flags & V_SAVE) ? TRUE : FALSE);
 1195                 VM_OBJECT_UNLOCK(object);
 1196         }
 1197 
 1198 #ifdef INVARIANTS
 1199         VI_LOCK(vp);
 1200         if ((flags & (V_ALT | V_NORMAL)) == 0 &&
 1201             (!TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
 1202              !TAILQ_EMPTY(&vp->v_cleanblkhd)))
 1203                 panic("vinvalbuf: flush failed");
 1204         VI_UNLOCK(vp);
 1205 #endif
 1206         return (0);
 1207 }
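/*
 * Editor's usage note: callers pass V_SAVE to push dirty buffers to disk
 * via VOP_FSYNC() before invalidation (e.g. ahead of truncation or
 * revocation), or flags of 0 to simply discard everything, as the V_SAVE
 * branch above shows.
 */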
 1208 
 1209 /*
 1210  * Flush out buffers on the specified list.
 1211  *
 1212  */
 1213 static int
 1214 flushbuflist(blist, flags, vp, slpflag, slptimeo, errorp)
 1215         struct buf *blist;
 1216         int flags;
 1217         struct vnode *vp;
 1218         int slpflag, slptimeo;
 1219         int *errorp;
 1220 {
 1221         struct buf *bp, *nbp;
 1222         int found, error;
 1223 
 1224         ASSERT_VI_LOCKED(vp, "flushbuflist");
 1225 
 1226         for (found = 0, bp = blist; bp; bp = nbp) {
 1227                 nbp = TAILQ_NEXT(bp, b_vnbufs);
 1228                 if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
 1229                     ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
 1230                         continue;
 1231                 }
 1232                 found += 1;
 1233                 error = BUF_TIMELOCK(bp,
 1234                     LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, VI_MTX(vp),
 1235                     "flushbuf", slpflag, slptimeo);
 1236                 if (error) {
 1237                         if (error != ENOLCK)
 1238                                 *errorp = error;
 1239                         goto done;
 1240                 }
 1241                 /*
 1242                  * XXX Since there are no node locks for NFS, I
 1243                  * believe there is a slight chance that a delayed
 1244                  * write will occur while sleeping just above, so
 1245                  * check for it.  Note that vfs_bio_awrite expects
 1246                  * buffers to reside on a queue, while BUF_WRITE and
 1247                  * brelse do not.
 1248                  */
 1249                 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
 1250                         (flags & V_SAVE)) {
 1251 
 1252                         if (bp->b_vp == vp) {
 1253                                 if (bp->b_flags & B_CLUSTEROK) {
 1254                                         vfs_bio_awrite(bp);
 1255                                 } else {
 1256                                         bremfree(bp);
 1257                                         bp->b_flags |= B_ASYNC;
 1258                                         BUF_WRITE(bp);
 1259                                 }
 1260                         } else {
 1261                                 bremfree(bp);
 1262                                 (void) BUF_WRITE(bp);
 1263                         }
 1264                         goto done;
 1265                 }
 1266                 bremfree(bp);
 1267                 bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
 1268                 bp->b_flags &= ~B_ASYNC;
 1269                 brelse(bp);
 1270                 VI_LOCK(vp);
 1271         }
 1272         return (found);
 1273 done:
 1274         VI_LOCK(vp);
 1275         return (found);
 1276 }
 1277 
 1278 /*
 1279  * Truncate a file's buffer and pages to a specified length.  This
 1280  * is in lieu of the old vinvalbuf mechanism, which performed unneeded
 1281  * sync activity.
 1282  */
 1283 int
 1284 vtruncbuf(vp, cred, td, length, blksize)
 1285         register struct vnode *vp;
 1286         struct ucred *cred;
 1287         struct thread *td;
 1288         off_t length;
 1289         int blksize;
 1290 {
 1291         register struct buf *bp;
 1292         struct buf *nbp;
 1293         int anyfreed;
 1294         int trunclbn;
 1295 
 1296         /*
 1297          * Round up to the *next* lbn.
 1298          */
 1299         trunclbn = (length + blksize - 1) / blksize;
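        /*
         * Editor's note, a worked example: with blksize = 4096 and
         * length = 10000, trunclbn = (10000 + 4095) / 4096 = 3, so buffers
         * at lblkno >= 3 are discarded while lblkno 2, which still holds
         * the new end of file, is kept.
         */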
 1300 
 1301         ASSERT_VOP_LOCKED(vp, "vtruncbuf");
 1302 restart:
 1303         VI_LOCK(vp);
 1304         anyfreed = 1;
 1305         for (;anyfreed;) {
 1306                 anyfreed = 0;
 1307                 for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
 1308                         nbp = TAILQ_NEXT(bp, b_vnbufs);
 1309                         if (bp->b_lblkno >= trunclbn) {
 1310                                 if (BUF_LOCK(bp,
 1311                                     LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
 1312                                     VI_MTX(vp)) == ENOLCK)
 1313                                         goto restart;
 1314 
 1315                                 bremfree(bp);
 1316                                 bp->b_flags |= (B_INVAL | B_RELBUF);
 1317                                 bp->b_flags &= ~B_ASYNC;
 1318                                 brelse(bp);
 1319                                 anyfreed = 1;
 1320 
 1321                                 if (nbp &&
 1322                                     (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
 1323                                     (nbp->b_vp != vp) ||
 1324                                     (nbp->b_flags & B_DELWRI))) {
 1325                                         goto restart;
 1326                                 }
 1327                                 VI_LOCK(vp);
 1328                         }
 1329                 }
 1330 
 1331                 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
 1332                         nbp = TAILQ_NEXT(bp, b_vnbufs);
 1333                         if (bp->b_lblkno >= trunclbn) {
 1334                                 if (BUF_LOCK(bp,
 1335                                     LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
 1336                                     VI_MTX(vp)) == ENOLCK)
 1337                                         goto restart;
 1338                                 bremfree(bp);
 1339                                 bp->b_flags |= (B_INVAL | B_RELBUF);
 1340                                 bp->b_flags &= ~B_ASYNC;
 1341                                 brelse(bp);
 1342                                 anyfreed = 1;
 1343                                 if (nbp &&
 1344                                     (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
 1345                                     (nbp->b_vp != vp) ||
 1346                                     (nbp->b_flags & B_DELWRI) == 0)) {
 1347                                         goto restart;
 1348                                 }
 1349                                 VI_LOCK(vp);
 1350                         }
 1351                 }
 1352         }
 1353 
 1354         if (length > 0) {
 1355 restartsync:
 1356                 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
 1357                         nbp = TAILQ_NEXT(bp, b_vnbufs);
 1358                         if (bp->b_lblkno > 0)
 1359                                 continue;
 1360                         /*
 1361                          * Since we hold the vnode lock this should only
 1362                          * fail if we're racing with the buf daemon.
 1363                          */
 1364                         if (BUF_LOCK(bp,
 1365                             LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
 1366                             VI_MTX(vp)) == ENOLCK) {
 1367                                 goto restart;
 1368                         }
 1369                         KASSERT((bp->b_flags & B_DELWRI),
 1370                             ("buf(%p) on dirty queue without DELWRI.", bp));
 1371 
 1372                         bremfree(bp);
 1373                         bawrite(bp);
 1374                         VI_LOCK(vp);
 1375                         goto restartsync;
 1376                 }
 1377         }
 1378         
 1379         while (vp->v_numoutput > 0) {
 1380                 vp->v_iflag |= VI_BWAIT;
 1381                 msleep(&vp->v_numoutput, VI_MTX(vp), PVM, "vbtrunc", 0);
 1382         }
 1383         VI_UNLOCK(vp);
 1384         vnode_pager_setsize(vp, length);
 1385 
 1386         return (0);
 1387 }
 1388 
 1389 /*
 1390  * buf_splay() - splay tree core for the clean/dirty list of buffers in
 1391  *               a vnode.
 1392  *
 1393  *      NOTE: We have to deal with the special case of a background bitmap
 1394  *      buffer, a situation where two buffers will have the same logical
 1395  *      block offset.  We want (1) only the foreground buffer to be accessed
  1396  *      block offset.  We want (1) only the foreground buffer to be accessed
  1397  *      in a lookup and (2) to differentiate between the foreground and
 1398  *      tree cannot normally handle multiple entities with the same 'index'.
 1399  *      We accomplish this by adding differentiating flags to the splay tree's
 1400  *      numerical domain.
 1401  */
 1402 static
 1403 struct buf *
 1404 buf_splay(daddr_t lblkno, b_xflags_t xflags, struct buf *root)
 1405 {
 1406         struct buf dummy;
 1407         struct buf *lefttreemax, *righttreemin, *y;
 1408 
 1409         if (root == NULL)
 1410                 return (NULL);
 1411         lefttreemax = righttreemin = &dummy;
 1412         for (;;) {
 1413                 if (lblkno < root->b_lblkno ||
 1414                     (lblkno == root->b_lblkno &&
 1415                     (xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
 1416                         if ((y = root->b_left) == NULL)
 1417                                 break;
 1418                         if (lblkno < y->b_lblkno) {
 1419                                 /* Rotate right. */
 1420                                 root->b_left = y->b_right;
 1421                                 y->b_right = root;
 1422                                 root = y;
 1423                                 if ((y = root->b_left) == NULL)
 1424                                         break;
 1425                         }
 1426                         /* Link into the new root's right tree. */
 1427                         righttreemin->b_left = root;
 1428                         righttreemin = root;
 1429                 } else if (lblkno > root->b_lblkno ||
 1430                     (lblkno == root->b_lblkno &&
 1431                     (xflags & BX_BKGRDMARKER) > (root->b_xflags & BX_BKGRDMARKER))) {
 1432                         if ((y = root->b_right) == NULL)
 1433                                 break;
 1434                         if (lblkno > y->b_lblkno) {
 1435                                 /* Rotate left. */
 1436                                 root->b_right = y->b_left;
 1437                                 y->b_left = root;
 1438                                 root = y;
 1439                                 if ((y = root->b_right) == NULL)
 1440                                         break;
 1441                         }
 1442                         /* Link into the new root's left tree. */
 1443                         lefttreemax->b_right = root;
 1444                         lefttreemax = root;
 1445                 } else {
 1446                         break;
 1447                 }
 1448                 root = y;
 1449         }
 1450         /* Assemble the new root. */
 1451         lefttreemax->b_right = root->b_left;
 1452         righttreemin->b_left = root->b_right;
 1453         root->b_left = dummy.b_right;
 1454         root->b_right = dummy.b_left;
 1455         return (root);
 1456 }
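
/*
 * Illustrative sketch, not part of the build: the "differentiating
 * flags" trick above is equivalent to comparing a composite key of
 * (b_lblkno, BX_BKGRDMARKER bit) rather than the block number alone.
 * Written out as a hypothetical three-way comparator:
 */
#if 0
static int
buf_splay_cmp_sketch(daddr_t lblkno, b_xflags_t xflags, struct buf *bp)
{
        if (lblkno != bp->b_lblkno)
                return (lblkno < bp->b_lblkno ? -1 : 1);
        /* Same logical block: the background-marker bit breaks the tie. */
        if ((xflags & BX_BKGRDMARKER) != (bp->b_xflags & BX_BKGRDMARKER))
                return ((xflags & BX_BKGRDMARKER) == 0 ? -1 : 1);
        return (0);
}
#endif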
 1457 
 1458 static
 1459 void
 1460 buf_vlist_remove(struct buf *bp)
 1461 {
 1462         struct vnode *vp = bp->b_vp;
 1463         struct buf *root;
 1464 
 1465         ASSERT_VI_LOCKED(vp, "buf_vlist_remove");
 1466         if (bp->b_xflags & BX_VNDIRTY) {
 1467                 if (bp != vp->v_dirtyblkroot) {
 1468                         root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_dirtyblkroot);
 1469                         KASSERT(root == bp, ("splay lookup failed during dirty remove"));
 1470                 }
 1471                 if (bp->b_left == NULL) {
 1472                         root = bp->b_right;
 1473                 } else {
 1474                         root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
 1475                         root->b_right = bp->b_right;
 1476                 }
 1477                 vp->v_dirtyblkroot = root;
 1478                 TAILQ_REMOVE(&vp->v_dirtyblkhd, bp, b_vnbufs);
 1479                 vp->v_dirtybufcnt--;
 1480         } else {
 1481                 /* KASSERT(bp->b_xflags & BX_VNCLEAN, ("bp wasn't clean")); */
 1482                 if (bp != vp->v_cleanblkroot) {
 1483                         root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_cleanblkroot);
 1484                         KASSERT(root == bp, ("splay lookup failed during clean remove"));
 1485                 }
 1486                 if (bp->b_left == NULL) {
 1487                         root = bp->b_right;
 1488                 } else {
 1489                         root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
 1490                         root->b_right = bp->b_right;
 1491                 }
 1492                 vp->v_cleanblkroot = root;
 1493                 TAILQ_REMOVE(&vp->v_cleanblkhd, bp, b_vnbufs);
 1494                 vp->v_cleanbufcnt--;
 1495         }
 1496         bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
 1497 }
 1498 
 1499 /*
 1500  * Add the buffer to the sorted clean or dirty block list using a
 1501  * splay tree algorithm.
 1502  *
 1503  * NOTE: xflags is passed as a constant, optimizing this inline function!
 1504  */
 1505 static 
 1506 void
 1507 buf_vlist_add(struct buf *bp, struct vnode *vp, b_xflags_t xflags)
 1508 {
 1509         struct buf *root;
 1510 
 1511         ASSERT_VI_LOCKED(vp, "buf_vlist_add");
 1512         bp->b_xflags |= xflags;
 1513         if (xflags & BX_VNDIRTY) {
 1514                 root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_dirtyblkroot);
 1515                 if (root == NULL) {
 1516                         bp->b_left = NULL;
 1517                         bp->b_right = NULL;
 1518                         TAILQ_INSERT_TAIL(&vp->v_dirtyblkhd, bp, b_vnbufs);
 1519                 } else if (bp->b_lblkno < root->b_lblkno ||
 1520                     (bp->b_lblkno == root->b_lblkno &&
 1521                     (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
 1522                         bp->b_left = root->b_left;
 1523                         bp->b_right = root;
 1524                         root->b_left = NULL;
 1525                         TAILQ_INSERT_BEFORE(root, bp, b_vnbufs);
 1526                 } else {
 1527                         bp->b_right = root->b_right;
 1528                         bp->b_left = root;
 1529                         root->b_right = NULL;
 1530                         TAILQ_INSERT_AFTER(&vp->v_dirtyblkhd, 
 1531                             root, bp, b_vnbufs);
 1532                 }
 1533                 vp->v_dirtybufcnt++;
 1534                 vp->v_dirtyblkroot = bp;
 1535         } else {
 1536                 /* KASSERT(xflags & BX_VNCLEAN, ("xflags not clean")); */
 1537                 root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_cleanblkroot);
 1538                 if (root == NULL) {
 1539                         bp->b_left = NULL;
 1540                         bp->b_right = NULL;
 1541                         TAILQ_INSERT_TAIL(&vp->v_cleanblkhd, bp, b_vnbufs);
 1542                 } else if (bp->b_lblkno < root->b_lblkno ||
 1543                     (bp->b_lblkno == root->b_lblkno &&
 1544                     (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
 1545                         bp->b_left = root->b_left;
 1546                         bp->b_right = root;
 1547                         root->b_left = NULL;
 1548                         TAILQ_INSERT_BEFORE(root, bp, b_vnbufs);
 1549                 } else {
 1550                         bp->b_right = root->b_right;
 1551                         bp->b_left = root;
 1552                         root->b_right = NULL;
 1553                         TAILQ_INSERT_AFTER(&vp->v_cleanblkhd, 
 1554                             root, bp, b_vnbufs);
 1555                 }
 1556                 vp->v_cleanbufcnt++;
 1557                 vp->v_cleanblkroot = bp;
 1558         }
 1559 }
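
/*
 * Because every caller passes xflags as a compile-time constant (see
 * the NOTE above), the dirty/clean branch folds away when this
 * function is inlined.  Call sites later in this file include:
 *
 *      buf_vlist_add(bp, vp, BX_VNCLEAN);      (bgetvp)
 *      buf_vlist_add(bp, newvp, BX_VNDIRTY);   (reassignbuf)
 */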
 1560 
 1561 /*
 1562  * Lookup a buffer using the splay tree.  Note that we specifically avoid
 1563  * shadow buffers used in background bitmap writes.
 1564  *
 1565  * This code isn't quite as efficient as it could be because we are maintaining
 1566  * two sorted lists and do not know which list the block resides in.
 1567  *
 1568  * During a "make buildworld" the desired buffer is found at one of
 1569  * the roots more than 60% of the time.  Thus, checking both roots
 1570  * before performing either splay eliminates unnecessary splays on the
 1571  * first tree splayed.
 1572  */
 1573 struct buf *
 1574 gbincore(struct vnode *vp, daddr_t lblkno)
 1575 {
 1576         struct buf *bp;
 1577 
 1578         GIANT_REQUIRED;
 1579 
 1580         ASSERT_VI_LOCKED(vp, "gbincore");
 1581         if ((bp = vp->v_cleanblkroot) != NULL &&
 1582             bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
 1583                 return (bp);
 1584         if ((bp = vp->v_dirtyblkroot) != NULL &&
 1585             bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
 1586                 return (bp);
 1587         if ((bp = vp->v_cleanblkroot) != NULL) {
 1588                 vp->v_cleanblkroot = bp = buf_splay(lblkno, 0, bp);
 1589                 if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
 1590                         return (bp);
 1591         }
 1592         if ((bp = vp->v_dirtyblkroot) != NULL) {
 1593                 vp->v_dirtyblkroot = bp = buf_splay(lblkno, 0, bp);
 1594                 if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
 1595                         return (bp);
 1596         }
 1597         return (NULL);
 1598 }
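
/*
 * Illustrative caller sketch: gbincore() must be entered with the
 * vnode interlock held (see the ASSERT above), and the buffer it
 * returns is not locked.  A minimal lookup therefore looks like:
 */
#if 0
        VI_LOCK(vp);
        bp = gbincore(vp, lblkno);
        /* bp, if non-NULL, is resident but still unlocked here. */
        VI_UNLOCK(vp);
#endif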
 1599 
 1600 /*
 1601  * Associate a buffer with a vnode.
 1602  */
 1603 void
 1604 bgetvp(vp, bp)
 1605         register struct vnode *vp;
 1606         register struct buf *bp;
 1607 {
 1608         KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));
 1609 
 1610         KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
 1611             ("bgetvp: bp already attached! %p", bp));
 1612 
 1613         ASSERT_VI_LOCKED(vp, "bgetvp");
 1614         vholdl(vp);
 1615         bp->b_vp = vp;
 1616         bp->b_dev = vn_todev(vp);
 1617         /*
 1618          * Insert onto list for new vnode.
 1619          */
 1620         buf_vlist_add(bp, vp, BX_VNCLEAN);
 1621 }
 1622 
 1623 /*
 1624  * Disassociate a buffer from a vnode.
 1625  */
 1626 void
 1627 brelvp(bp)
 1628         register struct buf *bp;
 1629 {
 1630         struct vnode *vp;
 1631 
 1632         KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
 1633 
 1634         /*
 1635          * Delete from old vnode list, if on one.
 1636          */
 1637         vp = bp->b_vp;
 1638         VI_LOCK(vp);
 1639         if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
 1640                 buf_vlist_remove(bp);
 1641         if ((vp->v_iflag & VI_ONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
 1642                 vp->v_iflag &= ~VI_ONWORKLST;
 1643                 mtx_lock(&sync_mtx);
 1644                 LIST_REMOVE(vp, v_synclist);
 1645                 mtx_unlock(&sync_mtx);
 1646         }
 1647         vdropl(vp);
 1648         bp->b_vp = (struct vnode *) 0;
 1649         if (bp->b_object)
 1650                 bp->b_object = NULL;
 1651         VI_UNLOCK(vp);
 1652 }
 1653 
 1654 /*
 1655  * Add an item to the syncer work queue.
 1656  */
 1657 static void
 1658 vn_syncer_add_to_worklist(struct vnode *vp, int delay)
 1659 {
 1660         int slot;
 1661 
 1662         ASSERT_VI_LOCKED(vp, "vn_syncer_add_to_worklist");
 1663 
 1664         mtx_lock(&sync_mtx);
 1665         if (vp->v_iflag & VI_ONWORKLST)
 1666                 LIST_REMOVE(vp, v_synclist);
 1667         else
 1668                 vp->v_iflag |= VI_ONWORKLST;
 1669 
 1670         if (delay > syncer_maxdelay - 2)
 1671                 delay = syncer_maxdelay - 2;
 1672         slot = (syncer_delayno + delay) & syncer_mask;
 1673 
 1674         LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
 1675         mtx_unlock(&sync_mtx);
 1676 }
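
/*
 * The pending table is a hashed timer wheel: syncer_delayno is the
 * hand, and syncer_mask is assumed (from the table's hashinit()-style
 * power-of-two sizing) to be bucket_count - 1, so the '&' above is a
 * cheap modulus.  Worked example with 128 buckets (syncer_mask == 127),
 * syncer_delayno == 120 and delay == 30:
 *
 *      slot = (120 + 30) & 127 = 150 & 127 = 22
 *
 * i.e. the vnode is queued 30 positions ahead of the hand, wrapping
 * around the ring.
 */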
 1677 
 1678 struct  proc *updateproc;
 1679 static void sched_sync(void);
 1680 static struct kproc_desc up_kp = {
 1681         "syncer",
 1682         sched_sync,
 1683         &updateproc
 1684 };
 1685 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
 1686 
 1687 /*
 1688  * System filesystem synchronizer daemon.
 1689  */
 1690 static void
 1691 sched_sync(void)
 1692 {
 1693         struct synclist *next;
 1694         struct synclist *slp;
 1695         struct vnode *vp;
 1696         struct mount *mp;
 1697         long starttime;
 1698         struct thread *td = FIRST_THREAD_IN_PROC(updateproc);  /* XXXKSE */
 1699 
 1700         mtx_lock(&Giant);
 1701 
 1702         EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, td->td_proc,
 1703             SHUTDOWN_PRI_LAST);
 1704 
 1705         for (;;) {
 1706                 kthread_suspend_check(td->td_proc);
 1707 
 1708                 starttime = time_second;
 1709 
 1710                 /*
 1711                  * Push files whose dirty time has expired.  Be careful
 1712                  * of interrupt race on slp queue.
 1713                  */
 1714                 mtx_lock(&sync_mtx);
 1715                 slp = &syncer_workitem_pending[syncer_delayno];
 1716                 syncer_delayno += 1;
 1717                 if (syncer_delayno == syncer_maxdelay)
 1718                         syncer_delayno = 0;
 1719                 next = &syncer_workitem_pending[syncer_delayno];
 1720 
 1721                 while ((vp = LIST_FIRST(slp)) != NULL) {
 1722                         if (VOP_ISLOCKED(vp, NULL) != 0 || 
 1723                             vn_start_write(vp, &mp, V_NOWAIT) != 0) {
 1724                                 LIST_REMOVE(vp, v_synclist);
 1725                                 LIST_INSERT_HEAD(next, vp, v_synclist);
 1726                                 continue;
 1727                         }
 1728                         if (VI_TRYLOCK(vp) == 0) {
 1729                                 LIST_REMOVE(vp, v_synclist);
 1730                                 LIST_INSERT_HEAD(next, vp, v_synclist);
 1731                                 vn_finished_write(mp);
 1732                                 continue;
 1733                         }
 1734                         /*
 1735                          * We use vhold in case the vnode does not
 1736                          * successfully sync.  vhold prevents the vnode from
 1737                          * going away when we unlock the sync_mtx so that
 1738                          * we can acquire the vnode interlock.
 1739                          */
 1740                         vholdl(vp);
 1741                         mtx_unlock(&sync_mtx);
 1742                         vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK, td);
 1743                         (void) VOP_FSYNC(vp, td->td_ucred, MNT_LAZY, td);
 1744                         VOP_UNLOCK(vp, 0, td);
 1745                         vn_finished_write(mp);
 1746                         VI_LOCK(vp);
 1747                         if ((vp->v_iflag & VI_ONWORKLST) != 0) {
 1748                                 /*
 1749                                  * Put us back on the worklist.  The worklist
 1750                                  * routine will remove us from our current
 1751                                  * position and then add us back in at a later
 1752                                  * position.
 1753                                  */
 1754                                 vn_syncer_add_to_worklist(vp, syncdelay);
 1755                         }
 1756                         vdropl(vp);
 1757                         VI_UNLOCK(vp);
 1758                         mtx_lock(&sync_mtx);
 1759                 }
 1760                 mtx_unlock(&sync_mtx);
 1761 
 1762                 /*
 1763                  * Do soft update processing.
 1764                  */
 1765                 if (softdep_process_worklist_hook != NULL)
 1766                         (*softdep_process_worklist_hook)(NULL);
 1767 
 1768                 /*
 1769                  * The variable rushjob allows the kernel to speed up the
 1770                  * processing of the filesystem syncer process. A rushjob
 1771                  * value of N tells the filesystem syncer to process the next
 1772                  * N seconds worth of work on its queue ASAP. Currently rushjob
 1773                  * is used by the soft update code to speed up the filesystem
 1774                  * syncer process when the incore state is getting so far
 1775                  * ahead of the disk that the kernel memory pool is being
 1776                  * threatened with exhaustion.
 1777                  */
 1778                 mtx_lock(&sync_mtx);
 1779                 if (rushjob > 0) {
 1780                         rushjob -= 1;
 1781                         mtx_unlock(&sync_mtx);
 1782                         continue;
 1783                 }
 1784                 mtx_unlock(&sync_mtx);
 1785                 /*
 1786                  * If it has taken us less than a second to process the
 1787                  * current work, then wait. Otherwise start right over
 1788                  * again. We can still lose time if any single round
 1789                  * takes more than two seconds, but it does not really
 1790                  * matter as we are just trying to generally pace the
 1791                  * filesystem activity.
 1792                  */
 1793                 if (time_second == starttime)
 1794                         tsleep(&lbolt, PPAUSE, "syncer", 0);
 1795         }
 1796 }
 1797 
 1798 /*
 1799  * Request the syncer daemon to speed up its work.
 1800  * We never push it to speed up more than half of its
 1801  * normal turn time, otherwise it could take over the cpu.
 1802  * XXXKSE  only one update?
 1803  */
 1804 int
 1805 speedup_syncer()
 1806 {
 1807         struct thread *td;
 1808         int ret = 0;
 1809 
 1810         td = FIRST_THREAD_IN_PROC(updateproc);
 1811         mtx_lock_spin(&sched_lock);
 1812         if (td->td_wchan == &lbolt) {
 1813                 unsleep(td);
 1814                 TD_CLR_SLEEPING(td);
 1815                 setrunnable(td);
 1816         }
 1817         mtx_unlock_spin(&sched_lock);
 1818         mtx_lock(&sync_mtx);
 1819         if (rushjob < syncdelay / 2) {
 1820                 rushjob += 1;
 1821                 stat_rush_requests += 1;
 1822                 ret = 1;
 1823         }
 1824         mtx_unlock(&sync_mtx);
 1825         return (ret);
 1826 }
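
/*
 * Illustrative (hypothetical) caller sketch: per the rushjob comment
 * in sched_sync(), the soft update code nudges the syncer when its
 * worklist backs up, along these lines:
 */
#if 0
        if (softdep_worklist_backed_up)         /* hypothetical condition */
                (void) speedup_syncer();
#endif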
 1827 
 1828 /*
 1829  * Associate a p-buffer with a vnode.
 1830  *
 1831  * Also sets B_PAGING flag to indicate that the vnode is not fully associated
 1832  * with the buffer, i.e., the bp has not been linked into the vnode or
 1833  * ref-counted.
 1834  */
 1835 void
 1836 pbgetvp(vp, bp)
 1837         register struct vnode *vp;
 1838         register struct buf *bp;
 1839 {
 1840 
 1841         KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
 1842 
 1843         bp->b_vp = vp;
 1844         bp->b_object = vp->v_object;
 1845         bp->b_flags |= B_PAGING;
 1846         bp->b_dev = vn_todev(vp);
 1847 }
 1848 
 1849 /*
 1850  * Disassociate a p-buffer from a vnode.
 1851  */
 1852 void
 1853 pbrelvp(bp)
 1854         register struct buf *bp;
 1855 {
 1856 
 1857         KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
 1858 
 1859         /* XXX REMOVE ME */
 1860         VI_LOCK(bp->b_vp);
 1861         if (TAILQ_NEXT(bp, b_vnbufs) != NULL) {
 1862                 panic(
 1863                     "relpbuf(): b_vp was probably reassignbuf()d %p %x",
 1864                     bp,
 1865                     (int)bp->b_flags
 1866                 );
 1867         }
 1868         VI_UNLOCK(bp->b_vp);
 1869         bp->b_vp = (struct vnode *) 0;
 1870         bp->b_object = NULL;
 1871         bp->b_flags &= ~B_PAGING;
 1872 }
 1873 
 1874 /*
 1875  * Reassign a buffer from one vnode to another.
 1876  * Used to assign file specific control information
 1877  * (indirect blocks) to the vnode to which they belong.
 1878  */
 1879 void
 1880 reassignbuf(bp, newvp)
 1881         register struct buf *bp;
 1882         register struct vnode *newvp;
 1883 {
 1884         struct vnode *vp;
 1885         int delay;
 1886 
 1887         if (newvp == NULL) {
 1888                 printf("reassignbuf: NULL");
 1889                 return;
 1890         }
 1891         vp = bp->b_vp;
 1892         ++reassignbufcalls;
 1893 
 1894         /*
 1895          * B_PAGING flagged buffers cannot be reassigned because their vp
 1896          * is not fully linked in.
 1897          */
 1898         if (bp->b_flags & B_PAGING)
 1899                 panic("cannot reassign paging buffer");
 1900 
 1901         /*
 1902          * Delete from old vnode list, if on one.
 1903          */
 1904         VI_LOCK(vp);
 1905         if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) {
 1906                 buf_vlist_remove(bp);
 1907                 if (vp != newvp) {
 1908                         vdropl(bp->b_vp);
 1909                         bp->b_vp = NULL;        /* for clarification */
 1910                 }
 1911         }
 1912         if (vp != newvp) {
 1913                 VI_UNLOCK(vp);
 1914                 VI_LOCK(newvp);
 1915         }
 1916         /*
 1917          * If dirty, put on list of dirty buffers; otherwise insert onto list
 1918          * of clean buffers.
 1919          */
 1920         if (bp->b_flags & B_DELWRI) {
 1921                 if ((newvp->v_iflag & VI_ONWORKLST) == 0) {
 1922                         switch (newvp->v_type) {
 1923                         case VDIR:
 1924                                 delay = dirdelay;
 1925                                 break;
 1926                         case VCHR:
 1927                                 if (newvp->v_rdev->si_mountpoint != NULL) {
 1928                                         delay = metadelay;
 1929                                         break;
 1930                                 }
 1931                                 /* FALLTHROUGH */
 1932                         default:
 1933                                 delay = filedelay;
 1934                         }
 1935                         vn_syncer_add_to_worklist(newvp, delay);
 1936                 }
 1937                 buf_vlist_add(bp, newvp, BX_VNDIRTY);
 1938         } else {
 1939                 buf_vlist_add(bp, newvp, BX_VNCLEAN);
 1940 
 1941                 if ((newvp->v_iflag & VI_ONWORKLST) &&
 1942                     TAILQ_EMPTY(&newvp->v_dirtyblkhd)) {
 1943                         mtx_lock(&sync_mtx);
 1944                         LIST_REMOVE(newvp, v_synclist);
 1945                         mtx_unlock(&sync_mtx);
 1946                         newvp->v_iflag &= ~VI_ONWORKLST;
 1947                 }
 1948         }
 1949         if (bp->b_vp != newvp) {
 1950                 bp->b_vp = newvp;
 1951                 vholdl(bp->b_vp);
 1952         }
 1953         VI_UNLOCK(newvp);
 1954 }
 1955 
 1956 /*
 1957  * Create a vnode for a device.
 1958  * Used for mounting the root filesystem.
 1959  */
 1960 int
 1961 bdevvp(dev, vpp)
 1962         dev_t dev;
 1963         struct vnode **vpp;
 1964 {
 1965         register struct vnode *vp;
 1966         struct vnode *nvp;
 1967         int error;
 1968 
 1969         if (dev == NODEV) {
 1970                 *vpp = NULLVP;
 1971                 return (ENXIO);
 1972         }
 1973         if (vfinddev(dev, VCHR, vpp))
 1974                 return (0);
 1975         error = getnewvnode("none", (struct mount *)0, spec_vnodeop_p, &nvp);
 1976         if (error) {
 1977                 *vpp = NULLVP;
 1978                 return (error);
 1979         }
 1980         vp = nvp;
 1981         vp->v_type = VCHR;
 1982         addalias(vp, dev);
 1983         *vpp = vp;
 1984         return (0);
 1985 }
 1986 
 1987 static void
 1988 v_incr_usecount(struct vnode *vp, int delta)
 1989 {
 1990         vp->v_usecount += delta;
 1991         if (vp->v_type == VCHR && vp->v_rdev != NULL) {
 1992                 mtx_lock(&spechash_mtx);
 1993                 vp->v_rdev->si_usecount += delta;
 1994                 mtx_unlock(&spechash_mtx);
 1995         }
 1996 }
 1997 
 1998 /*
 1999  * Add vnode to the alias list hung off the dev_t.
 2000  *
 2001  * The reason for this gunk is that multiple vnodes can reference
 2002  * the same physical device, so checking vp->v_usecount to see
 2003  * how many users there are is inadequate; the v_usecounts of
 2004  * the vnodes need to be accumulated.  vcount() does that.
 2005  */
 2006 struct vnode *
 2007 addaliasu(nvp, nvp_rdev)
 2008         struct vnode *nvp;
 2009         udev_t nvp_rdev;
 2010 {
 2011         struct vnode *ovp;
 2012         vop_t **ops;
 2013         dev_t dev;
 2014 
 2015         if (nvp->v_type == VBLK)
 2016                 return (nvp);
 2017         if (nvp->v_type != VCHR)
 2018                 panic("addaliasu on non-special vnode");
 2019         dev = udev2dev(nvp_rdev, 0);
 2020         /*
 2021          * Check to see if we have a bdevvp vnode with no associated
 2022          * filesystem. If so, we want to associate the filesystem of
 2023  * the newly created vnode with the bdevvp vnode and
 2024          * discard the newly created vnode rather than leaving the
 2025          * bdevvp vnode lying around with no associated filesystem.
 2026          */
 2027         if (vfinddev(dev, nvp->v_type, &ovp) == 0 || ovp->v_data != NULL) {
 2028                 addalias(nvp, dev);
 2029                 return (nvp);
 2030         }
 2031         /*
 2032          * Discard unneeded vnode, but save its node specific data.
 2033          * Note that if there is a lock, it is carried over in the
 2034          * node specific data to the replacement vnode.
 2035          */
 2036         vref(ovp);
 2037         ovp->v_data = nvp->v_data;
 2038         ovp->v_tag = nvp->v_tag;
 2039         nvp->v_data = NULL;
 2040         lockdestroy(ovp->v_vnlock);
 2041         lockinit(ovp->v_vnlock, PVFS, nvp->v_vnlock->lk_wmesg,
 2042             nvp->v_vnlock->lk_timo, nvp->v_vnlock->lk_flags & LK_EXTFLG_MASK);
 2043         ops = ovp->v_op;
 2044         ovp->v_op = nvp->v_op;
 2045         if (VOP_ISLOCKED(nvp, curthread)) {
 2046                 VOP_UNLOCK(nvp, 0, curthread);
 2047                 vn_lock(ovp, LK_EXCLUSIVE | LK_RETRY, curthread);
 2048         }
 2049         nvp->v_op = ops;
 2050         insmntque(ovp, nvp->v_mount);
 2051         vrele(nvp);
 2052         vgone(nvp);
 2053         return (ovp);
 2054 }
 2055 
 2056 /* This is a local helper function that does the same as addaliasu, but for a
 2057  * dev_t instead of a udev_t. */
 2058 static void
 2059 addalias(nvp, dev)
 2060         struct vnode *nvp;
 2061         dev_t dev;
 2062 {
 2063 
 2064         KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode"));
 2065         nvp->v_rdev = dev;
 2066         VI_LOCK(nvp);
 2067         mtx_lock(&spechash_mtx);
 2068         SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
 2069         dev->si_usecount += nvp->v_usecount;
 2070         mtx_unlock(&spechash_mtx);
 2071         VI_UNLOCK(nvp);
 2072 }
 2073 
 2074 /*
 2075  * Grab a particular vnode from the free list, increment its
 2076  * reference count and lock it. The vnode lock bit is set if the
 2077  * vnode is being eliminated in vgone. The process is awakened
 2078  * when the transition is completed, and an error returned to
 2079  * indicate that the vnode is no longer usable (possibly having
 2080  * been changed to a new filesystem type).
 2081  */
 2082 int
 2083 vget(vp, flags, td)
 2084         register struct vnode *vp;
 2085         int flags;
 2086         struct thread *td;
 2087 {
 2088         int error;
 2089 
 2090         /*
 2091          * If the vnode is in the process of being cleaned out for
 2092          * another use, we wait for the cleaning to finish and then
 2093          * return failure. Cleaning is determined by checking that
 2094          * the VI_XLOCK flag is set.
 2095          */
 2096         if ((flags & LK_INTERLOCK) == 0)
 2097                 VI_LOCK(vp);
 2098         if (vp->v_iflag & VI_XLOCK && vp->v_vxproc != curthread) {
 2099                 if ((flags & LK_NOWAIT) == 0) {
 2100                         vp->v_iflag |= VI_XWANT;
 2101                         msleep(vp, VI_MTX(vp), PINOD | PDROP, "vget", 0);
 2102                         return (ENOENT);
 2103                 }
 2104                 VI_UNLOCK(vp);
 2105                 return (EBUSY);
 2106         }
 2107 
 2108         v_incr_usecount(vp, 1);
 2109 
 2110         if (VSHOULDBUSY(vp))
 2111                 vbusy(vp);
 2112         if (flags & LK_TYPE_MASK) {
 2113                 if ((error = vn_lock(vp, flags | LK_INTERLOCK, td)) != 0) {
 2114                         /*
 2115                          * must expand vrele here because we do not want
 2116                          * to call VOP_INACTIVE if the reference count
 2117                          * drops back to zero since it was never really
 2118                          * active. We must remove it from the free list
 2119                          * before sleeping so that multiple processes do
 2120                          * not try to recycle it.
 2121                          */
 2122                         VI_LOCK(vp);
 2123                         v_incr_usecount(vp, -1);
 2124                         if (VSHOULDFREE(vp))
 2125                                 vfree(vp);
 2126                         else
 2127                                 vlruvp(vp);
 2128                         VI_UNLOCK(vp);
 2129                 }
 2130                 return (error);
 2131         }
 2132         VI_UNLOCK(vp);
 2133         return (0);
 2134 }
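
/*
 * Illustrative caller sketch: with a lock type such as LK_EXCLUSIVE in
 * flags, a successful vget() returns the vnode both referenced and
 * locked, so the matching release is vput():
 */
#if 0
        if (vget(vp, LK_EXCLUSIVE, td) == 0) {
                /* ... operate on the locked, referenced vnode ... */
                vput(vp);               /* drops the lock and the reference */
        }
#endif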
 2135 
 2136 /*
 2137  * Increase the reference count of a vnode.
 2138  */
 2139 void
 2140 vref(struct vnode *vp)
 2141 {
 2142         VI_LOCK(vp);
 2143         v_incr_usecount(vp, 1);
 2144         VI_UNLOCK(vp);
 2145 }
 2146 
 2147 /*
 2148  * Return reference count of a vnode.
 2149  *
 2150  * The results of this call are only guaranteed when some mechanism other
 2151  * than the VI lock is used to stop other processes from gaining references
 2152  * to the vnode.  This may be the case if the caller holds the only reference.
 2153  * This is also useful when stale data is acceptable as race conditions may
 2154  * be accounted for by some other means.
 2155  */
 2156 int
 2157 vrefcnt(struct vnode *vp)
 2158 {
 2159         int usecnt;
 2160 
 2161         VI_LOCK(vp);
 2162         usecnt = vp->v_usecount;
 2163         VI_UNLOCK(vp);
 2164 
 2165         return (usecnt);
 2166 }
 2167 
 2168 
 2169 /*
 2170  * Vnode put/release.
 2171  * If count drops to zero, call inactive routine and return to freelist.
 2172  */
 2173 void
 2174 vrele(vp)
 2175         struct vnode *vp;
 2176 {
 2177         struct thread *td = curthread;  /* XXX */
 2178 
 2179         KASSERT(vp != NULL, ("vrele: null vp"));
 2180 
 2181         VI_LOCK(vp);
 2182 
 2183         /* Skip this v_writecount check if we're going to panic below. */
 2184         KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
 2185             ("vrele: missed vn_close"));
 2186 
 2187         if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
 2188             vp->v_usecount == 1)) {
 2189                 v_incr_usecount(vp, -1);
 2190                 VI_UNLOCK(vp);
 2191 
 2192                 return;
 2193         }
 2194 
 2195         if (vp->v_usecount == 1) {
 2196                 v_incr_usecount(vp, -1);
 2197                 /*
 2198                  * We must call VOP_INACTIVE with the node locked. Mark
 2199                  * as VI_DOINGINACT to avoid recursion.
 2200                  */
 2201                 if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) == 0) {
 2202                         VI_LOCK(vp);
 2203                         vp->v_iflag |= VI_DOINGINACT;
 2204                         VI_UNLOCK(vp);
 2205                         VOP_INACTIVE(vp, td);
 2206                         VI_LOCK(vp);
 2207                         KASSERT(vp->v_iflag & VI_DOINGINACT,
 2208                             ("vrele: lost VI_DOINGINACT"));
 2209                         vp->v_iflag &= ~VI_DOINGINACT;
 2210                 } else
 2211                         VI_LOCK(vp);
 2212                 if (VSHOULDFREE(vp))
 2213                         vfree(vp);
 2214                 else
 2215                         vlruvp(vp);
 2216                 VI_UNLOCK(vp);
 2217 
 2218         } else {
 2219 #ifdef DIAGNOSTIC
 2220                 vprint("vrele: negative ref count", vp);
 2221 #endif
 2222                 VI_UNLOCK(vp);
 2223                 panic("vrele: negative ref cnt");
 2224         }
 2225 }
 2226 
 2227 /*
 2228  * Release an already locked vnode.  This gives the same effects as
 2229  * unlock+vrele(), but takes less time and avoids releasing and
 2230  * re-acquiring the lock (as vrele() acquires the lock internally).
 2231  */
 2232 void
 2233 vput(vp)
 2234         struct vnode *vp;
 2235 {
 2236         struct thread *td = curthread;  /* XXX */
 2237 
 2238         GIANT_REQUIRED;
 2239 
 2240         KASSERT(vp != NULL, ("vput: null vp"));
 2241         VI_LOCK(vp);
 2242         /* Skip this v_writecount check if we're going to panic below. */
 2243         KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
 2244             ("vput: missed vn_close"));
 2245 
 2246         if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
 2247             vp->v_usecount == 1)) {
 2248                 v_incr_usecount(vp, -1);
 2249                 VOP_UNLOCK(vp, LK_INTERLOCK, td);
 2250                 return;
 2251         }
 2252 
 2253         if (vp->v_usecount == 1) {
 2254                 v_incr_usecount(vp, -1);
 2255                 /*
 2256                  * We must call VOP_INACTIVE with the node locked, so
 2257                  * we just need to release the vnode mutex. Mark as
 2258  * VI_DOINGINACT to avoid recursion.
 2259                  */
 2260                 vp->v_iflag |= VI_DOINGINACT;
 2261                 VI_UNLOCK(vp);
 2262                 VOP_INACTIVE(vp, td);
 2263                 VI_LOCK(vp);
 2264                 KASSERT(vp->v_iflag & VI_DOINGINACT,
 2265                     ("vput: lost VI_DOINGINACT"));
 2266                 vp->v_iflag &= ~VI_DOINGINACT;
 2267                 if (VSHOULDFREE(vp))
 2268                         vfree(vp);
 2269                 else
 2270                         vlruvp(vp);
 2271                 VI_UNLOCK(vp);
 2272 
 2273         } else {
 2274 #ifdef DIAGNOSTIC
 2275                 vprint("vput: negative ref count", vp);
 2276 #endif
 2277                 panic("vput: negative ref cnt");
 2278         }
 2279 }
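
/*
 * Per the comment above, for a vnode locked via vn_lock() these two
 * forms are equivalent; vput() merely avoids the extra lock round-trip:
 */
#if 0
        VOP_UNLOCK(vp, 0, td);
        vrele(vp);
        /* ... is the same as ... */
        vput(vp);
#endif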
 2280 
 2281 /*
 2282  * Somebody doesn't want the vnode recycled.
 2283  */
 2284 void
 2285 vhold(struct vnode *vp)
 2286 {
 2287         VI_LOCK(vp);
 2288         vholdl(vp);
 2289         VI_UNLOCK(vp);
 2290 }
 2291 
 2292 void
 2293 vholdl(vp)
 2294         register struct vnode *vp;
 2295 {
 2296         vp->v_holdcnt++;
 2297         if (VSHOULDBUSY(vp))
 2298                 vbusy(vp);
 2299 }
 2300 
 2301 /*
 2302  * Note that one fewer holder cares about this vnode.  vdrop() is the
 2303  * opposite of vhold().
 2304  */
 2305 void
 2306 vdrop(struct vnode *vp)
 2307 {
 2308         VI_LOCK(vp);
 2309         vdropl(vp);
 2310         VI_UNLOCK(vp);
 2311 }
 2312         
 2313 void
 2314 vdropl(vp)
 2315         register struct vnode *vp;
 2316 {
 2317         if (vp->v_holdcnt <= 0)
 2318                 panic("vdrop: holdcnt");
 2319         vp->v_holdcnt--;
 2320         if (VSHOULDFREE(vp))
 2321                 vfree(vp);
 2322         else
 2323                 vlruvp(vp);
 2324 }
 2325 
 2326 /*
 2327  * Remove any vnodes in the vnode table belonging to mount point mp.
 2328  *
 2329  * If FORCECLOSE is not specified, there should not be any active ones,
 2330  * return error if any are found (nb: this is a user error, not a
 2331  * system error). If FORCECLOSE is specified, detach any active vnodes
 2332  * that are found.
 2333  *
 2334  * If WRITECLOSE is set, only flush out regular file vnodes open for
 2335  * writing.
 2336  *
 2337  * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
 2338  *
 2339  * `rootrefs' specifies the base reference count for the root vnode
 2340  * of this filesystem. The root vnode is considered busy if its
 2341  * v_usecount exceeds this value. On a successful return, vflush()
 2342  * will call vrele() on the root vnode exactly rootrefs times.
 2343  * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 2344  * be zero.
 2345  */
 2346 #ifdef DIAGNOSTIC
 2347 static int busyprt = 0;         /* print out busy vnodes */
 2348 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
 2349 #endif
 2350 
 2351 int
 2352 vflush(mp, rootrefs, flags)
 2353         struct mount *mp;
 2354         int rootrefs;
 2355         int flags;
 2356 {
 2357         struct thread *td = curthread;  /* XXX */
 2358         struct vnode *vp, *nvp, *rootvp = NULL;
 2359         struct vattr vattr;
 2360         int busy = 0, error;
 2361 
 2362         if (rootrefs > 0) {
 2363                 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
 2364                     ("vflush: bad args"));
 2365                 /*
 2366                  * Get the filesystem root vnode. We can vput() it
 2367                  * immediately, since with rootrefs > 0, it won't go away.
 2368                  */
 2369                 if ((error = VFS_ROOT(mp, &rootvp)) != 0)
 2370                         return (error);
 2371                 vput(rootvp);
 2372 
 2373         }
 2374         MNT_ILOCK(mp);
 2375 loop:
 2376         for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp; vp = nvp) {
 2377                 /*
 2378                  * Make sure this vnode wasn't reclaimed in getnewvnode().
 2379                  * Start over if it has (it won't be on the list anymore).
 2380                  */
 2381                 if (vp->v_mount != mp)
 2382                         goto loop;
 2383                 nvp = TAILQ_NEXT(vp, v_nmntvnodes);
 2384 
 2385                 VI_LOCK(vp);
 2386                 MNT_IUNLOCK(mp);
 2387                 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE, td);
 2388                 if (error) {
 2389                         MNT_ILOCK(mp);
 2390                         goto loop;
 2391                 }
 2392                 /*
 2393  * Skip over vnodes marked VV_SYSTEM.
 2394                  */
 2395                 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
 2396                         VOP_UNLOCK(vp, 0, td);
 2397                         MNT_ILOCK(mp);
 2398                         continue;
 2399                 }
 2400                 /*
 2401                  * If WRITECLOSE is set, flush out unlinked but still open
 2402                  * files (even if open only for reading) and regular file
 2403                  * vnodes open for writing.
 2404                  */
 2405                 if (flags & WRITECLOSE) {
 2406                         error = VOP_GETATTR(vp, &vattr, td->td_ucred, td);
 2407                         VI_LOCK(vp);
 2408 
 2409                         if ((vp->v_type == VNON ||
 2410                             (error == 0 && vattr.va_nlink > 0)) &&
 2411                             (vp->v_writecount == 0 || vp->v_type != VREG)) {
 2412                                 VOP_UNLOCK(vp, LK_INTERLOCK, td);
 2413                                 MNT_ILOCK(mp);
 2414                                 continue;
 2415                         }
 2416                 } else
 2417                         VI_LOCK(vp);
 2418 
 2419                 VOP_UNLOCK(vp, 0, td);
 2420 
 2421                 /*
 2422                  * With v_usecount == 0, all we need to do is clear out the
 2423                  * vnode data structures and we are done.
 2424                  */
 2425                 if (vp->v_usecount == 0) {
 2426                         vgonel(vp, td);
 2427                         MNT_ILOCK(mp);
 2428                         continue;
 2429                 }
 2430 
 2431                 /*
 2432                  * If FORCECLOSE is set, forcibly close the vnode. For block
 2433                  * or character devices, revert to an anonymous device. For
 2434                  * all other files, just kill them.
 2435                  */
 2436                 if (flags & FORCECLOSE) {
 2437                         if (vp->v_type != VCHR)
 2438                                 vgonel(vp, td);
 2439                         else
 2440                                 vgonechrl(vp, td);
 2441                         MNT_ILOCK(mp);
 2442                         continue;
 2443                 }
 2444 #ifdef DIAGNOSTIC
 2445                 if (busyprt)
 2446                         vprint("vflush: busy vnode", vp);
 2447 #endif
 2448                 VI_UNLOCK(vp);
 2449                 MNT_ILOCK(mp);
 2450                 busy++;
 2451         }
 2452         MNT_IUNLOCK(mp);
 2453         if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
 2454                 /*
 2455                  * If just the root vnode is busy, and if its refcount
 2456                  * is equal to `rootrefs', then go ahead and kill it.
 2457                  */
 2458                 VI_LOCK(rootvp);
 2459                 KASSERT(busy > 0, ("vflush: not busy"));
 2460                 KASSERT(rootvp->v_usecount >= rootrefs, ("vflush: rootrefs"));
 2461                 if (busy == 1 && rootvp->v_usecount == rootrefs) {
 2462                         vgonel(rootvp, td);
 2463                         busy = 0;
 2464                 } else
 2465                         VI_UNLOCK(rootvp);
 2466         }
 2467         if (busy)
 2468                 return (EBUSY);
 2469         for (; rootrefs > 0; rootrefs--)
 2470                 vrele(rootvp);
 2471         return (0);
 2472 }
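
/*
 * Illustrative unmount-path caller sketch; the rootrefs value of 1 is
 * an assumption for the example (it must match the extra references
 * the caller holds on the root vnode, per the rules above):
 */
#if 0
        error = vflush(mp, 1, (mntflags & MNT_FORCE) ? FORCECLOSE : 0);
        if (error != 0)
                return (error);         /* EBUSY: active vnodes remain */
#endif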
 2473 
 2474 /*
 2475  * This moves a now (likely recyclable) vnode to the end of its mount
 2476  * point's vnode list.  XXX However, it is temporarily disabled until we
 2477  * can clean up ffs_sync() and friends, which have loop restart
 2478  * conditions which this code causes to operate O(N^2).
 2479  */
 2480 static void
 2481 vlruvp(struct vnode *vp)
 2482 {
 2483 #if 0
 2484         struct mount *mp;
 2485 
 2486         if ((mp = vp->v_mount) != NULL) {
 2487                 MNT_ILOCK(mp);
 2488                 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
 2489                 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
 2490                 MNT_IUNLOCK(mp);
 2491         }
 2492 #endif
 2493 }
 2494 
 2495 static void
 2496 vx_lock(struct vnode *vp)
 2497 {
 2498         ASSERT_VI_LOCKED(vp, "vx_lock");
 2499 
 2500         /*
 2501          * Prevent the vnode from being recycled or brought into use while we
 2502          * clean it out.
 2503          */
 2504         if (vp->v_iflag & VI_XLOCK)
 2505                 panic("vclean: deadlock");
 2506         vp->v_iflag |= VI_XLOCK;
 2507         vp->v_vxproc = curthread;
 2508 }
 2509 
 2510 static void
 2511 vx_unlock(struct vnode *vp)
 2512 {
 2513         ASSERT_VI_LOCKED(vp, "vx_unlock");
 2514         vp->v_iflag &= ~VI_XLOCK;
 2515         vp->v_vxproc = NULL;
 2516         if (vp->v_iflag & VI_XWANT) {
 2517                 vp->v_iflag &= ~VI_XWANT;
 2518                 wakeup(vp);
 2519         }
 2520 }
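
/*
 * The waiter side of this handshake appears in vget() above and in
 * vop_revoke() and vgonel() below: with the interlock held, a thread
 * that finds VI_XLOCK set records its interest and sleeps until
 * vx_unlock() issues the wakeup:
 */
#if 0
        if (vp->v_iflag & VI_XLOCK) {
                vp->v_iflag |= VI_XWANT;
                msleep(vp, VI_MTX(vp), PINOD | PDROP, "vxwait", 0);
        }
#endif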
 2521 
 2522 
 2523 /*
 2524  * Disassociate the underlying filesystem from a vnode.
 2525  */
 2526 static void
 2527 vclean(vp, flags, td)
 2528         struct vnode *vp;
 2529         int flags;
 2530         struct thread *td;
 2531 {
 2532         int active;
 2533 
 2534         ASSERT_VI_LOCKED(vp, "vclean");
 2535         /*
 2536          * Check to see if the vnode is in use. If so we have to reference it
 2537          * before we clean it out so that its count cannot fall to zero and
 2538          * generate a race against ourselves to recycle it.
 2539          */
 2540         if ((active = vp->v_usecount))
 2541                 v_incr_usecount(vp, 1);
 2542 
 2543         /*
 2544          * Even if the count is zero, the VOP_INACTIVE routine may still
 2545          * have the object locked while it cleans it out. The VOP_LOCK
 2546          * ensures that the VOP_INACTIVE routine is done with its work.
 2547          * For active vnodes, it ensures that no other activity can
 2548          * occur while the underlying object is being cleaned out.
 2549          */
 2550         VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td);
 2551 
 2552         /*
 2553          * Clean out any buffers associated with the vnode.
 2554          * If the flush fails, just toss the buffers.
 2555          */
 2556         if (flags & DOCLOSE) {
 2557                 struct buf *bp;
 2558                 bp = TAILQ_FIRST(&vp->v_dirtyblkhd);
 2559                 if (bp != NULL)
 2560                         (void) vn_write_suspend_wait(vp, NULL, V_WAIT);
 2561                 if (vinvalbuf(vp, V_SAVE, NOCRED, td, 0, 0) != 0)
 2562                         vinvalbuf(vp, 0, NOCRED, td, 0, 0);
 2563         }
 2564 
 2565         VOP_DESTROYVOBJECT(vp);
 2566 
 2567         /*
 2568          * Any other processes trying to obtain this lock must first
 2569  * wait for VI_XLOCK to clear, then call the new lock operation.
 2570          */
 2571         VOP_UNLOCK(vp, 0, td);
 2572 
 2573         /*
 2574          * If purging an active vnode, it must be closed and
 2575          * deactivated before being reclaimed. Note that the
 2576          * VOP_INACTIVE will unlock the vnode.
 2577          */
 2578         if (active) {
 2579                 if (flags & DOCLOSE)
 2580                         VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
 2581                 VI_LOCK(vp);
 2582                 if ((vp->v_iflag & VI_DOINGINACT) == 0) {
 2583                         vp->v_iflag |= VI_DOINGINACT;
 2584                         VI_UNLOCK(vp);
 2585                         if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT, td) != 0)
 2586                                 panic("vclean: cannot relock.");
 2587                         VOP_INACTIVE(vp, td);
 2588                         VI_LOCK(vp);
 2589                         KASSERT(vp->v_iflag & VI_DOINGINACT,
 2590                             ("vclean: lost VI_DOINGINACT"));
 2591                         vp->v_iflag &= ~VI_DOINGINACT;
 2592                 }
 2593                 VI_UNLOCK(vp);
 2594         }
 2595         /*
 2596          * Reclaim the vnode.
 2597          */
 2598         if (VOP_RECLAIM(vp, td))
 2599                 panic("vclean: cannot reclaim");
 2600 
 2601         if (active) {
 2602                 /*
 2603                  * Inline copy of vrele() since VOP_INACTIVE
 2604                  * has already been called.
 2605                  */
 2606                 VI_LOCK(vp);
 2607                 v_incr_usecount(vp, -1);
 2608                 if (vp->v_usecount <= 0) {
 2609 #ifdef INVARIANTS
 2610                         if (vp->v_usecount < 0 || vp->v_writecount != 0) {
 2611                                 vprint("vclean: bad ref count", vp);
 2612                                 panic("vclean: ref cnt");
 2613                         }
 2614 #endif
 2615                         if (VSHOULDFREE(vp))
 2616                                 vfree(vp);
 2617                 }
 2618                 VI_UNLOCK(vp);
 2619         }
 2620         /*
 2621          * Delete from old mount point vnode list.
 2622          */
 2623         if (vp->v_mount != NULL)
 2624                 insmntque(vp, (struct mount *)0);
 2625         cache_purge(vp);
 2626         VI_LOCK(vp);
 2627         if (VSHOULDFREE(vp))
 2628                 vfree(vp);
 2629 
 2630         /*
 2631          * Done with purge, reset to the standard lock and
 2632          * notify sleepers of the grim news.
 2633          */
 2634         vp->v_vnlock = &vp->v_lock;
 2635         vp->v_op = dead_vnodeop_p;
 2636         if (vp->v_pollinfo != NULL)
 2637                 vn_pollgone(vp);
 2638         vp->v_tag = "none";
 2639 }
 2640 
 2641 /*
 2642  * Eliminate all activity associated with the requested vnode
 2643  * and with all vnodes aliased to the requested vnode.
 2644  */
 2645 int
 2646 vop_revoke(ap)
 2647         struct vop_revoke_args /* {
 2648                 struct vnode *a_vp;
 2649                 int a_flags;
 2650         } */ *ap;
 2651 {
 2652         struct vnode *vp, *vq;
 2653         dev_t dev;
 2654 
 2655         KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke"));
 2656         vp = ap->a_vp;
 2657         KASSERT((vp->v_type == VCHR), ("vop_revoke: not VCHR"));
 2658 
 2659         VI_LOCK(vp);
 2660         /*
 2661          * If a vgone (or vclean) is already in progress,
 2662          * wait until it is done and return.
 2663          */
 2664         if (vp->v_iflag & VI_XLOCK) {
 2665                 vp->v_iflag |= VI_XWANT;
 2666                 msleep(vp, VI_MTX(vp), PINOD | PDROP,
 2667                     "vop_revokeall", 0);
 2668                 return (0);
 2669         }
 2670         VI_UNLOCK(vp);
 2671         dev = vp->v_rdev;
 2672         for (;;) {
 2673                 mtx_lock(&spechash_mtx);
 2674                 vq = SLIST_FIRST(&dev->si_hlist);
 2675                 mtx_unlock(&spechash_mtx);
 2676                 if (!vq)
 2677                         break;
 2678                 vgone(vq);
 2679         }
 2680         return (0);
 2681 }
 2682 
 2683 /*
 2684  * Recycle an unused vnode to the front of the free list.
 2685  * Release the passed interlock if the vnode will be recycled.
 2686  */
 2687 int
 2688 vrecycle(vp, inter_lkp, td)
 2689         struct vnode *vp;
 2690         struct mtx *inter_lkp;
 2691         struct thread *td;
 2692 {
 2693 
 2694         VI_LOCK(vp);
 2695         if (vp->v_usecount == 0) {
 2696                 if (inter_lkp) {
 2697                         mtx_unlock(inter_lkp);
 2698                 }
 2699                 vgonel(vp, td);
 2700                 return (1);
 2701         }
 2702         VI_UNLOCK(vp);
 2703         return (0);
 2704 }
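
/*
 * Illustrative caller sketch: a filesystem's inactive routine can
 * offer a vnode for immediate reuse; vrecycle() returns 1 only if the
 * usecount was zero and the vnode has been vgone'd:
 */
#if 0
        if (vrecycle(vp, NULL, td))
                return (0);             /* vnode is on its way to reuse */
#endif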
 2705 
 2706 /*
 2707  * Eliminate all activity associated with a vnode
 2708  * in preparation for reuse.
 2709  */
 2710 void
 2711 vgone(vp)
 2712         register struct vnode *vp;
 2713 {
 2714         struct thread *td = curthread;  /* XXX */
 2715 
 2716         VI_LOCK(vp);
 2717         vgonel(vp, td);
 2718 }
 2719 
 2720 /*
 2721  * Disassociate a character device from its underlying filesystem and
 2722  * attach it to spec.  This is for use when the chr device is still active
 2723  * and the filesystem is going away.
 2724  */
 2725 static void
 2726 vgonechrl(struct vnode *vp, struct thread *td)
 2727 {
 2728         ASSERT_VI_LOCKED(vp, "vgonechrl");
 2729         vx_lock(vp);
 2730         /*
 2731  * This is a custom version of vclean() which does not tear down
 2732          * the bufs or vm objects held by this vnode.  This allows filesystems
 2733          * to continue using devices which were discovered via another
 2734          * filesystem that has been unmounted.
 2735          */
 2736         if (vp->v_usecount != 0) {
 2737                 v_incr_usecount(vp, 1);
 2738                 /*
 2739                  * Ensure that no other activity can occur while the
 2740                  * underlying object is being cleaned out.
 2741                  */
 2742                 VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td);
 2743                 /*
 2744                  * Any other processes trying to obtain this lock must first
 2745  * wait for VI_XLOCK to clear, then call the new lock operation.
 2746                  */
 2747                 VOP_UNLOCK(vp, 0, td);
 2748                 vp->v_vnlock = &vp->v_lock;
 2749                 vp->v_tag = "orphanchr";
 2750                 vp->v_op = spec_vnodeop_p;
 2751                 if (vp->v_mount != NULL)
 2752                         insmntque(vp, (struct mount *)0);
 2753                 cache_purge(vp);
 2754                 vrele(vp);
 2755                 VI_LOCK(vp);
 2756         } else
 2757                 vclean(vp, 0, td);
 2758         vp->v_op = spec_vnodeop_p;
 2759         vx_unlock(vp);
 2760         VI_UNLOCK(vp);
 2761 }
 2762 /*
 2763  * vgone, with the vp interlock held.
 2764  */
 2765 void
 2766 vgonel(vp, td)
 2767         struct vnode *vp;
 2768         struct thread *td;
 2769 {
 2770         /*
 2771          * If a vgone (or vclean) is already in progress,
 2772          * wait until it is done and return.
 2773          */
 2774         ASSERT_VI_LOCKED(vp, "vgonel");
 2775         if (vp->v_iflag & VI_XLOCK) {
 2776                 vp->v_iflag |= VI_XWANT;
 2777                 msleep(vp, VI_MTX(vp), PINOD | PDROP, "vgone", 0);
 2778                 return;
 2779         }
 2780         vx_lock(vp);
 2781 
 2782         /*
 2783          * Clean out the filesystem specific data.
 2784          */
 2785         vclean(vp, DOCLOSE, td);
 2786         VI_UNLOCK(vp);
 2787 
 2788         /*
 2789          * If special device, remove it from special device alias list
 2790          * if it is on one.
 2791          */
 2792         VI_LOCK(vp);
 2793         if (vp->v_type == VCHR && vp->v_rdev != NULL && vp->v_rdev != NODEV) {
 2794                 mtx_lock(&spechash_mtx);
 2795                 SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext);
 2796                 vp->v_rdev->si_usecount -= vp->v_usecount;
 2797                 mtx_unlock(&spechash_mtx);
 2798                 vp->v_rdev = NULL;
 2799         }
 2800 
 2801         /*
 2802          * If it is on the freelist and not already at the head,
 2803          * move it to the head of the list. The test of the
 2804          * VDOOMED flag and the reference count of zero is because
 2805          * it will be removed from the free list by getnewvnode,
 2806          * but will not have its reference count incremented until
 2807          * after calling vgone. If the reference count were
 2808          * incremented first, vgone would (incorrectly) try to
 2809          * close the previous instance of the underlying object.
 2810          */
 2811         if (vp->v_usecount == 0 && !(vp->v_iflag & VI_DOOMED)) {
 2812                 mtx_lock(&vnode_free_list_mtx);
 2813                 if (vp->v_iflag & VI_FREE) {
 2814                         TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
 2815                 } else {
 2816                         vp->v_iflag |= VI_FREE;
 2817                         freevnodes++;
 2818                 }
 2819                 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
 2820                 mtx_unlock(&vnode_free_list_mtx);
 2821         }
 2822 
 2823         vp->v_type = VBAD;
 2824         vx_unlock(vp);
 2825         VI_UNLOCK(vp);
 2826 }
 2827 
 2828 /*
 2829  * Lookup a vnode by device number.
 2830  */
 2831 int
 2832 vfinddev(dev, type, vpp)
 2833         dev_t dev;
 2834         enum vtype type;
 2835         struct vnode **vpp;
 2836 {
 2837         struct vnode *vp;
 2838 
 2839         mtx_lock(&spechash_mtx);
 2840         SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
 2841                 if (type == vp->v_type) {
 2842                         *vpp = vp;
 2843                         mtx_unlock(&spechash_mtx);
 2844                         return (1);
 2845                 }
 2846         }
 2847         mtx_unlock(&spechash_mtx);
 2848         return (0);
 2849 }
 2850 
 2851 /*
 2852  * Calculate the total number of references to a special device.
 2853  */
 2854 int
 2855 vcount(vp)
 2856         struct vnode *vp;
 2857 {
 2858         int count;
 2859 
 2860         mtx_lock(&spechash_mtx);
 2861         count = vp->v_rdev->si_usecount;
 2862         mtx_unlock(&spechash_mtx);
 2863         return (count);
 2864 }
 2865 
 2866 /*
 2867  * Same as above, but takes the dev_t as its argument.
 2868  */
 2869 int
 2870 count_dev(dev)
 2871         dev_t dev;
 2872 {
 2873         int count;
 2874 
 2875         mtx_lock(&spechash_mtx);
 2876         count = dev->si_usecount;
 2877         mtx_unlock(&spechash_mtx);
 2878         return (count);
 2879 }
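
/*
 * For a vnode with an attached device the two counts agree by
 * construction, since both simply read si_usecount under spechash_mtx:
 *
 *      vcount(vp) == count_dev(vp->v_rdev)
 */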
 2880 
 2881 /*
 2882  * Print out a description of a vnode.
 2883  */
 2884 static char *typename[] =
 2885 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};
 2886 
 2887 void
 2888 vprint(label, vp)
 2889         char *label;
 2890         struct vnode *vp;
 2891 {
 2892         char buf[96];
 2893 
 2894         if (label != NULL)
 2895                 printf("%s: %p: ", label, (void *)vp);
 2896         else
 2897                 printf("%p: ", (void *)vp);
 2898         printf("tag %s, type %s, usecount %d, writecount %d, refcount %d,",
 2899             vp->v_tag, typename[vp->v_type], vp->v_usecount,
 2900             vp->v_writecount, vp->v_holdcnt);
 2901         buf[0] = '\0';
 2902         if (vp->v_vflag & VV_ROOT)
 2903                 strcat(buf, "|VV_ROOT");
 2904         if (vp->v_vflag & VV_TEXT)
 2905                 strcat(buf, "|VV_TEXT");
 2906         if (vp->v_vflag & VV_SYSTEM)
 2907                 strcat(buf, "|VV_SYSTEM");
 2908         if (vp->v_iflag & VI_XLOCK)
 2909                 strcat(buf, "|VI_XLOCK");
 2910         if (vp->v_iflag & VI_XWANT)
 2911                 strcat(buf, "|VI_XWANT");
 2912         if (vp->v_iflag & VI_BWAIT)
 2913                 strcat(buf, "|VI_BWAIT");
 2914         if (vp->v_iflag & VI_DOOMED)
 2915                 strcat(buf, "|VI_DOOMED");
 2916         if (vp->v_iflag & VI_FREE)
 2917                 strcat(buf, "|VI_FREE");
 2918         if (vp->v_vflag & VV_OBJBUF)
 2919                 strcat(buf, "|VV_OBJBUF");
 2920         if (buf[0] != '\0')
 2921                 printf(" flags (%s),", &buf[1]);
 2922         lockmgr_printinfo(vp->v_vnlock);
 2923         printf("\n");
 2924         if (vp->v_data != NULL)
 2925                 VOP_PRINT(vp);
 2926 }
 2927 
 2928 #ifdef DDB
 2929 #include <ddb/ddb.h>
 2930 /*
 2931  * List all of the locked vnodes in the system.
 2932  * Called when debugging the kernel.
 2933  */
 2934 DB_SHOW_COMMAND(lockedvnods, lockedvnodes)
 2935 {
 2936         struct mount *mp, *nmp;
 2937         struct vnode *vp;
 2938 
 2939         /*
 2940          * Note: because this is DDB, we can't obey the locking semantics
 2941          * for these structures, which means we could catch an inconsistent
 2942          * state and dereference a nasty pointer.  Not much to be done
 2943          * about that.
 2944          */
 2945         printf("Locked vnodes\n");
 2946         for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
 2947                 nmp = TAILQ_NEXT(mp, mnt_list);
 2948                 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
 2949                         if (VOP_ISLOCKED(vp, NULL))
 2950                                 vprint(NULL, vp);
 2951                 }
 2953         }
 2954 }
 2955 #endif
 2956 
 2957 /*
 2958  * Fill in a struct xvfsconf based on a struct vfsconf.
 2959  */
 2960 static void
 2961 vfsconf2x(struct vfsconf *vfsp, struct xvfsconf *xvfsp)
 2962 {
 2963 
 2964         strcpy(xvfsp->vfc_name, vfsp->vfc_name);
 2965         xvfsp->vfc_typenum = vfsp->vfc_typenum;
 2966         xvfsp->vfc_refcount = vfsp->vfc_refcount;
 2967         xvfsp->vfc_flags = vfsp->vfc_flags;
 2968         /*
 2969          * These are unused in userland, we keep them
 2970          * to not break binary compatibility.
 2971          */
 2972         xvfsp->vfc_vfsops = NULL;
 2973         xvfsp->vfc_next = NULL;
 2974 }
 2975 
 2976 static int
 2977 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS)
 2978 {
 2979         struct vfsconf *vfsp;
 2980         struct xvfsconf *xvfsp;
 2981         int cnt, error, i;
 2982 
 2983         cnt = 0;
 2984         for (vfsp = vfsconf; vfsp != NULL; vfsp = vfsp->vfc_next)
 2985                 cnt++;
 2986         xvfsp = malloc(sizeof(struct xvfsconf) * cnt, M_TEMP, M_WAITOK);
 2987         /*
 2988          * Handle the race that will exist here once struct vfsconf is
 2989          * locked down, by using both cnt and a check of vfc_next against
 2990          * NULL to determine the end of the loop.  The race arises
 2991          * because we will have to unlock before calling malloc().
 2992          * For now we are protected by Giant.
 2993          */
 2994         i = 0;
 2995         for (vfsp = vfsconf; vfsp != NULL && i < cnt; vfsp = vfsp->vfc_next) {
 2996                 vfsconf2x(vfsp, xvfsp + i);
 2997                 i++;
 2998         }
 2999         error = SYSCTL_OUT(req, xvfsp, sizeof(struct xvfsconf) * i);
 3000         free(xvfsp, M_TEMP);
 3001         return (error);
 3002 }
 3003 
 3004 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLFLAG_RD, NULL, 0, sysctl_vfs_conflist,
 3005     "S,xvfsconf", "List of all configured filesystems");
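/*
 * Editor's illustration (a hypothetical userland sketch, not part of
 * vfs_subr.c): vfs.conflist can be read with sysctlbyname(3), sizing
 * the buffer with a NULL oldp first, much as lsvfs(1) does.  Error
 * handling is abbreviated.
 */
#if 0
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
        struct xvfsconf *xvfsp;
        size_t i, len;

        if (sysctlbyname("vfs.conflist", NULL, &len, NULL, 0) == -1)
                return (1);
        if ((xvfsp = malloc(len)) == NULL ||
            sysctlbyname("vfs.conflist", xvfsp, &len, NULL, 0) == -1)
                return (1);
        for (i = 0; i < len / sizeof(*xvfsp); i++)
                printf("%-16s typenum %d refcount %d\n", xvfsp[i].vfc_name,
                    xvfsp[i].vfc_typenum, xvfsp[i].vfc_refcount);
        free(xvfsp);
        return (0);
}
#endif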
 3006 
 3007 /*
 3008  * Top level filesystem related information gathering.
 3009  */
 3010 static int      sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);
 3011 
 3012 static int
 3013 vfs_sysctl(SYSCTL_HANDLER_ARGS)
 3014 {
 3015         int *name = (int *)arg1 - 1;    /* XXX */
 3016         u_int namelen = arg2 + 1;       /* XXX */
 3017         struct vfsconf *vfsp;
 3018         struct xvfsconf xvfsp;
 3019 
 3020         printf("WARNING: userland calling deprecated sysctl, "
 3021             "please rebuild world\n");
 3022 
 3023 #if 1 || defined(COMPAT_PRELITE2)
 3024         /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
 3025         if (namelen == 1)
 3026                 return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
 3027 #endif
 3028 
 3029         switch (name[1]) {
 3030         case VFS_MAXTYPENUM:
 3031                 if (namelen != 2)
 3032                         return (ENOTDIR);
 3033                 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
 3034         case VFS_CONF:
 3035                 if (namelen != 3)
 3036                         return (ENOTDIR);       /* overloaded */
 3037                 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
 3038                         if (vfsp->vfc_typenum == name[2])
 3039                                 break;
 3040                 if (vfsp == NULL)
 3041                         return (EOPNOTSUPP);
 3042                 vfsconf2x(vfsp, &xvfsp);
 3043                 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
 3044         }
 3045         return (EOPNOTSUPP);
 3046 }
 3047 
 3048 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP, vfs_sysctl,
 3049         "Generic filesystem");
 3050 
 3051 #if 1 || defined(COMPAT_PRELITE2)
 3052 
 3053 static int
 3054 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
 3055 {
 3056         int error;
 3057         struct vfsconf *vfsp;
 3058         struct ovfsconf ovfs;
 3059 
 3060         for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
 3061                 ovfs.vfc_vfsops = vfsp->vfc_vfsops;     /* XXX used as flag */
 3062                 strcpy(ovfs.vfc_name, vfsp->vfc_name);
 3063                 ovfs.vfc_index = vfsp->vfc_typenum;
 3064                 ovfs.vfc_refcount = vfsp->vfc_refcount;
 3065                 ovfs.vfc_flags = vfsp->vfc_flags;
 3066                 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
 3067                 if (error)
 3068                         return (error);
 3069         }
 3070         return (0);
 3071 }
 3072 
 3073 #endif /* 1 || COMPAT_PRELITE2 */
 3074 
 3075 #define KINFO_VNODESLOP         10
 3076 #ifdef notyet
 3077 /*
 3078  * Dump vnode list (via sysctl).
 3079  */
 3080 /* ARGSUSED */
 3081 static int
 3082 sysctl_vnode(SYSCTL_HANDLER_ARGS)
 3083 {
 3084         struct xvnode *xvn;
 3085         struct thread *td = req->td;
 3086         struct mount *mp;
 3087         struct vnode *vp;
 3088         int error, len, n;
 3089 
 3090         /*
 3091          * Stale numvnodes access is not fatal here.
 3092          */
 3093         req->lock = 0;
 3094         len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn;
 3095         if (!req->oldptr)
 3096                 /* Make an estimate */
 3097                 return (SYSCTL_OUT(req, 0, len));
 3098 
 3099         sysctl_wire_old_buffer(req, 0);
 3100         xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK);
 3101         n = 0;
 3102         mtx_lock(&mountlist_mtx);
 3103         TAILQ_FOREACH(mp, &mountlist, mnt_list) {
 3104                 if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td))
 3105                         continue;
 3106                 MNT_ILOCK(mp);
 3107                 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
 3108                         if (n == len / sizeof *xvn)
 3109                                 break;
 3110                         vref(vp);
 3111                         xvn[n].xv_size = sizeof *xvn;
 3112                         xvn[n].xv_vnode = vp;
 3113 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field
 3114                         XV_COPY(usecount);
 3115                         XV_COPY(writecount);
 3116                         XV_COPY(holdcnt);
 3117                         XV_COPY(id);
 3118                         XV_COPY(mount);
 3119                         XV_COPY(numoutput);
 3120                         XV_COPY(type);
 3121 #undef XV_COPY
 3122                         xvn[n].xv_flag = vp->v_vflag;
 3123 
 3124                         switch (vp->v_type) {
 3125                         case VREG:
 3126                         case VDIR:
 3127                         case VLNK:
 3128                                 xvn[n].xv_dev = vp->v_cachedfs;
 3129                                 xvn[n].xv_ino = vp->v_cachedid;
 3130                                 break;
 3131                         case VBLK:
 3132                         case VCHR:
 3133                                 if (vp->v_rdev == NULL) {
 3134                                         vrele(vp);
 3135                                         continue;
 3136                                 }
 3137                                 xvn[n].xv_dev = dev2udev(vp->v_rdev);
 3138                                 break;
 3139                         case VSOCK:
 3140                                 xvn[n].xv_socket = vp->v_socket;
 3141                                 break;
 3142                         case VFIFO:
 3143                                 xvn[n].xv_fifo = vp->v_fifoinfo;
 3144                                 break;
 3145                         case VNON:
 3146                         case VBAD:
 3147                         default:
 3148                                 /* shouldn't happen? */
 3149                                 vrele(vp);
 3150                                 continue;
 3151                         }
 3152                         vrele(vp);
 3153                         ++n;
 3154                 }
 3155                 MNT_IUNLOCK(mp);
 3156                 mtx_lock(&mountlist_mtx);
 3157                 vfs_unbusy(mp, td);
 3158                 if (n == len / sizeof *xvn)
 3159                         break;
 3160         }
 3161         mtx_unlock(&mountlist_mtx);
 3162 
 3163         error = SYSCTL_OUT(req, xvn, n * sizeof *xvn);
 3164         free(xvn, M_TEMP);
 3165         return (error);
 3166 }
 3167 
 3168 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
 3169         0, 0, sysctl_vnode, "S,xvnode", "");
 3170 #endif
 3171 
 3172 /*
 3173  * Check to see if a filesystem is mounted on a block device.
 3174  */
 3175 int
 3176 vfs_mountedon(vp)
 3177         struct vnode *vp;
 3178 {
 3179 
 3180         if (vp->v_rdev->si_mountpoint != NULL)
 3181                 return (EBUSY);
 3182         return (0);
 3183 }
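/*
 * Editor's illustration (a hypothetical sketch, not part of vfs_subr.c):
 * a filesystem's mount path would use this check to refuse a device
 * that already hosts a mounted filesystem.
 */
#if 0
static int
example_mount_check(struct vnode *devvp)
{
        int error;

        /* EBUSY if a filesystem is already mounted on this device. */
        error = vfs_mountedon(devvp);
        if (error != 0)
                return (error);
        /* ... proceed with the mount ... */
        return (0);
}
#endif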
 3184 
 3185 /*
 3186  * Unmount all filesystems. The list is traversed in reverse order
 3187  * of mounting to avoid dependencies.
 3188  */
 3189 void
 3190 vfs_unmountall()
 3191 {
 3192         struct mount *mp;
 3193         struct thread *td;
 3194         int error;
 3195 
 3196         if (curthread != NULL)
 3197                 td = curthread;
 3198         else
 3199                 td = FIRST_THREAD_IN_PROC(initproc); /* XXX XXX proc0? */
 3200         /*
 3201          * Since this only runs when rebooting, it is not interlocked.
 3202          */
 3203         while (!TAILQ_EMPTY(&mountlist)) {
 3204                 mp = TAILQ_LAST(&mountlist, mntlist);
 3205                 error = dounmount(mp, MNT_FORCE, td);
 3206                 if (error) {
 3207                         TAILQ_REMOVE(&mountlist, mp, mnt_list);
 3208                         printf("unmount of %s failed (",
 3209                             mp->mnt_stat.f_mntonname);
 3210                         if (error == EBUSY)
 3211                                 printf("BUSY)\n");
 3212                         else
 3213                                 printf("%d)\n", error);
 3214                 } else {
 3215                         /* The unmount has removed mp from the mountlist */
 3216                 }
 3217         }
 3218 }
 3219 
 3220 /*
 3221  * Perform msync on all vnodes under a mount point.
 3222  * The mount point must be locked.
 3223  */
 3224 void
 3225 vfs_msync(struct mount *mp, int flags)
 3226 {
 3227         struct vnode *vp, *nvp;
 3228         struct vm_object *obj;
 3229         int tries;
 3230 
 3231         GIANT_REQUIRED;
 3232 
 3233         tries = 5;
 3234         MNT_ILOCK(mp);
 3235 loop:
 3236         for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
 3237                 if (vp->v_mount != mp) {
 3238                         if (--tries > 0)
 3239                                 goto loop;
 3240                         break;
 3241                 }
 3242                 nvp = TAILQ_NEXT(vp, v_nmntvnodes);
 3243 
 3244                 VI_LOCK(vp);
 3245                 if (vp->v_iflag & VI_XLOCK) {
 3246                         VI_UNLOCK(vp);
 3247                         continue;
 3248                 }
 3249 
 3250                 if ((vp->v_iflag & VI_OBJDIRTY) &&
 3251                     (flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) {
 3252                         MNT_IUNLOCK(mp);
 3253                         if (!vget(vp,
 3254                             LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
 3255                             curthread)) {
 3256                                 if (vp->v_vflag & VV_NOSYNC) {  /* unlinked */
 3257                                         vput(vp);
 3258                                         MNT_ILOCK(mp);
 3259                                         continue;
 3260                                 }
 3261 
 3262                                 if (VOP_GETVOBJECT(vp, &obj) == 0) {
 3263                                         VM_OBJECT_LOCK(obj);
 3264                                         vm_object_page_clean(obj, 0, 0,
 3265                                             flags == MNT_WAIT ?
 3266                                             OBJPC_SYNC : OBJPC_NOSYNC);
 3267                                         VM_OBJECT_UNLOCK(obj);
 3268                                 }
 3269                                 vput(vp);
 3270                         }
 3271                         MNT_ILOCK(mp);
 3272                         if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp) {
 3273                                 if (--tries > 0)
 3274                                         goto loop;
 3275                                 break;
 3276                         }
 3277                 } else
 3278                         VI_UNLOCK(vp);
 3279         }
 3280         MNT_IUNLOCK(mp);
 3281 }
 3282 
 3283 /*
 3284  * Create the VM object needed for VMIO and mmap support.  This
 3285  * is done for all VREG files in the system.  Some filesystems can
 3286  * also take advantage of the additional metadata buffering that the
 3287  * VMIO code provides by making their device nodes VMIO-backed as well.
 3288  *
 3289  * vp must be locked when vfs_object_create is called.
 3290  */
 3291 int
 3292 vfs_object_create(vp, td, cred)
 3293         struct vnode *vp;
 3294         struct thread *td;
 3295         struct ucred *cred;
 3296 {
 3297         GIANT_REQUIRED;
 3298         return (VOP_CREATEVOBJECT(vp, cred, td));
 3299 }
 3300 
 3301 /*
 3302  * Mark a vnode as free, putting it up for recycling.
 3303  */
 3304 void
 3305 vfree(vp)
 3306         struct vnode *vp;
 3307 {
 3308         ASSERT_VI_LOCKED(vp, "vfree");
 3309         mtx_lock(&vnode_free_list_mtx);
 3310         KASSERT((vp->v_iflag & VI_FREE) == 0, ("vnode already free"));
 3311         if (vp->v_iflag & VI_AGE) {
 3312                 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
 3313         } else {
 3314                 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
 3315         }
 3316         freevnodes++;
 3317         mtx_unlock(&vnode_free_list_mtx);
 3318         vp->v_iflag &= ~VI_AGE;
 3319         vp->v_iflag |= VI_FREE;
 3320 }
 3321 
 3322 /*
 3323  * Opposite of vfree() - mark a vnode as in use.
 3324  */
 3325 void
 3326 vbusy(vp)
 3327         struct vnode *vp;
 3328 {
 3329         ASSERT_VI_LOCKED(vp, "vbusy");
 3330         KASSERT((vp->v_iflag & VI_FREE) != 0, ("vnode not free"));
 3331 
 3332         mtx_lock(&vnode_free_list_mtx);
 3333         TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
 3334         freevnodes--;
 3335         mtx_unlock(&vnode_free_list_mtx);
 3336 
 3337         vp->v_iflag &= ~(VI_FREE|VI_AGE);
 3338 }
 3339 
 3340 /*
 3341  * Record a process's interest in events which might happen to
 3342  * a vnode.  Because poll uses the historic select-style interface
 3343  * internally, this routine serves as both the ``check for any
 3344  * pending events'' and the ``record my interest in future events''
 3345  * functions.  (These are done together, while the lock is held,
 3346  * to avoid race conditions.)
 3347  */
 3348 int
 3349 vn_pollrecord(vp, td, events)
 3350         struct vnode *vp;
 3351         struct thread *td;
 3352         short events;
 3353 {
 3354 
 3355         if (vp->v_pollinfo == NULL)
 3356                 v_addpollinfo(vp);
 3357         mtx_lock(&vp->v_pollinfo->vpi_lock);
 3358         if (vp->v_pollinfo->vpi_revents & events) {
 3359                 /*
 3360                  * This leaves events we are not interested
 3361                  * in available for the other process which
 3362                  * presumably had requested them
 3363                  * (otherwise they would never have been
 3364                  * recorded).
 3365                  */
 3366                 events &= vp->v_pollinfo->vpi_revents;
 3367                 vp->v_pollinfo->vpi_revents &= ~events;
 3368 
 3369                 mtx_unlock(&vp->v_pollinfo->vpi_lock);
 3370                 return (events);
 3371         }
 3372         vp->v_pollinfo->vpi_events |= events;
 3373         selrecord(td, &vp->v_pollinfo->vpi_selinfo);
 3374         mtx_unlock(&vp->v_pollinfo->vpi_lock);
 3375         return (0);
 3376 }
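/*
 * Editor's illustration (a hypothetical sketch, not part of vfs_subr.c):
 * a filesystem's poll routine can be written almost entirely in terms
 * of vn_pollrecord(); the producer side later calls the VN_POLLEVENT()
 * macro (which invokes vn_pollevent() below) to wake recorded pollers.
 */
#if 0
static int
example_fs_poll(struct vop_poll_args *ap)
{

        /* Report pending events, or record interest for a later wakeup. */
        return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
}
#endif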
 3377 
 3378 /*
 3379  * Note the occurrence of an event.  If the VN_POLLEVENT macro is used,
 3380  * it is possible for us to miss an event due to race conditions, but
 3381  * that condition is expected to be rare, so for the moment it is the
 3382  * preferred interface.
 3383  */
 3384 void
 3385 vn_pollevent(vp, events)
 3386         struct vnode *vp;
 3387         short events;
 3388 {
 3389 
 3390         if (vp->v_pollinfo == NULL)
 3391                 v_addpollinfo(vp);
 3392         mtx_lock(&vp->v_pollinfo->vpi_lock);
 3393         if (vp->v_pollinfo->vpi_events & events) {
 3394                 /*
 3395                  * We clear vpi_events so that we don't
 3396                  * call selwakeup() twice if two events are
 3397                  * posted before the polling process(es) is
 3398                  * awakened.  This also ensures that we take at
 3399                  * most one selwakeup() if the polling process
 3400                  * is no longer interested.  However, it does
 3401                  * mean that only one event can be noticed at
 3402                  * a time.  (Perhaps we should only clear those
 3403                  * event bits which we note?) XXX
 3404                  */
 3405                 vp->v_pollinfo->vpi_events = 0; /* &= ~events ??? */
 3406                 vp->v_pollinfo->vpi_revents |= events;
 3407                 selwakeuppri(&vp->v_pollinfo->vpi_selinfo, PRIBIO);
 3408         }
 3409         mtx_unlock(&vp->v_pollinfo->vpi_lock);
 3410 }
 3411 
 3412 /*
 3413  * Wake up anyone polling on vp because it is being revoked.
 3414  * This depends on dead_poll() returning POLLHUP for correct
 3415  * behavior.
 3416  */
 3417 void
 3418 vn_pollgone(vp)
 3419         struct vnode *vp;
 3420 {
 3421 
 3422         mtx_lock(&vp->v_pollinfo->vpi_lock);
 3423         VN_KNOTE(vp, NOTE_REVOKE);
 3424         if (vp->v_pollinfo->vpi_events) {
 3425                 vp->v_pollinfo->vpi_events = 0;
 3426                 selwakeuppri(&vp->v_pollinfo->vpi_selinfo, PRIBIO);
 3427         }
 3428         mtx_unlock(&vp->v_pollinfo->vpi_lock);
 3429 }
 3430 
 3431 
 3432 
 3433 /*
 3434  * Routine to create and manage a filesystem syncer vnode.
 3435  */
 3436 #define sync_close ((int (*)(struct  vop_close_args *))nullop)
 3437 static int      sync_fsync(struct  vop_fsync_args *);
 3438 static int      sync_inactive(struct  vop_inactive_args *);
 3439 static int      sync_reclaim(struct  vop_reclaim_args *);
 3440 
 3441 static vop_t **sync_vnodeop_p;
 3442 static struct vnodeopv_entry_desc sync_vnodeop_entries[] = {
 3443         { &vop_default_desc,    (vop_t *) vop_eopnotsupp },
 3444         { &vop_close_desc,      (vop_t *) sync_close },         /* close */
 3445         { &vop_fsync_desc,      (vop_t *) sync_fsync },         /* fsync */
 3446         { &vop_inactive_desc,   (vop_t *) sync_inactive },      /* inactive */
 3447         { &vop_reclaim_desc,    (vop_t *) sync_reclaim },       /* reclaim */
 3448         { &vop_lock_desc,       (vop_t *) vop_stdlock },        /* lock */
 3449         { &vop_unlock_desc,     (vop_t *) vop_stdunlock },      /* unlock */
 3450         { &vop_islocked_desc,   (vop_t *) vop_stdislocked },    /* islocked */
 3451         { NULL, NULL }
 3452 };
 3453 static struct vnodeopv_desc sync_vnodeop_opv_desc =
 3454         { &sync_vnodeop_p, sync_vnodeop_entries };
 3455 
 3456 VNODEOP_SET(sync_vnodeop_opv_desc);
 3457 
 3458 /*
 3459  * Create a new filesystem syncer vnode for the specified mount point.
 3460  */
 3461 int
 3462 vfs_allocate_syncvnode(mp)
 3463         struct mount *mp;
 3464 {
 3465         struct vnode *vp;
 3466         static long start, incr, next;
 3467         int error;
 3468 
 3469         /* Allocate a new vnode */
 3470         if ((error = getnewvnode("syncer", mp, sync_vnodeop_p, &vp)) != 0) {
 3471                 mp->mnt_syncer = NULL;
 3472                 return (error);
 3473         }
 3474         vp->v_type = VNON;
 3475         /*
 3476          * Place the vnode onto the syncer worklist. We attempt to
 3477          * scatter them about on the list so that they will go off
 3478          * at evenly distributed times even if all the filesystems
 3479          * are mounted at once.
 3480          */
 3481         next += incr;
 3482         if (next == 0 || next > syncer_maxdelay) {
 3483                 start /= 2;
 3484                 incr /= 2;
 3485                 if (start == 0) {
 3486                         start = syncer_maxdelay / 2;
 3487                         incr = syncer_maxdelay;
 3488                 }
 3489                 next = start;
 3490         }
 3491         VI_LOCK(vp);
 3492         vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0);
 3493         VI_UNLOCK(vp);
 3494         mp->mnt_syncer = vp;
 3495         return (0);
 3496 }
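/*
 * Editor's note: assuming syncer_maxdelay keeps its default of 32, the
 * start/incr logic above hands successive mounts the offsets 16, 8, 24,
 * 4, 12, 20, 28, 2, 6, ... -- a bit-reversal-style spread that keeps the
 * syncer activity of simultaneously mounted filesystems from clustering
 * on the same second.
 */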
 3497 
 3498 /*
 3499  * Do a lazy sync of the filesystem.
 3500  */
 3501 static int
 3502 sync_fsync(ap)
 3503         struct vop_fsync_args /* {
 3504                 struct vnode *a_vp;
 3505                 struct ucred *a_cred;
 3506                 int a_waitfor;
 3507                 struct thread *a_td;
 3508         } */ *ap;
 3509 {
 3510         struct vnode *syncvp = ap->a_vp;
 3511         struct mount *mp = syncvp->v_mount;
 3512         struct thread *td = ap->a_td;
 3513         int error, asyncflag;
 3514 
 3515         /*
 3516          * We only need to do something if this is a lazy evaluation.
 3517          */
 3518         if (ap->a_waitfor != MNT_LAZY)
 3519                 return (0);
 3520 
 3521         /*
 3522          * Move ourselves to the back of the sync list.
 3523          */
 3524         VI_LOCK(syncvp);
 3525         vn_syncer_add_to_worklist(syncvp, syncdelay);
 3526         VI_UNLOCK(syncvp);
 3527 
 3528         /*
 3529          * Walk the list of vnodes pushing all that are dirty and
 3530          * not already on the sync list.
 3531          */
 3532         mtx_lock(&mountlist_mtx);
 3533         if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, td) != 0) {
 3534                 mtx_unlock(&mountlist_mtx);
 3535                 return (0);
 3536         }
 3537         if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
 3538                 vfs_unbusy(mp, td);
 3539                 return (0);
 3540         }
 3541         asyncflag = mp->mnt_flag & MNT_ASYNC;
 3542         mp->mnt_flag &= ~MNT_ASYNC;
 3543         vfs_msync(mp, MNT_NOWAIT);
 3544         error = VFS_SYNC(mp, MNT_LAZY, ap->a_cred, td);
 3545         if (asyncflag)
 3546                 mp->mnt_flag |= MNT_ASYNC;
 3547         vn_finished_write(mp);
 3548         vfs_unbusy(mp, td);
 3549         return (error);
 3550 }
 3551 
 3552 /*
 3553  * The syncer vnode is no longer referenced.
 3554  */
 3555 static int
 3556 sync_inactive(ap)
 3557         struct vop_inactive_args /* {
 3558                 struct vnode *a_vp;
 3559                 struct thread *a_td;
 3560         } */ *ap;
 3561 {
 3562 
 3563         VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
 3564         vgone(ap->a_vp);
 3565         return (0);
 3566 }
 3567 
 3568 /*
 3569  * The syncer vnode is no longer needed and is being decommissioned.
 3570  *
 3571  * Modifications to the worklist must be protected by sync_mtx.
 3572  */
 3573 static int
 3574 sync_reclaim(ap)
 3575         struct vop_reclaim_args /* {
 3576                 struct vnode *a_vp;
 3577         } */ *ap;
 3578 {
 3579         struct vnode *vp = ap->a_vp;
 3580 
 3581         VI_LOCK(vp);
 3582         vp->v_mount->mnt_syncer = NULL;
 3583         if (vp->v_iflag & VI_ONWORKLST) {
 3584                 mtx_lock(&sync_mtx);
 3585                 LIST_REMOVE(vp, v_synclist);
 3586                 mtx_unlock(&sync_mtx);
 3587                 vp->v_iflag &= ~VI_ONWORKLST;
 3588         }
 3589         VI_UNLOCK(vp);
 3590 
 3591         return (0);
 3592 }
 3593 
 3594 /*
 3595  * Extract the dev_t from a VCHR vnode.
 3596  */
 3597 dev_t
 3598 vn_todev(vp)
 3599         struct vnode *vp;
 3600 {
 3601         if (vp->v_type != VCHR)
 3602                 return (NODEV);
 3603         return (vp->v_rdev);
 3604 }
 3605 
 3606 /*
 3607  * Check whether a vnode represents a disk device.
 3608  */
 3609 int
 3610 vn_isdisk(vp, errp)
 3611         struct vnode *vp;
 3612         int *errp;
 3613 {
 3614         int error;
 3615 
 3616         error = 0;
 3617         if (vp->v_type != VCHR)
 3618                 error = ENOTBLK;
 3619         else if (vp->v_rdev == NULL)
 3620                 error = ENXIO;
 3621         else if (!(devsw(vp->v_rdev)->d_flags & D_DISK))
 3622                 error = ENOTBLK;
 3623         if (errp != NULL)
 3624                 *errp = error;
 3625         return (error == 0);
 3626 }
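/*
 * Editor's illustration (a hypothetical sketch, not part of vfs_subr.c):
 * callers that must operate on whole disks, such as mount or swapon
 * paths, typically gate on vn_isdisk().
 */
#if 0
static int
example_require_disk(struct vnode *devvp)
{
        int error;

        /* Accept only device vnodes whose driver advertises D_DISK. */
        if (!vn_isdisk(devvp, &error))
                return (error);         /* ENOTBLK or ENXIO, as set above */
        return (0);
}
#endif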
 3627 
 3628 /*
 3629  * Free data allocated by namei(); see namei(9) for details.
 3630  */
 3631 void
 3632 NDFREE(ndp, flags)
 3633      struct nameidata *ndp;
 3634      const u_int flags;
 3635 {
 3636         if (!(flags & NDF_NO_FREE_PNBUF) &&
 3637             (ndp->ni_cnd.cn_flags & HASBUF)) {
 3638                 uma_zfree(namei_zone, ndp->ni_cnd.cn_pnbuf);
 3639                 ndp->ni_cnd.cn_flags &= ~HASBUF;
 3640         }
 3641         if (!(flags & NDF_NO_DVP_UNLOCK) &&
 3642             (ndp->ni_cnd.cn_flags & LOCKPARENT) &&
 3643             ndp->ni_dvp != ndp->ni_vp)
 3644                 VOP_UNLOCK(ndp->ni_dvp, 0, ndp->ni_cnd.cn_thread);
 3645         if (!(flags & NDF_NO_DVP_RELE) &&
 3646             (ndp->ni_cnd.cn_flags & (LOCKPARENT|WANTPARENT))) {
 3647                 vrele(ndp->ni_dvp);
 3648                 ndp->ni_dvp = NULL;
 3649         }
 3650         if (!(flags & NDF_NO_VP_UNLOCK) &&
 3651             (ndp->ni_cnd.cn_flags & LOCKLEAF) && ndp->ni_vp)
 3652                 VOP_UNLOCK(ndp->ni_vp, 0, ndp->ni_cnd.cn_thread);
 3653         if (!(flags & NDF_NO_VP_RELE) &&
 3654             ndp->ni_vp) {
 3655                 vrele(ndp->ni_vp);
 3656                 ndp->ni_vp = NULL;
 3657         }
 3658         if (!(flags & NDF_NO_STARTDIR_RELE) &&
 3659             (ndp->ni_cnd.cn_flags & SAVESTART)) {
 3660                 vrele(ndp->ni_startdir);
 3661                 ndp->ni_startdir = NULL;
 3662         }
 3663 }
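/*
 * Editor's illustration (a hypothetical sketch, not part of vfs_subr.c):
 * a typical namei(9) consumer releases the pathname buffer with
 * NDFREE() once the lookup result has been used.  The path string and
 * function name are invented.
 */
#if 0
static int
example_lookup(struct thread *td)
{
        struct nameidata nd;
        int error;

        NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
            "/an/example/path", td);
        if ((error = namei(&nd)) != 0)
                return (error);
        /* ... use the locked, referenced vnode nd.ni_vp ... */
        NDFREE(&nd, NDF_ONLY_PNBUF);    /* free only the path buffer */
        vput(nd.ni_vp);                 /* unlock and release the vnode */
        return (0);
}
#endif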
 3664 
 3665 /*
 3666  * Common filesystem object access control check routine.  Accepts a
 3667  * vnode's type, "mode", uid and gid, requested access mode, credentials,
 3668  * and optional call-by-reference privused argument allowing vaccess()
 3669  * to indicate to the caller whether privilege was used to satisfy the
 3670  * request (obsoleted).  Returns 0 on success, or an errno on failure.
 3671  */
 3672 int
 3673 vaccess(type, file_mode, file_uid, file_gid, acc_mode, cred, privused)
 3674         enum vtype type;
 3675         mode_t file_mode;
 3676         uid_t file_uid;
 3677         gid_t file_gid;
 3678         mode_t acc_mode;
 3679         struct ucred *cred;
 3680         int *privused;
 3681 {
 3682         mode_t dac_granted;
 3683 #ifdef CAPABILITIES
 3684         mode_t cap_granted;
 3685 #endif
 3686 
 3687         /*
 3688          * Look for a normal, non-privileged way to access the file/directory
 3689          * as requested.  If it exists, go with that.
 3690          */
 3691 
 3692         if (privused != NULL)
 3693                 *privused = 0;
 3694 
 3695         dac_granted = 0;
 3696 
 3697         /* Check the owner. */
 3698         if (cred->cr_uid == file_uid) {
 3699                 dac_granted |= VADMIN;
 3700                 if (file_mode & S_IXUSR)
 3701                         dac_granted |= VEXEC;
 3702                 if (file_mode & S_IRUSR)
 3703                         dac_granted |= VREAD;
 3704                 if (file_mode & S_IWUSR)
 3705                         dac_granted |= (VWRITE | VAPPEND);
 3706 
 3707                 if ((acc_mode & dac_granted) == acc_mode)
 3708                         return (0);
 3709 
 3710                 goto privcheck;
 3711         }
 3712 
 3713         /* Otherwise, check the groups (first match) */
 3714         if (groupmember(file_gid, cred)) {
 3715                 if (file_mode & S_IXGRP)
 3716                         dac_granted |= VEXEC;
 3717                 if (file_mode & S_IRGRP)
 3718                         dac_granted |= VREAD;
 3719                 if (file_mode & S_IWGRP)
 3720                         dac_granted |= (VWRITE | VAPPEND);
 3721 
 3722                 if ((acc_mode & dac_granted) == acc_mode)
 3723                         return (0);
 3724 
 3725                 goto privcheck;
 3726         }
 3727 
 3728         /* Otherwise, check everyone else. */
 3729         if (file_mode & S_IXOTH)
 3730                 dac_granted |= VEXEC;
 3731         if (file_mode & S_IROTH)
 3732                 dac_granted |= VREAD;
 3733         if (file_mode & S_IWOTH)
 3734                 dac_granted |= (VWRITE | VAPPEND);
 3735         if ((acc_mode & dac_granted) == acc_mode)
 3736                 return (0);
 3737 
 3738 privcheck:
 3739         if (!suser_cred(cred, PRISON_ROOT)) {
 3740                 /* XXX audit: privilege used */
 3741                 if (privused != NULL)
 3742                         *privused = 1;
 3743                 return (0);
 3744         }
 3745 
 3746 #ifdef CAPABILITIES
 3747         /*
 3748          * Build a capability mask to determine if the set of capabilities
 3749          * satisfies the requirements when combined with the granted mask
 3750          * from above.
 3751          * For each capability, if the capability is required, bitwise
 3752          * or the request type onto the cap_granted mask.
 3753          */
 3754         cap_granted = 0;
 3755 
 3756         if (type == VDIR) {
 3757                 /*
 3758                  * For directories, use CAP_DAC_READ_SEARCH to satisfy
 3759                  * VEXEC requests, instead of CAP_DAC_EXECUTE.
 3760                  */
 3761                 if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
 3762                     !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT))
 3763                         cap_granted |= VEXEC;
 3764         } else {
 3765                 if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
 3766                     !cap_check(cred, NULL, CAP_DAC_EXECUTE, PRISON_ROOT))
 3767                         cap_granted |= VEXEC;
 3768         }
 3769 
 3770         if ((acc_mode & VREAD) && ((dac_granted & VREAD) == 0) &&
 3771             !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT))
 3772                 cap_granted |= VREAD;
 3773 
 3774         if ((acc_mode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
 3775             !cap_check(cred, NULL, CAP_DAC_WRITE, PRISON_ROOT))
 3776                 cap_granted |= (VWRITE | VAPPEND);
 3777 
 3778         if ((acc_mode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
 3779             !cap_check(cred, NULL, CAP_FOWNER, PRISON_ROOT))
 3780                 cap_granted |= VADMIN;
 3781 
 3782         if ((acc_mode & (cap_granted | dac_granted)) == acc_mode) {
 3783                 /* XXX audit: privilege used */
 3784                 if (privused != NULL)
 3785                         *privused = 1;
 3786                 return (0);
 3787         }
 3788 #endif
 3789 
 3790         return ((acc_mode & VADMIN) ? EPERM : EACCES);
 3791 }
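/*
 * Editor's note: a worked example of the checks above (ignoring the
 * CAPABILITIES case).  For a file with mode 0640, owner uid 100 and
 * group gid 200:
 *
 *  - a credential with cr_uid == 100 requesting VREAD|VWRITE takes the
 *    owner branch; S_IRUSR and S_IWUSR yield dac_granted =
 *    VADMIN|VREAD|VWRITE|VAPPEND, so the request succeeds immediately;
 *  - a credential with cr_uid == 300 that is a member of gid 200 and
 *    requests VWRITE gets only VREAD from S_IRGRP, falls through to
 *    privcheck, and fails with EACCES unless suser_cred() grants
 *    privilege.
 */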
 3792 
 3793 /*
 3794  * Credential check based on process requesting service, and per-attribute
 3795  * permissions.
 3796  */
 3797 int
 3798 extattr_check_cred(struct vnode *vp, int attrnamespace,
 3799     struct ucred *cred, struct thread *td, int access)
 3800 {
 3801 
 3802         /*
 3803          * Kernel-invoked requests always succeed.
 3804          */
 3805         if (cred == NOCRED)
 3806                 return (0);
 3807 
 3808         /*
 3809          * Do not allow privileged processes in jail to directly
 3810          * manipulate system attributes.
 3811          *
 3812          * XXX What capability should apply here?
 3813          * Probably CAP_SYS_SETFFLAG.
 3814          */
 3815         switch (attrnamespace) {
 3816         case EXTATTR_NAMESPACE_SYSTEM:
 3817                 /* Potentially should be: return (EPERM); */
 3818                 return (suser_cred(cred, 0));
 3819         case EXTATTR_NAMESPACE_USER:
 3820                 return (VOP_ACCESS(vp, access, cred, td));
 3821         default:
 3822                 return (EPERM);
 3823         }
 3824 }
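/*
 * Editor's illustration (a hypothetical sketch, not part of vfs_subr.c):
 * the extattr syscall paths perform this check before calling into the
 * filesystem, roughly as follows for setting a user-namespace attribute.
 */
#if 0
static int
example_setextattr_check(struct vnode *vp, struct thread *td)
{
        int error;

        /* Writing a user-namespace attribute requires VWRITE access. */
        error = extattr_check_cred(vp, EXTATTR_NAMESPACE_USER,
            td->td_ucred, td, VWRITE);
        if (error != 0)
                return (error);
        /* ... proceed to VOP_SETEXTATTR() ... */
        return (0);
}
#endif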
