FreeBSD/Linux Kernel Cross Reference
sys/kern/vfs_mount.c

    1 /*-
    2  * SPDX-License-Identifier: BSD-3-Clause
    3  *
    4  * Copyright (c) 1999-2004 Poul-Henning Kamp
    5  * Copyright (c) 1999 Michael Smith
    6  * Copyright (c) 1989, 1993
    7  *      The Regents of the University of California.  All rights reserved.
    8  * (c) UNIX System Laboratories, Inc.
    9  * All or some portions of this file are derived from material licensed
   10  * to the University of California by American Telephone and Telegraph
   11  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
   12  * the permission of UNIX System Laboratories, Inc.
   13  *
   14  * Redistribution and use in source and binary forms, with or without
   15  * modification, are permitted provided that the following conditions
   16  * are met:
   17  * 1. Redistributions of source code must retain the above copyright
   18  *    notice, this list of conditions and the following disclaimer.
   19  * 2. Redistributions in binary form must reproduce the above copyright
   20  *    notice, this list of conditions and the following disclaimer in the
   21  *    documentation and/or other materials provided with the distribution.
   22  * 3. Neither the name of the University nor the names of its contributors
   23  *    may be used to endorse or promote products derived from this software
   24  *    without specific prior written permission.
   25  *
   26  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   36  * SUCH DAMAGE.
   37  */
   38 
   39 #include <sys/cdefs.h>
   40 __FBSDID("$FreeBSD$");
   41 
   42 #include <sys/param.h>
   43 #include <sys/conf.h>
   44 #include <sys/smp.h>
   45 #include <sys/devctl.h>
   46 #include <sys/eventhandler.h>
   47 #include <sys/fcntl.h>
   48 #include <sys/jail.h>
   49 #include <sys/kernel.h>
   50 #include <sys/ktr.h>
   51 #include <sys/libkern.h>
   52 #include <sys/limits.h>
   53 #include <sys/malloc.h>
   54 #include <sys/mount.h>
   55 #include <sys/mutex.h>
   56 #include <sys/namei.h>
   57 #include <sys/priv.h>
   58 #include <sys/proc.h>
   59 #include <sys/filedesc.h>
   60 #include <sys/reboot.h>
   61 #include <sys/sbuf.h>
   62 #include <sys/syscallsubr.h>
   63 #include <sys/sysproto.h>
   64 #include <sys/sx.h>
   65 #include <sys/sysctl.h>
   66 #include <sys/systm.h>
   67 #include <sys/taskqueue.h>
   68 #include <sys/vnode.h>
   69 #include <vm/uma.h>
   70 
   71 #include <geom/geom.h>
   72 
   73 #include <machine/stdarg.h>
   74 
   75 #include <security/audit/audit.h>
   76 #include <security/mac/mac_framework.h>
   77 
   78 #define VFS_MOUNTARG_SIZE_MAX   (1024 * 64)
   79 
   80 static int      vfs_domount(struct thread *td, const char *fstype, char *fspath,
   81                     uint64_t fsflags, struct vfsoptlist **optlist);
   82 static void     free_mntarg(struct mntarg *ma);
   83 
   84 static int      usermount = 0;
   85 SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0,
   86     "Unprivileged users may mount and unmount file systems");
   87 
   88 static bool     default_autoro = false;
   89 SYSCTL_BOOL(_vfs, OID_AUTO, default_autoro, CTLFLAG_RW, &default_autoro, 0,
   90     "Retry failed r/w mount as r/o if no explicit ro/rw option is specified");
   91 
   92 static bool     recursive_forced_unmount = false;
   93 SYSCTL_BOOL(_vfs, OID_AUTO, recursive_forced_unmount, CTLFLAG_RW,
   94     &recursive_forced_unmount, 0, "Recursively unmount stacked upper mounts"
   95     " when a file system is forcibly unmounted");
   96 
   97 static SYSCTL_NODE(_vfs, OID_AUTO, deferred_unmount,
   98     CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "deferred unmount controls");
   99 
  100 static unsigned int     deferred_unmount_retry_limit = 10;
  101 SYSCTL_UINT(_vfs_deferred_unmount, OID_AUTO, retry_limit, CTLFLAG_RW,
  102     &deferred_unmount_retry_limit, 0,
  103     "Maximum number of retries for deferred unmount failure");
  104 
  105 static int      deferred_unmount_retry_delay_hz;
  106 SYSCTL_INT(_vfs_deferred_unmount, OID_AUTO, retry_delay_hz, CTLFLAG_RW,
  107     &deferred_unmount_retry_delay_hz, 0,
  108     "Delay in units of [1/kern.hz]s when retrying a failed deferred unmount");
  109 
  110 static int      deferred_unmount_total_retries = 0;
  111 SYSCTL_INT(_vfs_deferred_unmount, OID_AUTO, total_retries, CTLFLAG_RD,
  112     &deferred_unmount_total_retries, 0,
  113     "Total number of retried deferred unmounts");
  114 
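/*
 * Illustrative usage (a hypothetical aside, not part of the source
 * file): the knobs declared above are ordinary sysctls, so they can
 * be tuned from userland with sysctl(8), e.g.:
 *
 *	sysctl vfs.usermount=1
 *	sysctl vfs.default_autoro=1
 *	sysctl vfs.deferred_unmount.retry_limit=20
 */
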
  115 MALLOC_DEFINE(M_MOUNT, "mount", "vfs mount structure");
  116 MALLOC_DEFINE(M_STATFS, "statfs", "statfs structure");
  117 static uma_zone_t mount_zone;
  118 
  119 /* List of mounted filesystems. */
  120 struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
  121 
  122 /* For any iteration/modification of mountlist */
  123 struct mtx_padalign __exclusive_cache_line mountlist_mtx;
  124 
  125 EVENTHANDLER_LIST_DEFINE(vfs_mounted);
  126 EVENTHANDLER_LIST_DEFINE(vfs_unmounted);
  127 
  128 static void vfs_deferred_unmount(void *arg, int pending);
  129 static struct timeout_task deferred_unmount_task;
  130 static struct mtx deferred_unmount_lock;
  131 MTX_SYSINIT(deferred_unmount, &deferred_unmount_lock, "deferred_unmount",
  132     MTX_DEF);
  133 static STAILQ_HEAD(, mount) deferred_unmount_list =
  134     STAILQ_HEAD_INITIALIZER(deferred_unmount_list);
  135 TASKQUEUE_DEFINE_THREAD(deferred_unmount);
  136 
  137 static void mount_devctl_event(const char *type, struct mount *mp, bool donew);
  138 
  139 /*
  140  * Global opts, taken by all filesystems
  141  */
  142 static const char *global_opts[] = {
  143         "errmsg",
  144         "fstype",
  145         "fspath",
  146         "ro",
  147         "rw",
  148         "nosuid",
  149         "noexec",
  150         NULL
  151 };
  152 
  153 static int
  154 mount_init(void *mem, int size, int flags)
  155 {
  156         struct mount *mp;
  157 
  158         mp = (struct mount *)mem;
  159         mtx_init(&mp->mnt_mtx, "struct mount mtx", NULL, MTX_DEF);
  160         mtx_init(&mp->mnt_listmtx, "struct mount vlist mtx", NULL, MTX_DEF);
  161         lockinit(&mp->mnt_explock, PVFS, "explock", 0, 0);
  162         mp->mnt_pcpu = uma_zalloc_pcpu(pcpu_zone_16, M_WAITOK | M_ZERO);
  163         mp->mnt_ref = 0;
  164         mp->mnt_vfs_ops = 1;
  165         mp->mnt_rootvnode = NULL;
  166         return (0);
  167 }
  168 
  169 static void
  170 mount_fini(void *mem, int size)
  171 {
  172         struct mount *mp;
  173 
  174         mp = (struct mount *)mem;
  175         uma_zfree_pcpu(pcpu_zone_16, mp->mnt_pcpu);
  176         lockdestroy(&mp->mnt_explock);
  177         mtx_destroy(&mp->mnt_listmtx);
  178         mtx_destroy(&mp->mnt_mtx);
  179 }
  180 
  181 static void
  182 vfs_mount_init(void *dummy __unused)
  183 {
  184         TIMEOUT_TASK_INIT(taskqueue_deferred_unmount, &deferred_unmount_task,
  185             0, vfs_deferred_unmount, NULL);
  186         deferred_unmount_retry_delay_hz = hz;
  187         mount_zone = uma_zcreate("Mountpoints", sizeof(struct mount), NULL,
  188             NULL, mount_init, mount_fini, UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);
  189         mtx_init(&mountlist_mtx, "mountlist", NULL, MTX_DEF);
  190 }
  191 SYSINIT(vfs_mount, SI_SUB_VFS, SI_ORDER_ANY, vfs_mount_init, NULL);
  192 
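/*
 * Hypothetical sketch (not part of the source file): because the zone
 * below is created with init/fini callbacks (not ctor/dtor) and
 * UMA_ZONE_NOFREE, the locks embedded in each struct mount are set up
 * once per item and stay valid across allocation cycles, so a consumer
 * sees them already initialized:
 */
#if 0
	struct mount *mp;

	mp = uma_zalloc(mount_zone, M_WAITOK);	/* mnt_mtx etc. already live */
	mtx_lock(&mp->mnt_mtx);			/* usable without mtx_init() */
	mtx_unlock(&mp->mnt_mtx);
	uma_zfree(mount_zone, mp);		/* locks remain initialized */
#endif
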
  193 /*
  194  * ---------------------------------------------------------------------
  195  * Functions for building and sanitizing the mount options
  196  */
  197 
  198 /* Remove one mount option. */
  199 static void
  200 vfs_freeopt(struct vfsoptlist *opts, struct vfsopt *opt)
  201 {
  202 
  203         TAILQ_REMOVE(opts, opt, link);
  204         free(opt->name, M_MOUNT);
  205         if (opt->value != NULL)
  206                 free(opt->value, M_MOUNT);
  207         free(opt, M_MOUNT);
  208 }
  209 
  210 /* Release all resources related to the mount options. */
  211 void
  212 vfs_freeopts(struct vfsoptlist *opts)
  213 {
  214         struct vfsopt *opt;
  215 
  216         while (!TAILQ_EMPTY(opts)) {
  217                 opt = TAILQ_FIRST(opts);
  218                 vfs_freeopt(opts, opt);
  219         }
  220         free(opts, M_MOUNT);
  221 }
  222 
  223 void
  224 vfs_deleteopt(struct vfsoptlist *opts, const char *name)
  225 {
  226         struct vfsopt *opt, *temp;
  227 
  228         if (opts == NULL)
  229                 return;
   230         TAILQ_FOREACH_SAFE(opt, opts, link, temp) {
  231                 if (strcmp(opt->name, name) == 0)
  232                         vfs_freeopt(opts, opt);
  233         }
  234 }
  235 
  236 static int
  237 vfs_isopt_ro(const char *opt)
  238 {
  239 
  240         if (strcmp(opt, "ro") == 0 || strcmp(opt, "rdonly") == 0 ||
  241             strcmp(opt, "norw") == 0)
  242                 return (1);
  243         return (0);
  244 }
  245 
  246 static int
  247 vfs_isopt_rw(const char *opt)
  248 {
  249 
  250         if (strcmp(opt, "rw") == 0 || strcmp(opt, "noro") == 0)
  251                 return (1);
  252         return (0);
  253 }
  254 
  255 /*
  256  * Check if options are equal (with or without the "no" prefix).
  257  */
  258 static int
  259 vfs_equalopts(const char *opt1, const char *opt2)
  260 {
  261         char *p;
  262 
  263         /* "opt" vs. "opt" or "noopt" vs. "noopt" */
  264         if (strcmp(opt1, opt2) == 0)
  265                 return (1);
  266         /* "noopt" vs. "opt" */
  267         if (strncmp(opt1, "no", 2) == 0 && strcmp(opt1 + 2, opt2) == 0)
  268                 return (1);
  269         /* "opt" vs. "noopt" */
  270         if (strncmp(opt2, "no", 2) == 0 && strcmp(opt1, opt2 + 2) == 0)
  271                 return (1);
  272         while ((p = strchr(opt1, '.')) != NULL &&
  273             !strncmp(opt1, opt2, ++p - opt1)) {
  274                 opt2 += p - opt1;
  275                 opt1 = p;
  276                 /* "foo.noopt" vs. "foo.opt" */
  277                 if (strncmp(opt1, "no", 2) == 0 && strcmp(opt1 + 2, opt2) == 0)
  278                         return (1);
  279                 /* "foo.opt" vs. "foo.noopt" */
  280                 if (strncmp(opt2, "no", 2) == 0 && strcmp(opt1, opt2 + 2) == 0)
  281                         return (1);
  282         }
  283         /* "ro" / "rdonly" / "norw" / "rw" / "noro" */
  284         if ((vfs_isopt_ro(opt1) || vfs_isopt_rw(opt1)) &&
  285             (vfs_isopt_ro(opt2) || vfs_isopt_rw(opt2)))
  286                 return (1);
  287         return (0);
  288 }
  289 
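/*
 * Hypothetical examples (not part of the source file) of pairs that
 * vfs_equalopts() above treats as the same option:
 */
#if 0
	MPASS(vfs_equalopts("noatime", "atime"));	/* "no" prefix */
	MPASS(vfs_equalopts("foo.noexec", "foo.exec"));	/* nested option */
	MPASS(vfs_equalopts("ro", "rw"));		/* ro/rw family */
#endif
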
  290 /*
  291  * If a mount option is specified several times,
  292  * (with or without the "no" prefix) only keep
  293  * the last occurrence of it.
  294  */
  295 static void
  296 vfs_sanitizeopts(struct vfsoptlist *opts)
  297 {
  298         struct vfsopt *opt, *opt2, *tmp;
  299 
  300         TAILQ_FOREACH_REVERSE(opt, opts, vfsoptlist, link) {
  301                 opt2 = TAILQ_PREV(opt, vfsoptlist, link);
  302                 while (opt2 != NULL) {
  303                         if (vfs_equalopts(opt->name, opt2->name)) {
  304                                 tmp = TAILQ_PREV(opt2, vfsoptlist, link);
  305                                 vfs_freeopt(opts, opt2);
  306                                 opt2 = tmp;
  307                         } else {
  308                                 opt2 = TAILQ_PREV(opt2, vfsoptlist, link);
  309                         }
  310                 }
  311         }
  312 }
  313 
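/*
 * Worked example (illustrative): for a list built from "ro, noatime,
 * rw", the pass above walks from the tail; "rw" removes the earlier
 * "ro" (same ro/rw family), leaving "noatime, rw".
 */
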
  314 /*
  315  * Build a linked list of mount options from a struct uio.
  316  */
  317 int
  318 vfs_buildopts(struct uio *auio, struct vfsoptlist **options)
  319 {
  320         struct vfsoptlist *opts;
  321         struct vfsopt *opt;
  322         size_t memused, namelen, optlen;
  323         unsigned int i, iovcnt;
  324         int error;
  325 
  326         opts = malloc(sizeof(struct vfsoptlist), M_MOUNT, M_WAITOK);
  327         TAILQ_INIT(opts);
  328         memused = 0;
  329         iovcnt = auio->uio_iovcnt;
  330         for (i = 0; i < iovcnt; i += 2) {
  331                 namelen = auio->uio_iov[i].iov_len;
  332                 optlen = auio->uio_iov[i + 1].iov_len;
  333                 memused += sizeof(struct vfsopt) + optlen + namelen;
  334                 /*
  335                  * Avoid consuming too much memory, and attempts to overflow
  336                  * memused.
  337                  */
  338                 if (memused > VFS_MOUNTARG_SIZE_MAX ||
  339                     optlen > VFS_MOUNTARG_SIZE_MAX ||
  340                     namelen > VFS_MOUNTARG_SIZE_MAX) {
  341                         error = EINVAL;
  342                         goto bad;
  343                 }
  344 
  345                 opt = malloc(sizeof(struct vfsopt), M_MOUNT, M_WAITOK);
  346                 opt->name = malloc(namelen, M_MOUNT, M_WAITOK);
  347                 opt->value = NULL;
  348                 opt->len = 0;
  349                 opt->pos = i / 2;
  350                 opt->seen = 0;
  351 
  352                 /*
  353                  * Do this early, so jumps to "bad" will free the current
  354                  * option.
  355                  */
  356                 TAILQ_INSERT_TAIL(opts, opt, link);
  357 
  358                 if (auio->uio_segflg == UIO_SYSSPACE) {
  359                         bcopy(auio->uio_iov[i].iov_base, opt->name, namelen);
  360                 } else {
  361                         error = copyin(auio->uio_iov[i].iov_base, opt->name,
  362                             namelen);
  363                         if (error)
  364                                 goto bad;
  365                 }
  366                 /* Ensure names are null-terminated strings. */
  367                 if (namelen == 0 || opt->name[namelen - 1] != '\0') {
  368                         error = EINVAL;
  369                         goto bad;
  370                 }
  371                 if (optlen != 0) {
  372                         opt->len = optlen;
  373                         opt->value = malloc(optlen, M_MOUNT, M_WAITOK);
  374                         if (auio->uio_segflg == UIO_SYSSPACE) {
  375                                 bcopy(auio->uio_iov[i + 1].iov_base, opt->value,
  376                                     optlen);
  377                         } else {
  378                                 error = copyin(auio->uio_iov[i + 1].iov_base,
  379                                     opt->value, optlen);
  380                                 if (error)
  381                                         goto bad;
  382                         }
  383                 }
  384         }
  385         vfs_sanitizeopts(opts);
  386         *options = opts;
  387         return (0);
  388 bad:
  389         vfs_freeopts(opts);
  390         return (error);
  391 }
  392 
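/*
 * The uio handled above is expected to carry name/value iovec pairs;
 * illustratively, iov[0] = "fstype", iov[1] = "ufs", iov[2] = "fspath",
 * iov[3] = "/", with every name NUL-terminated and values optionally
 * empty (optlen 0).
 */
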
  393 /*
  394  * Merge the old mount options with the new ones passed
  395  * in the MNT_UPDATE case.
  396  *
  397  * XXX: This function will keep a "nofoo" option in the new
   398  * options.  E.g., if the option's canonical name is "foo",
  399  * "nofoo" ends up in the mount point's active options.
  400  */
  401 static void
  402 vfs_mergeopts(struct vfsoptlist *toopts, struct vfsoptlist *oldopts)
  403 {
  404         struct vfsopt *opt, *new;
  405 
  406         TAILQ_FOREACH(opt, oldopts, link) {
  407                 new = malloc(sizeof(struct vfsopt), M_MOUNT, M_WAITOK);
  408                 new->name = strdup(opt->name, M_MOUNT);
  409                 if (opt->len != 0) {
  410                         new->value = malloc(opt->len, M_MOUNT, M_WAITOK);
  411                         bcopy(opt->value, new->value, opt->len);
  412                 } else
  413                         new->value = NULL;
  414                 new->len = opt->len;
  415                 new->seen = opt->seen;
  416                 TAILQ_INSERT_HEAD(toopts, new, link);
  417         }
  418         vfs_sanitizeopts(toopts);
  419 }
  420 
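/*
 * Worked example (illustrative): with old options { "noatime" } and an
 * MNT_UPDATE request carrying { "rw" }, the merged list is
 * { "noatime", "rw" }; a new "atime" option would instead displace the
 * old "noatime", per vfs_sanitizeopts().
 */
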
  421 /*
  422  * Mount a filesystem.
  423  */
  424 #ifndef _SYS_SYSPROTO_H_
  425 struct nmount_args {
  426         struct iovec *iovp;
  427         unsigned int iovcnt;
  428         int flags;
  429 };
  430 #endif
  431 int
  432 sys_nmount(struct thread *td, struct nmount_args *uap)
  433 {
  434         struct uio *auio;
  435         int error;
  436         u_int iovcnt;
  437         uint64_t flags;
  438 
  439         /*
   440          * Mount flags are now 64-bits. On 32-bit architectures only
  441          * 32-bits are passed in, but from here on everything handles
  442          * 64-bit flags correctly.
  443          */
  444         flags = uap->flags;
  445 
  446         AUDIT_ARG_FFLAGS(flags);
  447         CTR4(KTR_VFS, "%s: iovp %p with iovcnt %d and flags %d", __func__,
  448             uap->iovp, uap->iovcnt, flags);
  449 
  450         /*
  451          * Filter out MNT_ROOTFS.  We do not want clients of nmount() in
  452          * userspace to set this flag, but we must filter it out if we want
  453          * MNT_UPDATE on the root file system to work.
  454          * MNT_ROOTFS should only be set by the kernel when mounting its
  455          * root file system.
  456          */
  457         flags &= ~MNT_ROOTFS;
  458 
  459         iovcnt = uap->iovcnt;
  460         /*
   461          * Check that we have an even number of iovecs
  462          * and that we have at least two options.
  463          */
  464         if ((iovcnt & 1) || (iovcnt < 4)) {
  465                 CTR2(KTR_VFS, "%s: failed for invalid iovcnt %d", __func__,
  466                     uap->iovcnt);
  467                 return (EINVAL);
  468         }
  469 
  470         error = copyinuio(uap->iovp, iovcnt, &auio);
  471         if (error) {
  472                 CTR2(KTR_VFS, "%s: failed for invalid uio op with %d errno",
  473                     __func__, error);
  474                 return (error);
  475         }
  476         error = vfs_donmount(td, flags, auio);
  477 
  478         free(auio, M_IOV);
  479         return (error);
  480 }
  481 
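/*
 * Hypothetical userland sketch (not part of this file, assuming
 * <sys/uio.h>, <sys/mount.h> and <err.h>): nmount(2) consumes the
 * name/value iovec pairs validated above; "fstype" and "fspath" are
 * the two mandatory options, hence the iovcnt >= 4 check.
 */
#if 0
	struct iovec iov[] = {
		{ .iov_base = "fstype", .iov_len = sizeof("fstype") },
		{ .iov_base = "nullfs", .iov_len = sizeof("nullfs") },
		{ .iov_base = "fspath", .iov_len = sizeof("fspath") },
		{ .iov_base = "/mnt",   .iov_len = sizeof("/mnt")   },
		{ .iov_base = "target", .iov_len = sizeof("target") },
		{ .iov_base = "/data",  .iov_len = sizeof("/data")  },
	};

	if (nmount(iov, nitems(iov), MNT_RDONLY) != 0)
		err(1, "nmount");
#endif
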
  482 /*
  483  * ---------------------------------------------------------------------
  484  * Various utility functions
  485  */
  486 
  487 /*
  488  * Get a reference on a mount point from a vnode.
  489  *
  490  * The vnode is allowed to be passed unlocked and race against dooming. Note in
  491  * such case there are no guarantees the referenced mount point will still be
  492  * associated with it after the function returns.
  493  */
  494 struct mount *
  495 vfs_ref_from_vp(struct vnode *vp)
  496 {
  497         struct mount *mp;
  498         struct mount_pcpu *mpcpu;
  499 
  500         mp = atomic_load_ptr(&vp->v_mount);
  501         if (__predict_false(mp == NULL)) {
  502                 return (mp);
  503         }
  504         if (vfs_op_thread_enter(mp, mpcpu)) {
  505                 if (__predict_true(mp == vp->v_mount)) {
  506                         vfs_mp_count_add_pcpu(mpcpu, ref, 1);
  507                         vfs_op_thread_exit(mp, mpcpu);
  508                 } else {
  509                         vfs_op_thread_exit(mp, mpcpu);
  510                         mp = NULL;
  511                 }
  512         } else {
  513                 MNT_ILOCK(mp);
  514                 if (mp == vp->v_mount) {
  515                         MNT_REF(mp);
  516                         MNT_IUNLOCK(mp);
  517                 } else {
  518                         MNT_IUNLOCK(mp);
  519                         mp = NULL;
  520                 }
  521         }
  522         return (mp);
  523 }
  524 
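/*
 * Hypothetical caller sketch (consume_mp() is a placeholder, not from
 * this file): the reference only guarantees the mount stays allocated;
 * if the vnode/mount association matters, re-check vp->v_mount after
 * taking the reference.
 */
#if 0
	struct mount *mp;

	mp = vfs_ref_from_vp(vp);
	if (mp != NULL) {
		if (vp->v_mount == mp)
			consume_mp(mp);		/* placeholder */
		vfs_rel(mp);
	}
#endif
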
  525 void
  526 vfs_ref(struct mount *mp)
  527 {
  528         struct mount_pcpu *mpcpu;
  529 
  530         CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
  531         if (vfs_op_thread_enter(mp, mpcpu)) {
  532                 vfs_mp_count_add_pcpu(mpcpu, ref, 1);
  533                 vfs_op_thread_exit(mp, mpcpu);
  534                 return;
  535         }
  536 
  537         MNT_ILOCK(mp);
  538         MNT_REF(mp);
  539         MNT_IUNLOCK(mp);
  540 }
  541 
  542 /*
  543  * Register ump as an upper mount of the mount associated with
  544  * vnode vp.  This registration will be tracked through
  545  * mount_upper_node upper, which should be allocated by the
  546  * caller and stored in per-mount data associated with mp.
  547  *
  548  * If successful, this function will return the mount associated
  549  * with vp, and will ensure that it cannot be unmounted until
  550  * ump has been unregistered as one of its upper mounts.
  551  * 
  552  * Upon failure this function will return NULL.
  553  */
  554 struct mount *
  555 vfs_register_upper_from_vp(struct vnode *vp, struct mount *ump,
  556     struct mount_upper_node *upper)
  557 {
  558         struct mount *mp;
  559 
  560         mp = atomic_load_ptr(&vp->v_mount);
  561         if (mp == NULL)
  562                 return (NULL);
  563         MNT_ILOCK(mp);
  564         if (mp != vp->v_mount ||
  565             ((mp->mnt_kern_flag & (MNTK_UNMOUNT | MNTK_RECURSE)) != 0)) {
  566                 MNT_IUNLOCK(mp);
  567                 return (NULL);
  568         }
  569         KASSERT(ump != mp, ("upper and lower mounts are identical"));
  570         upper->mp = ump;
  571         MNT_REF(mp);
  572         TAILQ_INSERT_TAIL(&mp->mnt_uppers, upper, mnt_upper_link);
  573         MNT_IUNLOCK(mp);
  574         return (mp);
  575 }
  576 
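/*
 * Hypothetical stacking sketch ("xmp" and its field name are
 * illustrative, not from this file): an upper filesystem such as
 * nullfs pins its lower mount this way and must undo the registration
 * before the lower mount can be unmounted.
 */
#if 0
	lowermp = vfs_register_upper_from_vp(lowerrootvp, mp,
	    &xmp->upper_node);
	if (lowermp == NULL)
		return (EBUSY);		/* lower mount is going away */
	/* ... on unmount of the upper filesystem ... */
	vfs_unregister_upper(lowermp, &xmp->upper_node);
#endif
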
  577 /*
  578  * Register upper mount ump to receive vnode unlink/reclaim
  579  * notifications from lower mount mp. This registration will
  580  * be tracked through mount_upper_node upper, which should be
  581  * allocated by the caller and stored in per-mount data
  582  * associated with mp.
  583  *
  584  * ump must already be registered as an upper mount of mp
  585  * through a call to vfs_register_upper_from_vp().
  586  */
  587 void
  588 vfs_register_for_notification(struct mount *mp, struct mount *ump,
  589     struct mount_upper_node *upper)
  590 {
  591         upper->mp = ump;
  592         MNT_ILOCK(mp);
  593         TAILQ_INSERT_TAIL(&mp->mnt_notify, upper, mnt_upper_link);
  594         MNT_IUNLOCK(mp);
  595 }
  596 
  597 static void
  598 vfs_drain_upper_locked(struct mount *mp)
  599 {
  600         mtx_assert(MNT_MTX(mp), MA_OWNED);
  601         while (mp->mnt_upper_pending != 0) {
  602                 mp->mnt_kern_flag |= MNTK_UPPER_WAITER;
  603                 msleep(&mp->mnt_uppers, MNT_MTX(mp), 0, "mntupw", 0);
  604         }
  605 }
  606 
  607 /*
  608  * Undo a previous call to vfs_register_for_notification().
  609  * The mount represented by upper must be currently registered
  610  * as an upper mount for mp.
  611  */
  612 void
  613 vfs_unregister_for_notification(struct mount *mp,
  614     struct mount_upper_node *upper)
  615 {
  616         MNT_ILOCK(mp);
  617         vfs_drain_upper_locked(mp);
  618         TAILQ_REMOVE(&mp->mnt_notify, upper, mnt_upper_link);
  619         MNT_IUNLOCK(mp);
  620 }
  621 
  622 /*
  623  * Undo a previous call to vfs_register_upper_from_vp().
  624  * This must be done before mp can be unmounted.
  625  */
  626 void
  627 vfs_unregister_upper(struct mount *mp, struct mount_upper_node *upper)
  628 {
  629         MNT_ILOCK(mp);
  630         KASSERT((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0,
  631             ("registered upper with pending unmount"));
  632         vfs_drain_upper_locked(mp);
  633         TAILQ_REMOVE(&mp->mnt_uppers, upper, mnt_upper_link);
  634         if ((mp->mnt_kern_flag & MNTK_TASKQUEUE_WAITER) != 0 &&
  635             TAILQ_EMPTY(&mp->mnt_uppers)) {
  636                 mp->mnt_kern_flag &= ~MNTK_TASKQUEUE_WAITER;
  637                 wakeup(&mp->mnt_taskqueue_link);
  638         }
  639         MNT_REL(mp);
  640         MNT_IUNLOCK(mp);
  641 }
  642 
  643 void
  644 vfs_rel(struct mount *mp)
  645 {
  646         struct mount_pcpu *mpcpu;
  647 
  648         CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
  649         if (vfs_op_thread_enter(mp, mpcpu)) {
  650                 vfs_mp_count_sub_pcpu(mpcpu, ref, 1);
  651                 vfs_op_thread_exit(mp, mpcpu);
  652                 return;
  653         }
  654 
  655         MNT_ILOCK(mp);
  656         MNT_REL(mp);
  657         MNT_IUNLOCK(mp);
  658 }
  659 
  660 /*
  661  * Allocate and initialize the mount point struct.
  662  */
  663 struct mount *
  664 vfs_mount_alloc(struct vnode *vp, struct vfsconf *vfsp, const char *fspath,
  665     struct ucred *cred)
  666 {
  667         struct mount *mp;
  668 
  669         mp = uma_zalloc(mount_zone, M_WAITOK);
  670         bzero(&mp->mnt_startzero,
  671             __rangeof(struct mount, mnt_startzero, mnt_endzero));
  672         mp->mnt_kern_flag = 0;
  673         mp->mnt_flag = 0;
  674         mp->mnt_rootvnode = NULL;
  675         mp->mnt_vnodecovered = NULL;
  676         mp->mnt_op = NULL;
  677         mp->mnt_vfc = NULL;
  678         TAILQ_INIT(&mp->mnt_nvnodelist);
  679         mp->mnt_nvnodelistsize = 0;
  680         TAILQ_INIT(&mp->mnt_lazyvnodelist);
  681         mp->mnt_lazyvnodelistsize = 0;
  682         MPPASS(mp->mnt_ref == 0 && mp->mnt_lockref == 0 &&
  683             mp->mnt_writeopcount == 0, mp);
  684         MPASSERT(mp->mnt_vfs_ops == 1, mp,
  685             ("vfs_ops should be 1 but %d found", mp->mnt_vfs_ops));
  686         (void) vfs_busy(mp, MBF_NOWAIT);
  687         atomic_add_acq_int(&vfsp->vfc_refcount, 1);
  688         mp->mnt_op = vfsp->vfc_vfsops;
  689         mp->mnt_vfc = vfsp;
  690         mp->mnt_stat.f_type = vfsp->vfc_typenum;
  691         mp->mnt_gen++;
  692         strlcpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
  693         mp->mnt_vnodecovered = vp;
  694         mp->mnt_cred = crdup(cred);
  695         mp->mnt_stat.f_owner = cred->cr_uid;
  696         strlcpy(mp->mnt_stat.f_mntonname, fspath, MNAMELEN);
  697         mp->mnt_iosize_max = DFLTPHYS;
  698 #ifdef MAC
  699         mac_mount_init(mp);
  700         mac_mount_create(cred, mp);
  701 #endif
  702         arc4rand(&mp->mnt_hashseed, sizeof mp->mnt_hashseed, 0);
  703         mp->mnt_upper_pending = 0;
  704         TAILQ_INIT(&mp->mnt_uppers);
  705         TAILQ_INIT(&mp->mnt_notify);
  706         mp->mnt_taskqueue_flags = 0;
  707         mp->mnt_unmount_retries = 0;
  708         return (mp);
  709 }
  710 
  711 /*
  712  * Destroy the mount struct previously allocated by vfs_mount_alloc().
  713  */
  714 void
  715 vfs_mount_destroy(struct mount *mp)
  716 {
  717 
  718         MPPASS(mp->mnt_vfs_ops != 0, mp);
  719 
  720         vfs_assert_mount_counters(mp);
  721 
  722         MNT_ILOCK(mp);
  723         mp->mnt_kern_flag |= MNTK_REFEXPIRE;
  724         if (mp->mnt_kern_flag & MNTK_MWAIT) {
  725                 mp->mnt_kern_flag &= ~MNTK_MWAIT;
  726                 wakeup(mp);
  727         }
  728         while (mp->mnt_ref)
  729                 msleep(mp, MNT_MTX(mp), PVFS, "mntref", 0);
  730         KASSERT(mp->mnt_ref == 0,
  731             ("%s: invalid refcount in the drain path @ %s:%d", __func__,
  732             __FILE__, __LINE__));
  733         MPPASS(mp->mnt_writeopcount == 0, mp);
  734         MPPASS(mp->mnt_secondary_writes == 0, mp);
  735         atomic_subtract_rel_int(&mp->mnt_vfc->vfc_refcount, 1);
  736         if (!TAILQ_EMPTY(&mp->mnt_nvnodelist)) {
  737                 struct vnode *vp;
  738 
  739                 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes)
  740                         vn_printf(vp, "dangling vnode ");
  741                 panic("unmount: dangling vnode");
  742         }
  743         KASSERT(mp->mnt_upper_pending == 0, ("mnt_upper_pending"));
  744         KASSERT(TAILQ_EMPTY(&mp->mnt_uppers), ("mnt_uppers"));
  745         KASSERT(TAILQ_EMPTY(&mp->mnt_notify), ("mnt_notify"));
  746         MPPASS(mp->mnt_nvnodelistsize == 0, mp);
  747         MPPASS(mp->mnt_lazyvnodelistsize == 0, mp);
  748         MPPASS(mp->mnt_lockref == 0, mp);
  749         MNT_IUNLOCK(mp);
  750 
  751         MPASSERT(mp->mnt_vfs_ops == 1, mp,
  752             ("vfs_ops should be 1 but %d found", mp->mnt_vfs_ops));
  753 
  754         MPASSERT(mp->mnt_rootvnode == NULL, mp,
  755             ("mount point still has a root vnode %p", mp->mnt_rootvnode));
  756 
  757         if (mp->mnt_vnodecovered != NULL)
  758                 vrele(mp->mnt_vnodecovered);
  759 #ifdef MAC
  760         mac_mount_destroy(mp);
  761 #endif
  762         if (mp->mnt_opt != NULL)
  763                 vfs_freeopts(mp->mnt_opt);
  764         crfree(mp->mnt_cred);
  765         uma_zfree(mount_zone, mp);
  766 }
  767 
  768 static bool
  769 vfs_should_downgrade_to_ro_mount(uint64_t fsflags, int error)
  770 {
   771         /* This is an upgrade of an existing mount. */
  772         if ((fsflags & MNT_UPDATE) != 0)
  773                 return (false);
  774         /* This is already an R/O mount. */
  775         if ((fsflags & MNT_RDONLY) != 0)
  776                 return (false);
  777 
  778         switch (error) {
  779         case ENODEV:    /* generic, geom, ... */
  780         case EACCES:    /* cam/scsi, ... */
  781         case EROFS:     /* md, mmcsd, ... */
  782                 /*
  783                  * These errors can be returned by the storage layer to signal
  784                  * that the media is read-only.  No harm in the R/O mount
  785                  * attempt if the error was returned for some other reason.
  786                  */
  787                 return (true);
  788         default:
  789                 return (false);
  790         }
  791 }
  792 
  793 int
  794 vfs_donmount(struct thread *td, uint64_t fsflags, struct uio *fsoptions)
  795 {
  796         struct vfsoptlist *optlist;
  797         struct vfsopt *opt, *tmp_opt;
  798         char *fstype, *fspath, *errmsg;
  799         int error, fstypelen, fspathlen, errmsg_len, errmsg_pos;
  800         bool autoro;
  801 
  802         errmsg = fspath = NULL;
  803         errmsg_len = fspathlen = 0;
  804         errmsg_pos = -1;
  805         autoro = default_autoro;
  806 
  807         error = vfs_buildopts(fsoptions, &optlist);
  808         if (error)
  809                 return (error);
  810 
  811         if (vfs_getopt(optlist, "errmsg", (void **)&errmsg, &errmsg_len) == 0)
  812                 errmsg_pos = vfs_getopt_pos(optlist, "errmsg");
  813 
  814         /*
  815          * We need these two options before the others,
  816          * and they are mandatory for any filesystem.
  817          * Ensure they are NUL terminated as well.
  818          */
  819         fstypelen = 0;
  820         error = vfs_getopt(optlist, "fstype", (void **)&fstype, &fstypelen);
  821         if (error || fstypelen <= 0 || fstype[fstypelen - 1] != '\0') {
  822                 error = EINVAL;
  823                 if (errmsg != NULL)
  824                         strncpy(errmsg, "Invalid fstype", errmsg_len);
  825                 goto bail;
  826         }
  827         fspathlen = 0;
  828         error = vfs_getopt(optlist, "fspath", (void **)&fspath, &fspathlen);
  829         if (error || fspathlen <= 0 || fspath[fspathlen - 1] != '\0') {
  830                 error = EINVAL;
  831                 if (errmsg != NULL)
  832                         strncpy(errmsg, "Invalid fspath", errmsg_len);
  833                 goto bail;
  834         }
  835 
  836         /*
  837          * We need to see if we have the "update" option
  838          * before we call vfs_domount(), since vfs_domount() has special
  839          * logic based on MNT_UPDATE.  This is very important
  840          * when we want to update the root filesystem.
  841          */
  842         TAILQ_FOREACH_SAFE(opt, optlist, link, tmp_opt) {
  843                 int do_freeopt = 0;
  844 
  845                 if (strcmp(opt->name, "update") == 0) {
  846                         fsflags |= MNT_UPDATE;
  847                         do_freeopt = 1;
  848                 }
  849                 else if (strcmp(opt->name, "async") == 0)
  850                         fsflags |= MNT_ASYNC;
  851                 else if (strcmp(opt->name, "force") == 0) {
  852                         fsflags |= MNT_FORCE;
  853                         do_freeopt = 1;
  854                 }
  855                 else if (strcmp(opt->name, "reload") == 0) {
  856                         fsflags |= MNT_RELOAD;
  857                         do_freeopt = 1;
  858                 }
  859                 else if (strcmp(opt->name, "multilabel") == 0)
  860                         fsflags |= MNT_MULTILABEL;
  861                 else if (strcmp(opt->name, "noasync") == 0)
  862                         fsflags &= ~MNT_ASYNC;
  863                 else if (strcmp(opt->name, "noatime") == 0)
  864                         fsflags |= MNT_NOATIME;
  865                 else if (strcmp(opt->name, "atime") == 0) {
  866                         free(opt->name, M_MOUNT);
  867                         opt->name = strdup("nonoatime", M_MOUNT);
  868                 }
  869                 else if (strcmp(opt->name, "noclusterr") == 0)
  870                         fsflags |= MNT_NOCLUSTERR;
  871                 else if (strcmp(opt->name, "clusterr") == 0) {
  872                         free(opt->name, M_MOUNT);
  873                         opt->name = strdup("nonoclusterr", M_MOUNT);
  874                 }
  875                 else if (strcmp(opt->name, "noclusterw") == 0)
  876                         fsflags |= MNT_NOCLUSTERW;
  877                 else if (strcmp(opt->name, "clusterw") == 0) {
  878                         free(opt->name, M_MOUNT);
  879                         opt->name = strdup("nonoclusterw", M_MOUNT);
  880                 }
  881                 else if (strcmp(opt->name, "noexec") == 0)
  882                         fsflags |= MNT_NOEXEC;
  883                 else if (strcmp(opt->name, "exec") == 0) {
  884                         free(opt->name, M_MOUNT);
  885                         opt->name = strdup("nonoexec", M_MOUNT);
  886                 }
  887                 else if (strcmp(opt->name, "nosuid") == 0)
  888                         fsflags |= MNT_NOSUID;
  889                 else if (strcmp(opt->name, "suid") == 0) {
  890                         free(opt->name, M_MOUNT);
  891                         opt->name = strdup("nonosuid", M_MOUNT);
  892                 }
  893                 else if (strcmp(opt->name, "nosymfollow") == 0)
  894                         fsflags |= MNT_NOSYMFOLLOW;
  895                 else if (strcmp(opt->name, "symfollow") == 0) {
  896                         free(opt->name, M_MOUNT);
  897                         opt->name = strdup("nonosymfollow", M_MOUNT);
  898                 }
  899                 else if (strcmp(opt->name, "noro") == 0) {
  900                         fsflags &= ~MNT_RDONLY;
  901                         autoro = false;
  902                 }
  903                 else if (strcmp(opt->name, "rw") == 0) {
  904                         fsflags &= ~MNT_RDONLY;
  905                         autoro = false;
  906                 }
  907                 else if (strcmp(opt->name, "ro") == 0) {
  908                         fsflags |= MNT_RDONLY;
  909                         autoro = false;
  910                 }
  911                 else if (strcmp(opt->name, "rdonly") == 0) {
  912                         free(opt->name, M_MOUNT);
  913                         opt->name = strdup("ro", M_MOUNT);
  914                         fsflags |= MNT_RDONLY;
  915                         autoro = false;
  916                 }
  917                 else if (strcmp(opt->name, "autoro") == 0) {
  918                         do_freeopt = 1;
  919                         autoro = true;
  920                 }
  921                 else if (strcmp(opt->name, "suiddir") == 0)
  922                         fsflags |= MNT_SUIDDIR;
  923                 else if (strcmp(opt->name, "sync") == 0)
  924                         fsflags |= MNT_SYNCHRONOUS;
  925                 else if (strcmp(opt->name, "union") == 0)
  926                         fsflags |= MNT_UNION;
  927                 else if (strcmp(opt->name, "export") == 0)
  928                         fsflags |= MNT_EXPORTED;
  929                 else if (strcmp(opt->name, "automounted") == 0) {
  930                         fsflags |= MNT_AUTOMOUNTED;
  931                         do_freeopt = 1;
  932                 } else if (strcmp(opt->name, "nocover") == 0) {
  933                         fsflags |= MNT_NOCOVER;
  934                         do_freeopt = 1;
  935                 } else if (strcmp(opt->name, "cover") == 0) {
  936                         fsflags &= ~MNT_NOCOVER;
  937                         do_freeopt = 1;
  938                 } else if (strcmp(opt->name, "emptydir") == 0) {
  939                         fsflags |= MNT_EMPTYDIR;
  940                         do_freeopt = 1;
  941                 } else if (strcmp(opt->name, "noemptydir") == 0) {
  942                         fsflags &= ~MNT_EMPTYDIR;
  943                         do_freeopt = 1;
  944                 }
  945                 if (do_freeopt)
  946                         vfs_freeopt(optlist, opt);
  947         }
  948 
  949         /*
  950          * Be ultra-paranoid about making sure the type and fspath
  951          * variables will fit in our mp buffers, including the
  952          * terminating NUL.
  953          */
  954         if (fstypelen > MFSNAMELEN || fspathlen > MNAMELEN) {
  955                 error = ENAMETOOLONG;
  956                 goto bail;
  957         }
  958 
  959         error = vfs_domount(td, fstype, fspath, fsflags, &optlist);
  960         if (error == ENOENT) {
  961                 error = EINVAL;
  962                 if (errmsg != NULL)
  963                         strncpy(errmsg, "Invalid fstype", errmsg_len);
  964                 goto bail;
  965         }
  966 
  967         /*
  968          * See if we can mount in the read-only mode if the error code suggests
  969          * that it could be possible and the mount options allow for that.
  970          * Never try it if "[no]{ro|rw}" has been explicitly requested and not
  971          * overridden by "autoro".
  972          */
  973         if (autoro && vfs_should_downgrade_to_ro_mount(fsflags, error)) {
  974                 printf("%s: R/W mount failed, possibly R/O media,"
  975                     " trying R/O mount\n", __func__);
  976                 fsflags |= MNT_RDONLY;
  977                 error = vfs_domount(td, fstype, fspath, fsflags, &optlist);
  978         }
  979 bail:
  980         /* copyout the errmsg */
  981         if (errmsg_pos != -1 && ((2 * errmsg_pos + 1) < fsoptions->uio_iovcnt)
  982             && errmsg_len > 0 && errmsg != NULL) {
  983                 if (fsoptions->uio_segflg == UIO_SYSSPACE) {
  984                         bcopy(errmsg,
  985                             fsoptions->uio_iov[2 * errmsg_pos + 1].iov_base,
  986                             fsoptions->uio_iov[2 * errmsg_pos + 1].iov_len);
  987                 } else {
  988                         copyout(errmsg,
  989                             fsoptions->uio_iov[2 * errmsg_pos + 1].iov_base,
  990                             fsoptions->uio_iov[2 * errmsg_pos + 1].iov_len);
  991                 }
  992         }
  993 
  994         if (optlist != NULL)
  995                 vfs_freeopts(optlist);
  996         return (error);
  997 }
  998 
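/*
 * Example (illustrative): "mount -o autoro /dev/cd0 /mnt" first
 * attempts an r/w mount; if the provider reports read-only media
 * (EROFS and friends, see vfs_should_downgrade_to_ro_mount() above),
 * vfs_donmount() retries the same mount with MNT_RDONLY set.
 */
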
  999 /*
 1000  * Old mount API.
 1001  */
 1002 #ifndef _SYS_SYSPROTO_H_
 1003 struct mount_args {
 1004         char    *type;
 1005         char    *path;
 1006         int     flags;
 1007         caddr_t data;
 1008 };
 1009 #endif
 1010 /* ARGSUSED */
 1011 int
 1012 sys_mount(struct thread *td, struct mount_args *uap)
 1013 {
 1014         char *fstype;
 1015         struct vfsconf *vfsp = NULL;
 1016         struct mntarg *ma = NULL;
 1017         uint64_t flags;
 1018         int error;
 1019 
 1020         /*
 1021          * Mount flags are now 64-bits. On 32-bit architectures only
 1022          * 32-bits are passed in, but from here on everything handles
 1023          * 64-bit flags correctly.
 1024          */
 1025         flags = uap->flags;
 1026 
 1027         AUDIT_ARG_FFLAGS(flags);
 1028 
 1029         /*
 1030          * Filter out MNT_ROOTFS.  We do not want clients of mount() in
 1031          * userspace to set this flag, but we must filter it out if we want
 1032          * MNT_UPDATE on the root file system to work.
 1033          * MNT_ROOTFS should only be set by the kernel when mounting its
 1034          * root file system.
 1035          */
 1036         flags &= ~MNT_ROOTFS;
 1037 
 1038         fstype = malloc(MFSNAMELEN, M_TEMP, M_WAITOK);
 1039         error = copyinstr(uap->type, fstype, MFSNAMELEN, NULL);
 1040         if (error) {
 1041                 free(fstype, M_TEMP);
 1042                 return (error);
 1043         }
 1044 
 1045         AUDIT_ARG_TEXT(fstype);
 1046         vfsp = vfs_byname_kld(fstype, td, &error);
 1047         free(fstype, M_TEMP);
 1048         if (vfsp == NULL)
 1049                 return (ENOENT);
 1050         if (((vfsp->vfc_flags & VFCF_SBDRY) != 0 &&
 1051             vfsp->vfc_vfsops_sd->vfs_cmount == NULL) ||
 1052             ((vfsp->vfc_flags & VFCF_SBDRY) == 0 &&
 1053             vfsp->vfc_vfsops->vfs_cmount == NULL))
 1054                 return (EOPNOTSUPP);
 1055 
 1056         ma = mount_argsu(ma, "fstype", uap->type, MFSNAMELEN);
 1057         ma = mount_argsu(ma, "fspath", uap->path, MNAMELEN);
 1058         ma = mount_argb(ma, flags & MNT_RDONLY, "noro");
 1059         ma = mount_argb(ma, !(flags & MNT_NOSUID), "nosuid");
 1060         ma = mount_argb(ma, !(flags & MNT_NOEXEC), "noexec");
 1061 
 1062         if ((vfsp->vfc_flags & VFCF_SBDRY) != 0)
 1063                 return (vfsp->vfc_vfsops_sd->vfs_cmount(ma, uap->data, flags));
 1064         return (vfsp->vfc_vfsops->vfs_cmount(ma, uap->data, flags));
 1065 }
 1066 
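/*
 * Hypothetical userland sketch of the legacy ABI (struct iso_args for
 * cd9660 shown for illustration, assuming <isofs/cd9660/cd9660_mount.h>
 * and <err.h>): mount(2) passes a filesystem-specific "data" blob that
 * the per-filesystem vfs_cmount() translates into nmount-style options.
 */
#if 0
	struct iso_args args = { .fspec = "/dev/cd0" };

	if (mount("cd9660", "/mnt", MNT_RDONLY, &args) != 0)
		err(1, "mount");
#endif
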
 1067 /*
 1068  * vfs_domount_first(): first file system mount (not update)
 1069  */
 1070 static int
 1071 vfs_domount_first(
 1072         struct thread *td,              /* Calling thread. */
 1073         struct vfsconf *vfsp,           /* File system type. */
 1074         char *fspath,                   /* Mount path. */
 1075         struct vnode *vp,               /* Vnode to be covered. */
 1076         uint64_t fsflags,               /* Flags common to all filesystems. */
 1077         struct vfsoptlist **optlist     /* Options local to the filesystem. */
 1078         )
 1079 {
 1080         struct vattr va;
 1081         struct mount *mp;
 1082         struct vnode *newdp, *rootvp;
 1083         int error, error1;
 1084         bool unmounted;
 1085 
 1086         ASSERT_VOP_ELOCKED(vp, __func__);
 1087         KASSERT((fsflags & MNT_UPDATE) == 0, ("MNT_UPDATE shouldn't be here"));
 1088 
 1089         /*
 1090          * If the jail of the calling thread lacks permission for this type of
 1091          * file system, or is trying to cover its own root, deny immediately.
 1092          */
 1093         if (jailed(td->td_ucred) && (!prison_allow(td->td_ucred,
 1094             vfsp->vfc_prison_flag) || vp == td->td_ucred->cr_prison->pr_root)) {
 1095                 vput(vp);
 1096                 return (EPERM);
 1097         }
 1098 
 1099         /*
 1100          * If the user is not root, ensure that they own the directory
 1101          * onto which we are attempting to mount.
 1102          */
 1103         error = VOP_GETATTR(vp, &va, td->td_ucred);
 1104         if (error == 0 && va.va_uid != td->td_ucred->cr_uid)
 1105                 error = priv_check_cred(td->td_ucred, PRIV_VFS_ADMIN);
 1106         if (error == 0)
 1107                 error = vinvalbuf(vp, V_SAVE, 0, 0);
 1108         if (vfsp->vfc_flags & VFCF_FILEMOUNT) {
 1109                 if (error == 0 && vp->v_type != VDIR && vp->v_type != VREG)
 1110                         error = EINVAL;
 1111                 /*
 1112                  * For file mounts, ensure that there is only one hardlink to the file.
 1113                  */
 1114                 if (error == 0 && vp->v_type == VREG && va.va_nlink != 1)
 1115                         error = EINVAL;
 1116         } else {
 1117                 if (error == 0 && vp->v_type != VDIR)
 1118                         error = ENOTDIR;
 1119         }
 1120         if (error == 0 && (fsflags & MNT_EMPTYDIR) != 0)
 1121                 error = vfs_emptydir(vp);
 1122         if (error == 0) {
 1123                 VI_LOCK(vp);
 1124                 if ((vp->v_iflag & VI_MOUNT) == 0 && vp->v_mountedhere == NULL)
 1125                         vp->v_iflag |= VI_MOUNT;
 1126                 else
 1127                         error = EBUSY;
 1128                 VI_UNLOCK(vp);
 1129         }
 1130         if (error != 0) {
 1131                 vput(vp);
 1132                 return (error);
 1133         }
 1134         vn_seqc_write_begin(vp);
 1135         VOP_UNLOCK(vp);
 1136 
 1137         /* Allocate and initialize the filesystem. */
 1138         mp = vfs_mount_alloc(vp, vfsp, fspath, td->td_ucred);
 1139         /* XXXMAC: pass to vfs_mount_alloc? */
 1140         mp->mnt_optnew = *optlist;
 1141         /* Set the mount level flags. */
 1142         mp->mnt_flag = (fsflags &
 1143             (MNT_UPDATEMASK | MNT_ROOTFS | MNT_RDONLY | MNT_FORCE));
 1144 
 1145         /*
 1146          * Mount the filesystem.
 1147          * XXX The final recipients of VFS_MOUNT just overwrite the ndp they
 1148          * get.  No freeing of cn_pnbuf.
 1149          */
 1150         error1 = 0;
 1151         unmounted = true;
 1152         if ((error = VFS_MOUNT(mp)) != 0 ||
 1153             (error1 = VFS_STATFS(mp, &mp->mnt_stat)) != 0 ||
 1154             (error1 = VFS_ROOT(mp, LK_EXCLUSIVE, &newdp)) != 0) {
 1155                 rootvp = NULL;
 1156                 if (error1 != 0) {
 1157                         MPASS(error == 0);
 1158                         rootvp = vfs_cache_root_clear(mp);
 1159                         if (rootvp != NULL) {
 1160                                 vhold(rootvp);
 1161                                 vrele(rootvp);
 1162                         }
 1163                         (void)vn_start_write(NULL, &mp, V_WAIT);
 1164                         MNT_ILOCK(mp);
 1165                         mp->mnt_kern_flag |= MNTK_UNMOUNT | MNTK_UNMOUNTF;
 1166                         MNT_IUNLOCK(mp);
 1167                         VFS_PURGE(mp);
 1168                         error = VFS_UNMOUNT(mp, 0);
 1169                         vn_finished_write(mp);
 1170                         if (error != 0) {
 1171                                 printf(
 1172                     "failed post-mount (%d): rollback unmount returned %d\n",
 1173                                     error1, error);
 1174                                 unmounted = false;
 1175                         }
 1176                         error = error1;
 1177                 }
 1178                 vfs_unbusy(mp);
 1179                 mp->mnt_vnodecovered = NULL;
 1180                 if (unmounted) {
 1181                         /* XXXKIB wait for mnt_lockref drain? */
 1182                         vfs_mount_destroy(mp);
 1183                 }
 1184                 VI_LOCK(vp);
 1185                 vp->v_iflag &= ~VI_MOUNT;
 1186                 VI_UNLOCK(vp);
 1187                 if (rootvp != NULL) {
 1188                         vn_seqc_write_end(rootvp);
 1189                         vdrop(rootvp);
 1190                 }
 1191                 vn_seqc_write_end(vp);
 1192                 vrele(vp);
 1193                 return (error);
 1194         }
 1195         vn_seqc_write_begin(newdp);
 1196         VOP_UNLOCK(newdp);
 1197 
 1198         if (mp->mnt_opt != NULL)
 1199                 vfs_freeopts(mp->mnt_opt);
 1200         mp->mnt_opt = mp->mnt_optnew;
 1201         *optlist = NULL;
 1202 
 1203         /*
 1204          * Prevent external consumers of mount options from reading mnt_optnew.
 1205          */
 1206         mp->mnt_optnew = NULL;
 1207 
 1208         MNT_ILOCK(mp);
 1209         if ((mp->mnt_flag & MNT_ASYNC) != 0 &&
 1210             (mp->mnt_kern_flag & MNTK_NOASYNC) == 0)
 1211                 mp->mnt_kern_flag |= MNTK_ASYNC;
 1212         else
 1213                 mp->mnt_kern_flag &= ~MNTK_ASYNC;
 1214         MNT_IUNLOCK(mp);
 1215 
 1216         /*
 1217          * VIRF_MOUNTPOINT and v_mountedhere need to be set under the
 1218          * vp lock to satisfy vfs_lookup() requirements.
 1219          */
 1220         VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY);
 1221         VI_LOCK(vp);
 1222         vn_irflag_set_locked(vp, VIRF_MOUNTPOINT);
 1223         vp->v_mountedhere = mp;
 1224         VI_UNLOCK(vp);
 1225         VOP_UNLOCK(vp);
 1226         cache_purge(vp);
 1227 
 1228         /*
 1229          * We need to lock both vnodes.
 1230          *
 1231          * Use vn_lock_pair to avoid establishing an ordering between vnodes
 1232          * from different filesystems.
 1233          */
 1234         vn_lock_pair(vp, false, newdp, false);
 1235 
 1236         VI_LOCK(vp);
 1237         vp->v_iflag &= ~VI_MOUNT;
 1238         VI_UNLOCK(vp);
 1239         /* Place the new filesystem at the end of the mount list. */
 1240         mtx_lock(&mountlist_mtx);
 1241         TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
 1242         mtx_unlock(&mountlist_mtx);
 1243         vfs_event_signal(NULL, VQ_MOUNT, 0);
 1244         VOP_UNLOCK(vp);
 1245         EVENTHANDLER_DIRECT_INVOKE(vfs_mounted, mp, newdp, td);
 1246         VOP_UNLOCK(newdp);
 1247         mount_devctl_event("MOUNT", mp, false);
 1248         mountcheckdirs(vp, newdp);
 1249         vn_seqc_write_end(vp);
 1250         vn_seqc_write_end(newdp);
 1251         vrele(newdp);
 1252         if ((mp->mnt_flag & MNT_RDONLY) == 0)
 1253                 vfs_allocate_syncvnode(mp);
 1254         vfs_op_exit(mp);
 1255         vfs_unbusy(mp);
 1256         return (0);
 1257 }
 1258 
 1259 /*
 1260  * vfs_domount_update(): update of mounted file system
 1261  */
 1262 static int
 1263 vfs_domount_update(
 1264         struct thread *td,              /* Calling thread. */
 1265         struct vnode *vp,               /* Mount point vnode. */
 1266         uint64_t fsflags,               /* Flags common to all filesystems. */
 1267         struct vfsoptlist **optlist     /* Options local to the filesystem. */
 1268         )
 1269 {
 1270         struct export_args export;
 1271         struct o2export_args o2export;
 1272         struct vnode *rootvp;
 1273         void *bufp;
 1274         struct mount *mp;
 1275         int error, export_error, i, len;
 1276         uint64_t flag;
 1277         gid_t *grps;
 1278 
 1279         ASSERT_VOP_ELOCKED(vp, __func__);
 1280         KASSERT((fsflags & MNT_UPDATE) != 0, ("MNT_UPDATE should be here"));
 1281         mp = vp->v_mount;
 1282 
 1283         if ((vp->v_vflag & VV_ROOT) == 0) {
 1284                 if (vfs_copyopt(*optlist, "export", &export, sizeof(export))
 1285                     == 0)
 1286                         error = EXDEV;
 1287                 else
 1288                         error = EINVAL;
 1289                 vput(vp);
 1290                 return (error);
 1291         }
 1292 
 1293         /*
 1294          * We only allow the filesystem to be reloaded if it
 1295          * is currently mounted read-only.
 1296          */
 1297         flag = mp->mnt_flag;
 1298         if ((fsflags & MNT_RELOAD) != 0 && (flag & MNT_RDONLY) == 0) {
 1299                 vput(vp);
 1300                 return (EOPNOTSUPP);    /* Needs translation */
 1301         }
 1302         /*
 1303          * Only privileged root, or (if MNT_USER is set) the user that
 1304          * did the original mount is permitted to update it.
 1305          */
 1306         error = vfs_suser(mp, td);
 1307         if (error != 0) {
 1308                 vput(vp);
 1309                 return (error);
 1310         }
 1311         if (vfs_busy(mp, MBF_NOWAIT)) {
 1312                 vput(vp);
 1313                 return (EBUSY);
 1314         }
 1315         VI_LOCK(vp);
 1316         if ((vp->v_iflag & VI_MOUNT) != 0 || vp->v_mountedhere != NULL) {
 1317                 VI_UNLOCK(vp);
 1318                 vfs_unbusy(mp);
 1319                 vput(vp);
 1320                 return (EBUSY);
 1321         }
 1322         vp->v_iflag |= VI_MOUNT;
 1323         VI_UNLOCK(vp);
 1324         VOP_UNLOCK(vp);
 1325 
 1326         vfs_op_enter(mp);
 1327         vn_seqc_write_begin(vp);
 1328 
 1329         rootvp = NULL;
 1330         MNT_ILOCK(mp);
 1331         if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) {
 1332                 MNT_IUNLOCK(mp);
 1333                 error = EBUSY;
 1334                 goto end;
 1335         }
 1336         mp->mnt_flag &= ~MNT_UPDATEMASK;
 1337         mp->mnt_flag |= fsflags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE |
 1338             MNT_SNAPSHOT | MNT_ROOTFS | MNT_UPDATEMASK | MNT_RDONLY);
 1339         if ((mp->mnt_flag & MNT_ASYNC) == 0)
 1340                 mp->mnt_kern_flag &= ~MNTK_ASYNC;
 1341         rootvp = vfs_cache_root_clear(mp);
 1342         MNT_IUNLOCK(mp);
 1343         mp->mnt_optnew = *optlist;
 1344         vfs_mergeopts(mp->mnt_optnew, mp->mnt_opt);
 1345 
 1346         /*
 1347          * Mount the filesystem.
 1348          * XXX The final recipients of VFS_MOUNT just overwrite the ndp they
 1349          * get.  No freeing of cn_pnbuf.
 1350          */
 1351         error = VFS_MOUNT(mp);
 1352 
 1353         export_error = 0;
 1354         /* Process the export option. */
 1355         if (error == 0 && vfs_getopt(mp->mnt_optnew, "export", &bufp,
 1356             &len) == 0) {
 1357                 /* Assume that there is only 1 ABI for each length. */
 1358                 switch (len) {
 1359                 case (sizeof(struct oexport_args)):
 1360                         bzero(&o2export, sizeof(o2export));
 1361                         /* FALLTHROUGH */
 1362                 case (sizeof(o2export)):
 1363                         bcopy(bufp, &o2export, len);
 1364                         export.ex_flags = (uint64_t)o2export.ex_flags;
 1365                         export.ex_root = o2export.ex_root;
 1366                         export.ex_uid = o2export.ex_anon.cr_uid;
 1367                         export.ex_groups = NULL;
 1368                         export.ex_ngroups = o2export.ex_anon.cr_ngroups;
 1369                         if (export.ex_ngroups > 0) {
 1370                                 if (export.ex_ngroups <= XU_NGROUPS) {
 1371                                         export.ex_groups = malloc(
 1372                                             export.ex_ngroups * sizeof(gid_t),
 1373                                             M_TEMP, M_WAITOK);
 1374                                         for (i = 0; i < export.ex_ngroups; i++)
 1375                                                 export.ex_groups[i] =
 1376                                                   o2export.ex_anon.cr_groups[i];
 1377                                 } else
 1378                                         export_error = EINVAL;
 1379                         } else if (export.ex_ngroups < 0)
 1380                                 export_error = EINVAL;
 1381                         export.ex_addr = o2export.ex_addr;
 1382                         export.ex_addrlen = o2export.ex_addrlen;
 1383                         export.ex_mask = o2export.ex_mask;
 1384                         export.ex_masklen = o2export.ex_masklen;
 1385                         export.ex_indexfile = o2export.ex_indexfile;
 1386                         export.ex_numsecflavors = o2export.ex_numsecflavors;
 1387                         if (export.ex_numsecflavors < MAXSECFLAVORS) {
 1388                                 for (i = 0; i < export.ex_numsecflavors; i++)
 1389                                         export.ex_secflavors[i] =
 1390                                             o2export.ex_secflavors[i];
 1391                         } else
 1392                                 export_error = EINVAL;
 1393                         if (export_error == 0)
 1394                                 export_error = vfs_export(mp, &export);
 1395                         free(export.ex_groups, M_TEMP);
 1396                         break;
 1397                 case (sizeof(export)):
 1398                         bcopy(bufp, &export, len);
 1399                         grps = NULL;
 1400                         if (export.ex_ngroups > 0) {
 1401                                 if (export.ex_ngroups <= NGROUPS_MAX) {
 1402                                         grps = malloc(export.ex_ngroups *
 1403                                             sizeof(gid_t), M_TEMP, M_WAITOK);
 1404                                         export_error = copyin(export.ex_groups,
 1405                                             grps, export.ex_ngroups *
 1406                                             sizeof(gid_t));
 1407                                         if (export_error == 0)
 1408                                                 export.ex_groups = grps;
 1409                                 } else
 1410                                         export_error = EINVAL;
 1411                         } else if (export.ex_ngroups == 0)
 1412                                 export.ex_groups = NULL;
 1413                         else
 1414                                 export_error = EINVAL;
 1415                         if (export_error == 0)
 1416                                 export_error = vfs_export(mp, &export);
 1417                         free(grps, M_TEMP);
 1418                         break;
 1419                 default:
 1420                         export_error = EINVAL;
 1421                         break;
 1422                 }
 1423         }
 1424 
 1425         MNT_ILOCK(mp);
 1426         if (error == 0) {
 1427                 mp->mnt_flag &= ~(MNT_UPDATE | MNT_RELOAD | MNT_FORCE |
 1428                     MNT_SNAPSHOT);
 1429         } else {
 1430                 /*
 1431                  * If we fail, restore the old mount flags. MNT_QUOTA is
 1432                  * special, because it is not part of MNT_UPDATEMASK, but it
 1433                  * could have changed in the meantime if quotactl(2) was
 1434                  * called.  All in all we want the current value of MNT_QUOTA,
 1435                  * not the old one.
 1436                  */
 1437                 mp->mnt_flag = (mp->mnt_flag & MNT_QUOTA) | (flag & ~MNT_QUOTA);
 1438         }
 1439         if ((mp->mnt_flag & MNT_ASYNC) != 0 &&
 1440             (mp->mnt_kern_flag & MNTK_NOASYNC) == 0)
 1441                 mp->mnt_kern_flag |= MNTK_ASYNC;
 1442         else
 1443                 mp->mnt_kern_flag &= ~MNTK_ASYNC;
 1444         MNT_IUNLOCK(mp);
 1445 
 1446         if (error != 0)
 1447                 goto end;
 1448 
 1449         mount_devctl_event("REMOUNT", mp, true);
 1450         if (mp->mnt_opt != NULL)
 1451                 vfs_freeopts(mp->mnt_opt);
 1452         mp->mnt_opt = mp->mnt_optnew;
 1453         *optlist = NULL;
 1454         (void)VFS_STATFS(mp, &mp->mnt_stat);
 1455         /*
 1456          * Prevent external consumers of mount options from reading
 1457          * mnt_optnew.
 1458          */
 1459         mp->mnt_optnew = NULL;
 1460 
 1461         if ((mp->mnt_flag & MNT_RDONLY) == 0)
 1462                 vfs_allocate_syncvnode(mp);
 1463         else
 1464                 vfs_deallocate_syncvnode(mp);
 1465 end:
 1466         vfs_op_exit(mp);
 1467         if (rootvp != NULL) {
 1468                 vn_seqc_write_end(rootvp);
 1469                 vrele(rootvp);
 1470         }
 1471         vn_seqc_write_end(vp);
 1472         vfs_unbusy(mp);
 1473         VI_LOCK(vp);
 1474         vp->v_iflag &= ~VI_MOUNT;
 1475         VI_UNLOCK(vp);
 1476         vrele(vp);
 1477         return (error != 0 ? error : export_error);
 1478 }
 1479 
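/*
 * Example (a hedged sketch, not part of the original source): an update
 * request that lands in vfs_domount_update() above is typically issued
 * from userland with nmount(2) and MNT_UPDATE, e.g. to downgrade a mount
 * to read-only.  "fstype" and "fspath" are the mandatory options parsed
 * by vfs_donmount(); the mount point path below is illustrative.
 *
 *	struct iovec iov[] = {
 *		{ .iov_base = "fstype", .iov_len = sizeof("fstype") },
 *		{ .iov_base = "ufs",    .iov_len = sizeof("ufs") },
 *		{ .iov_base = "fspath", .iov_len = sizeof("fspath") },
 *		{ .iov_base = "/mnt",   .iov_len = sizeof("/mnt") },
 *	};
 *
 *	if (nmount(iov, nitems(iov), MNT_UPDATE | MNT_RDONLY) != 0)
 *		err(1, "nmount");
 */
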
 1480 /*
 1481  * vfs_domount(): actually attempt a filesystem mount.
 1482  */
 1483 static int
 1484 vfs_domount(
 1485         struct thread *td,              /* Calling thread. */
 1486         const char *fstype,             /* Filesystem type. */
 1487         char *fspath,                   /* Mount path. */
 1488         uint64_t fsflags,               /* Flags common to all filesystems. */
 1489         struct vfsoptlist **optlist     /* Options local to the filesystem. */
 1490         )
 1491 {
 1492         struct vfsconf *vfsp;
 1493         struct nameidata nd;
 1494         struct vnode *vp;
 1495         char *pathbuf;
 1496         int error;
 1497 
 1498         /*
 1499          * Be ultra-paranoid about making sure the type and fspath
 1500          * variables will fit in our mp buffers, including the
 1501          * terminating NUL.
 1502          */
 1503         if (strlen(fstype) >= MFSNAMELEN || strlen(fspath) >= MNAMELEN)
 1504                 return (ENAMETOOLONG);
 1505 
 1506         if (jailed(td->td_ucred) || usermount == 0) {
 1507                 if ((error = priv_check(td, PRIV_VFS_MOUNT)) != 0)
 1508                         return (error);
 1509         }
 1510 
 1511         /*
 1512          * Do not allow NFS export or MNT_SUIDDIR by unprivileged users.
 1513          */
 1514         if (fsflags & MNT_EXPORTED) {
 1515                 error = priv_check(td, PRIV_VFS_MOUNT_EXPORTED);
 1516                 if (error)
 1517                         return (error);
 1518         }
 1519         if (fsflags & MNT_SUIDDIR) {
 1520                 error = priv_check(td, PRIV_VFS_MOUNT_SUIDDIR);
 1521                 if (error)
 1522                         return (error);
 1523         }
 1524         /*
 1525          * Silently enforce MNT_NOSUID and MNT_USER for unprivileged users.
 1526          */
 1527         if ((fsflags & (MNT_NOSUID | MNT_USER)) != (MNT_NOSUID | MNT_USER)) {
 1528                 if (priv_check(td, PRIV_VFS_MOUNT_NONUSER) != 0)
 1529                         fsflags |= MNT_NOSUID | MNT_USER;
 1530         }
 1531 
 1532         /* Load KLDs before we lock the covered vnode to avoid reversals. */
 1533         vfsp = NULL;
 1534         if ((fsflags & MNT_UPDATE) == 0) {
 1535                 /* Don't try to load KLDs if we're mounting the root. */
 1536                 if (fsflags & MNT_ROOTFS) {
 1537                         if ((vfsp = vfs_byname(fstype)) == NULL)
 1538                                 return (ENODEV);
 1539                 } else {
 1540                         if ((vfsp = vfs_byname_kld(fstype, td, &error)) == NULL)
 1541                                 return (error);
 1542                 }
 1543         }
 1544 
 1545         /*
 1546          * Get vnode to be covered or mount point's vnode in case of MNT_UPDATE.
 1547          */
 1548         NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1 | WANTPARENT,
 1549             UIO_SYSSPACE, fspath);
 1550         error = namei(&nd);
 1551         if (error != 0)
 1552                 return (error);
 1553         vp = nd.ni_vp;
 1554         /*
 1555          * Don't allow stacking file mounts to work around problems with the way
 1556          * that namei sets nd.ni_dvp to vp_crossmp for these.
 1557          */
 1558         if (vp->v_type == VREG)
 1559                 fsflags |= MNT_NOCOVER;
 1560         if ((fsflags & MNT_UPDATE) == 0) {
 1561                 if ((vp->v_vflag & VV_ROOT) != 0 &&
 1562                     (fsflags & MNT_NOCOVER) != 0) {
 1563                         vput(vp);
 1564                         error = EBUSY;
 1565                         goto out;
 1566                 }
 1567                 pathbuf = malloc(MNAMELEN, M_TEMP, M_WAITOK);
 1568                 strcpy(pathbuf, fspath);
 1569                 /*
 1570                  * Note: we allow any vnode type here. If the path sanity check
 1571                  * succeeds, the type will be validated in vfs_domount_first
 1572                  * (defined above).
 1573                  */
 1574                 if (vp->v_type == VDIR)
 1575                         error = vn_path_to_global_path(td, vp, pathbuf,
 1576                             MNAMELEN);
 1577                 else
 1578                         error = vn_path_to_global_path_hardlink(td, vp,
 1579                             nd.ni_dvp, pathbuf, MNAMELEN,
 1580                             nd.ni_cnd.cn_nameptr, nd.ni_cnd.cn_namelen);
 1581                 if (error == 0) {
 1582                         error = vfs_domount_first(td, vfsp, pathbuf, vp,
 1583                             fsflags, optlist);
 1584                 }
 1585                 free(pathbuf, M_TEMP);
 1586         } else
 1587                 error = vfs_domount_update(td, vp, fsflags, optlist);
 1588 
 1589 out:
 1590         NDFREE_PNBUF(&nd);
 1591         vrele(nd.ni_dvp);
 1592 
 1593         return (error);
 1594 }
 1595 
 1596 /*
 1597  * Unmount a filesystem.
 1598  *
 1599  * Note: unmount takes a path to the vnode mounted on as its argument,
 1600  * not the special file (as it once did).
 1601  */
 1602 #ifndef _SYS_SYSPROTO_H_
 1603 struct unmount_args {
 1604         char    *path;
 1605         int     flags;
 1606 };
 1607 #endif
 1608 /* ARGSUSED */
 1609 int
 1610 sys_unmount(struct thread *td, struct unmount_args *uap)
 1611 {
 1612 
 1613         return (kern_unmount(td, uap->path, uap->flags));
 1614 }
 1615 
 1616 int
 1617 kern_unmount(struct thread *td, const char *path, int flags)
 1618 {
 1619         struct nameidata nd;
 1620         struct mount *mp;
 1621         char *fsidbuf, *pathbuf;
 1622         fsid_t fsid;
 1623         int error;
 1624 
 1625         AUDIT_ARG_VALUE(flags);
 1626         if (jailed(td->td_ucred) || usermount == 0) {
 1627                 error = priv_check(td, PRIV_VFS_UNMOUNT);
 1628                 if (error)
 1629                         return (error);
 1630         }
 1631 
 1632         if (flags & MNT_BYFSID) {
 1633                 fsidbuf = malloc(MNAMELEN, M_TEMP, M_WAITOK);
 1634                 error = copyinstr(path, fsidbuf, MNAMELEN, NULL);
 1635                 if (error) {
 1636                         free(fsidbuf, M_TEMP);
 1637                         return (error);
 1638                 }
 1639 
 1640                 AUDIT_ARG_TEXT(fsidbuf);
 1641                 /* Decode the filesystem ID. */
 1642                 if (sscanf(fsidbuf, "FSID:%d:%d", &fsid.val[0], &fsid.val[1]) != 2) {
 1643                         free(fsidbuf, M_TEMP);
 1644                         return (EINVAL);
 1645                 }
 1646 
 1647                 mp = vfs_getvfs(&fsid);
 1648                 free(fsidbuf, M_TEMP);
 1649                 if (mp == NULL) {
 1650                         return (ENOENT);
 1651                 }
 1652         } else {
 1653                 pathbuf = malloc(MNAMELEN, M_TEMP, M_WAITOK);
 1654                 error = copyinstr(path, pathbuf, MNAMELEN, NULL);
 1655                 if (error) {
 1656                         free(pathbuf, M_TEMP);
 1657                         return (error);
 1658                 }
 1659 
 1660                 /*
 1661                  * Try to find global path for path argument.
 1662                  */
 1663                 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1,
 1664                     UIO_SYSSPACE, pathbuf);
 1665                 if (namei(&nd) == 0) {
 1666                         NDFREE_PNBUF(&nd);
 1667                         error = vn_path_to_global_path(td, nd.ni_vp, pathbuf,
 1668                             MNAMELEN);
 1669                         if (error == 0)
 1670                                 vput(nd.ni_vp);
 1671                 }
 1672                 mtx_lock(&mountlist_mtx);
 1673                 TAILQ_FOREACH_REVERSE(mp, &mountlist, mntlist, mnt_list) {
 1674                         if (strcmp(mp->mnt_stat.f_mntonname, pathbuf) == 0) {
 1675                                 vfs_ref(mp);
 1676                                 break;
 1677                         }
 1678                 }
 1679                 mtx_unlock(&mountlist_mtx);
 1680                 free(pathbuf, M_TEMP);
 1681                 if (mp == NULL) {
 1682                         /*
 1683                          * Previously we returned ENOENT for a nonexistent path and
 1684                          * EINVAL for a non-mountpoint.  We cannot tell these apart
 1685                          * now, so in the !MNT_BYFSID case return the more likely
 1686                          * EINVAL for compatibility.
 1687                          */
 1688                         return (EINVAL);
 1689                 }
 1690         }
 1691 
 1692         /*
 1693          * Don't allow unmounting the root filesystem.
 1694          */
 1695         if (mp->mnt_flag & MNT_ROOTFS) {
 1696                 vfs_rel(mp);
 1697                 return (EINVAL);
 1698         }
 1699         error = dounmount(mp, flags, td);
 1700         return (error);
 1701 }
 1702 
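/*
 * Example (a hedged sketch): with MNT_BYFSID the "path" argument is not
 * a path at all but the printable filesystem ID decoded by the sscanf()
 * above; this is how umount(8) can detach a filesystem whose mount point
 * is no longer reachable by name:
 *
 *	struct statfs sfs;
 *	char fsid[32];
 *
 *	if (statfs("/mnt", &sfs) != 0)
 *		err(1, "statfs");
 *	snprintf(fsid, sizeof(fsid), "FSID:%d:%d",
 *	    sfs.f_fsid.val[0], sfs.f_fsid.val[1]);
 *	if (unmount(fsid, MNT_BYFSID) != 0)
 *		err(1, "unmount");
 */
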
 1703 /*
 1704  * Return error if any of the vnodes, ignoring the root vnode
 1705  * and the syncer vnode, have non-zero usecount.
 1706  *
 1707  * This function is purely advisory - it can return false positives
 1708  * and negatives.
 1709  */
 1710 static int
 1711 vfs_check_usecounts(struct mount *mp)
 1712 {
 1713         struct vnode *vp, *mvp;
 1714 
 1715         MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 1716                 if ((vp->v_vflag & VV_ROOT) == 0 && vp->v_type != VNON &&
 1717                     vp->v_usecount != 0) {
 1718                         VI_UNLOCK(vp);
 1719                         MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
 1720                         return (EBUSY);
 1721                 }
 1722                 VI_UNLOCK(vp);
 1723         }
 1724 
 1725         return (0);
 1726 }
 1727 
 1728 static void
 1729 dounmount_cleanup(struct mount *mp, struct vnode *coveredvp, int mntkflags)
 1730 {
 1731 
 1732         mtx_assert(MNT_MTX(mp), MA_OWNED);
 1733         mp->mnt_kern_flag &= ~mntkflags;
 1734         if ((mp->mnt_kern_flag & MNTK_MWAIT) != 0) {
 1735                 mp->mnt_kern_flag &= ~MNTK_MWAIT;
 1736                 wakeup(mp);
 1737         }
 1738         vfs_op_exit_locked(mp);
 1739         MNT_IUNLOCK(mp);
 1740         if (coveredvp != NULL) {
 1741                 VOP_UNLOCK(coveredvp);
 1742                 vdrop(coveredvp);
 1743         }
 1744         vn_finished_write(mp);
 1745         vfs_rel(mp);
 1746 }
 1747 
 1748 /*
 1749  * There are various reference counters associated with the mount point.
 1750  * Normally it is permitted to modify them without taking the mnt ilock,
 1751  * but this behavior can be temporarily disabled if a stable value is
 1752  * needed or callers are expected to block (e.g., to not allow new
 1753  * users during forced unmount).
 1754  */
 1755 void
 1756 vfs_op_enter(struct mount *mp)
 1757 {
 1758         struct mount_pcpu *mpcpu;
 1759         int cpu;
 1760 
 1761         MNT_ILOCK(mp);
 1762         mp->mnt_vfs_ops++;
 1763         if (mp->mnt_vfs_ops > 1) {
 1764                 MNT_IUNLOCK(mp);
 1765                 return;
 1766         }
 1767         vfs_op_barrier_wait(mp);
 1768         CPU_FOREACH(cpu) {
 1769                 mpcpu = vfs_mount_pcpu_remote(mp, cpu);
 1770 
 1771                 mp->mnt_ref += mpcpu->mntp_ref;
 1772                 mpcpu->mntp_ref = 0;
 1773 
 1774                 mp->mnt_lockref += mpcpu->mntp_lockref;
 1775                 mpcpu->mntp_lockref = 0;
 1776 
 1777                 mp->mnt_writeopcount += mpcpu->mntp_writeopcount;
 1778                 mpcpu->mntp_writeopcount = 0;
 1779         }
 1780         MPASSERT(mp->mnt_ref > 0 && mp->mnt_lockref >= 0 &&
 1781             mp->mnt_writeopcount >= 0, mp,
 1782             ("invalid count(s): ref %d lockref %d writeopcount %d",
 1783             mp->mnt_ref, mp->mnt_lockref, mp->mnt_writeopcount));
 1784         MNT_IUNLOCK(mp);
 1785         vfs_assert_mount_counters(mp);
 1786 }
 1787 
 1788 void
 1789 vfs_op_exit_locked(struct mount *mp)
 1790 {
 1791 
 1792         mtx_assert(MNT_MTX(mp), MA_OWNED);
 1793 
 1794         MPASSERT(mp->mnt_vfs_ops > 0, mp,
 1795             ("invalid vfs_ops count %d", mp->mnt_vfs_ops));
 1796         MPASSERT(mp->mnt_vfs_ops > 1 ||
 1797             (mp->mnt_kern_flag & (MNTK_UNMOUNT | MNTK_SUSPEND)) == 0, mp,
 1798             ("vfs_ops too low %d in unmount or suspend", mp->mnt_vfs_ops));
 1799         mp->mnt_vfs_ops--;
 1800 }
 1801 
 1802 void
 1803 vfs_op_exit(struct mount *mp)
 1804 {
 1805 
 1806         MNT_ILOCK(mp);
 1807         vfs_op_exit_locked(mp);
 1808         MNT_IUNLOCK(mp);
 1809 }
 1810 
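/*
 * Example (a hedged sketch): the enter/exit pair brackets code that
 * needs the per-CPU deltas folded into the struct mount fields, as
 * vfs_domount_update() above and dounmount() below do:
 *
 *	vfs_op_enter(mp);
 *	... mnt_ref, mnt_lockref and mnt_writeopcount are now exact
 *	... and are maintained under the mount interlock
 *	vfs_op_exit(mp);
 */
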
 1811 struct vfs_op_barrier_ipi {
 1812         struct mount *mp;
 1813         struct smp_rendezvous_cpus_retry_arg srcra;
 1814 };
 1815 
 1816 static void
 1817 vfs_op_action_func(void *arg)
 1818 {
 1819         struct vfs_op_barrier_ipi *vfsopipi;
 1820         struct mount *mp;
 1821 
 1822         vfsopipi = __containerof(arg, struct vfs_op_barrier_ipi, srcra);
 1823         mp = vfsopipi->mp;
 1824 
 1825         if (!vfs_op_thread_entered(mp))
 1826                 smp_rendezvous_cpus_done(arg);
 1827 }
 1828 
 1829 static void
 1830 vfs_op_wait_func(void *arg, int cpu)
 1831 {
 1832         struct vfs_op_barrier_ipi *vfsopipi;
 1833         struct mount *mp;
 1834         struct mount_pcpu *mpcpu;
 1835 
 1836         vfsopipi = __containerof(arg, struct vfs_op_barrier_ipi, srcra);
 1837         mp = vfsopipi->mp;
 1838 
 1839         mpcpu = vfs_mount_pcpu_remote(mp, cpu);
 1840         while (atomic_load_int(&mpcpu->mntp_thread_in_ops))
 1841                 cpu_spinwait();
 1842 }
 1843 
 1844 void
 1845 vfs_op_barrier_wait(struct mount *mp)
 1846 {
 1847         struct vfs_op_barrier_ipi vfsopipi;
 1848 
 1849         vfsopipi.mp = mp;
 1850 
 1851         smp_rendezvous_cpus_retry(all_cpus,
 1852             smp_no_rendezvous_barrier,
 1853             vfs_op_action_func,
 1854             smp_no_rendezvous_barrier,
 1855             vfs_op_wait_func,
 1856             &vfsopipi.srcra);
 1857 }
 1858 
 1859 #ifdef DIAGNOSTIC
 1860 void
 1861 vfs_assert_mount_counters(struct mount *mp)
 1862 {
 1863         struct mount_pcpu *mpcpu;
 1864         int cpu;
 1865 
 1866         if (mp->mnt_vfs_ops == 0)
 1867                 return;
 1868 
 1869         CPU_FOREACH(cpu) {
 1870                 mpcpu = vfs_mount_pcpu_remote(mp, cpu);
 1871                 if (mpcpu->mntp_ref != 0 ||
 1872                     mpcpu->mntp_lockref != 0 ||
 1873                     mpcpu->mntp_writeopcount != 0)
 1874                         vfs_dump_mount_counters(mp);
 1875         }
 1876 }
 1877 
 1878 void
 1879 vfs_dump_mount_counters(struct mount *mp)
 1880 {
 1881         struct mount_pcpu *mpcpu;
 1882         int ref, lockref, writeopcount;
 1883         int cpu;
 1884 
 1885         printf("%s: mp %p vfs_ops %d\n", __func__, mp, mp->mnt_vfs_ops);
 1886 
 1887         printf("        ref : ");
 1888         ref = mp->mnt_ref;
 1889         CPU_FOREACH(cpu) {
 1890                 mpcpu = vfs_mount_pcpu_remote(mp, cpu);
 1891                 printf("%d ", mpcpu->mntp_ref);
 1892                 ref += mpcpu->mntp_ref;
 1893         }
 1894         printf("\n");
 1895         printf("    lockref : ");
 1896         lockref = mp->mnt_lockref;
 1897         CPU_FOREACH(cpu) {
 1898                 mpcpu = vfs_mount_pcpu_remote(mp, cpu);
 1899                 printf("%d ", mpcpu->mntp_lockref);
 1900                 lockref += mpcpu->mntp_lockref;
 1901         }
 1902         printf("\n");
 1903         printf("writeopcount: ");
 1904         writeopcount = mp->mnt_writeopcount;
 1905         CPU_FOREACH(cpu) {
 1906                 mpcpu = vfs_mount_pcpu_remote(mp, cpu);
 1907                 printf("%d ", mpcpu->mntp_writeopcount);
 1908                 writeopcount += mpcpu->mntp_writeopcount;
 1909         }
 1910         printf("\n");
 1911 
 1912         printf("counter       struct total\n");
 1913         printf("ref             %-5d  %-5d\n", mp->mnt_ref, ref);
 1914         printf("lockref         %-5d  %-5d\n", mp->mnt_lockref, lockref);
 1915         printf("writeopcount    %-5d  %-5d\n", mp->mnt_writeopcount, writeopcount);
 1916 
 1917         panic("invalid counts on struct mount");
 1918 }
 1919 #endif
 1920 
 1921 int
 1922 vfs_mount_fetch_counter(struct mount *mp, enum mount_counter which)
 1923 {
 1924         struct mount_pcpu *mpcpu;
 1925         int cpu, sum;
 1926 
 1927         switch (which) {
 1928         case MNT_COUNT_REF:
 1929                 sum = mp->mnt_ref;
 1930                 break;
 1931         case MNT_COUNT_LOCKREF:
 1932                 sum = mp->mnt_lockref;
 1933                 break;
 1934         case MNT_COUNT_WRITEOPCOUNT:
 1935                 sum = mp->mnt_writeopcount;
 1936                 break;
 1937         }
 1938 
 1939         CPU_FOREACH(cpu) {
 1940                 mpcpu = vfs_mount_pcpu_remote(mp, cpu);
 1941                 switch (which) {
 1942                 case MNT_COUNT_REF:
 1943                         sum += mpcpu->mntp_ref;
 1944                         break;
 1945                 case MNT_COUNT_LOCKREF:
 1946                         sum += mpcpu->mntp_lockref;
 1947                         break;
 1948                 case MNT_COUNT_WRITEOPCOUNT:
 1949                         sum += mpcpu->mntp_writeopcount;
 1950                         break;
 1951                 }
 1952         }
 1953         return (sum);
 1954 }
 1955 
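/*
 * Example (a hedged sketch): a snapshot of a counter is the struct
 * mount field plus the per-CPU deltas; it may be stale by the time it
 * is read unless vfs_op_enter() is in effect:
 *
 *	if (vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF) > 0)
 *		... some thread still holds the mount via vfs_busy(9) ...
 */
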
 1956 static bool
 1957 deferred_unmount_enqueue(struct mount *mp, uint64_t flags, bool requeue,
 1958     int timeout_ticks)
 1959 {
 1960         bool enqueued;
 1961 
 1962         enqueued = false;
 1963         mtx_lock(&deferred_unmount_lock);
 1964         if ((mp->mnt_taskqueue_flags & MNT_DEFERRED) == 0 || requeue) {
 1965                 mp->mnt_taskqueue_flags = flags | MNT_DEFERRED;
 1966                 STAILQ_INSERT_TAIL(&deferred_unmount_list, mp,
 1967                     mnt_taskqueue_link);
 1968                 enqueued = true;
 1969         }
 1970         mtx_unlock(&deferred_unmount_lock);
 1971 
 1972         if (enqueued) {
 1973                 taskqueue_enqueue_timeout(taskqueue_deferred_unmount,
 1974                     &deferred_unmount_task, timeout_ticks);
 1975         }
 1976 
 1977         return (enqueued);
 1978 }
 1979 
 1980 /*
 1981  * Taskqueue handler for processing async/recursive unmounts
 1982  */
 1983 static void
 1984 vfs_deferred_unmount(void *argi __unused, int pending __unused)
 1985 {
 1986         STAILQ_HEAD(, mount) local_unmounts;
 1987         uint64_t flags;
 1988         struct mount *mp, *tmp;
 1989         int error;
 1990         unsigned int retries;
 1991         bool unmounted;
 1992 
 1993         STAILQ_INIT(&local_unmounts);
 1994         mtx_lock(&deferred_unmount_lock);
 1995         STAILQ_CONCAT(&local_unmounts, &deferred_unmount_list);
 1996         mtx_unlock(&deferred_unmount_lock);
 1997 
 1998         STAILQ_FOREACH_SAFE(mp, &local_unmounts, mnt_taskqueue_link, tmp) {
 1999                 flags = mp->mnt_taskqueue_flags;
 2000                 KASSERT((flags & MNT_DEFERRED) != 0,
 2001                     ("taskqueue unmount without MNT_DEFERRED"));
 2002                 error = dounmount(mp, flags, curthread);
 2003                 if (error != 0) {
 2004                         MNT_ILOCK(mp);
 2005                         unmounted = ((mp->mnt_kern_flag & MNTK_REFEXPIRE) != 0);
 2006                         MNT_IUNLOCK(mp);
 2007 
 2008                         /*
 2009                          * The deferred unmount thread is the only thread that
 2010                          * modifies the retry counts, so locking/atomics aren't
 2011                          * needed here.
 2012                          */
 2013                         retries = (mp->mnt_unmount_retries)++;
 2014                         deferred_unmount_total_retries++;
 2015                         if (!unmounted && retries < deferred_unmount_retry_limit) {
 2016                                 deferred_unmount_enqueue(mp, flags, true,
 2017                                     -deferred_unmount_retry_delay_hz);
 2018                         } else {
 2019                                 if (retries >= deferred_unmount_retry_limit) {
 2020                                         printf("giving up on deferred unmount "
 2021                                             "of %s after %d retries, error %d\n",
 2022                                             mp->mnt_stat.f_mntonname, retries, error);
 2023                                 }
 2024                                 vfs_rel(mp);
 2025                         }
 2026                 }
 2027         }
 2028 }
 2029 
 2030 /*
 2031  * Do the actual filesystem unmount.
 2032  */
 2033 int
 2034 dounmount(struct mount *mp, uint64_t flags, struct thread *td)
 2035 {
 2036         struct mount_upper_node *upper;
 2037         struct vnode *coveredvp, *rootvp;
 2038         int error;
 2039         uint64_t async_flag;
 2040         int mnt_gen_r;
 2041         unsigned int retries;
 2042 
 2043         KASSERT((flags & MNT_DEFERRED) == 0 ||
 2044             (flags & (MNT_RECURSE | MNT_FORCE)) == (MNT_RECURSE | MNT_FORCE),
 2045             ("MNT_DEFERRED requires MNT_RECURSE | MNT_FORCE"));
 2046 
 2047         /*
 2048          * If the caller has explicitly requested the unmount to be handled by
 2049          * the taskqueue and we're not already in taskqueue context, queue
 2050          * up the unmount request and exit.  This is done prior to any
 2051          * credential checks; MNT_DEFERRED should be used only for kernel-
 2052          * initiated unmounts and will therefore be processed with the
 2053          * (kernel) credentials of the taskqueue thread.  Still, callers
 2054          * should be sure this is the behavior they want.
 2055          */
 2056         if ((flags & MNT_DEFERRED) != 0 &&
 2057             taskqueue_member(taskqueue_deferred_unmount, curthread) == 0) {
 2058                 if (!deferred_unmount_enqueue(mp, flags, false, 0))
 2059                         vfs_rel(mp);
 2060                 return (EINPROGRESS);
 2061         }
 2062 
 2063         /*
 2064          * Only privileged root, or (if MNT_USER is set) the user that did the
 2065          * original mount is permitted to unmount this filesystem.
 2066          * This check should be made prior to queueing up any recursive
 2067          * unmounts of upper filesystems.  Those unmounts will be executed
 2068          * with kernel thread credentials and are expected to succeed, so
 2069          * we must at least ensure the originating context has sufficient
 2070          * privilege to unmount the base filesystem before proceeding with
 2071          * the uppers.
 2072          */
 2073         error = vfs_suser(mp, td);
 2074         if (error != 0) {
 2075                 KASSERT((flags & MNT_DEFERRED) == 0,
 2076                     ("taskqueue unmount with insufficient privilege"));
 2077                 vfs_rel(mp);
 2078                 return (error);
 2079         }
 2080 
 2081         if (recursive_forced_unmount && ((flags & MNT_FORCE) != 0))
 2082                 flags |= MNT_RECURSE;
 2083 
 2084         if ((flags & MNT_RECURSE) != 0) {
 2085                 KASSERT((flags & MNT_FORCE) != 0,
 2086                     ("MNT_RECURSE requires MNT_FORCE"));
 2087 
 2088                 MNT_ILOCK(mp);
 2089                 /*
 2090                  * Set MNTK_RECURSE to prevent new upper mounts from being
 2091                  * added, and note that an operation on the uppers list is in
 2092                  * progress.  This will ensure that unregistration from the
 2093                  * uppers list, and therefore any pending unmount of the upper
 2094                  * FS, can't complete until after we finish walking the list.
 2095                  */
 2096                 mp->mnt_kern_flag |= MNTK_RECURSE;
 2097                 mp->mnt_upper_pending++;
 2098                 TAILQ_FOREACH(upper, &mp->mnt_uppers, mnt_upper_link) {
 2099                         retries = upper->mp->mnt_unmount_retries;
 2100                         if (retries > deferred_unmount_retry_limit) {
 2101                                 error = EBUSY;
 2102                                 continue;
 2103                         }
 2104                         MNT_IUNLOCK(mp);
 2105 
 2106                         vfs_ref(upper->mp);
 2107                         if (!deferred_unmount_enqueue(upper->mp, flags,
 2108                             false, 0))
 2109                                 vfs_rel(upper->mp);
 2110                         MNT_ILOCK(mp);
 2111                 }
 2112                 mp->mnt_upper_pending--;
 2113                 if ((mp->mnt_kern_flag & MNTK_UPPER_WAITER) != 0 &&
 2114                     mp->mnt_upper_pending == 0) {
 2115                         mp->mnt_kern_flag &= ~MNTK_UPPER_WAITER;
 2116                         wakeup(&mp->mnt_uppers);
 2117                 }
 2118 
 2119                 /*
 2120                  * If we're not on the taskqueue, wait until the uppers list
 2121                  * is drained before proceeding with unmount.  Otherwise, if
 2122                  * we are on the taskqueue and there are still pending uppers,
 2123                  * just re-enqueue on the end of the taskqueue.
 2124                  */
 2125                 if ((flags & MNT_DEFERRED) == 0) {
 2126                         while (error == 0 && !TAILQ_EMPTY(&mp->mnt_uppers)) {
 2127                                 mp->mnt_kern_flag |= MNTK_TASKQUEUE_WAITER;
 2128                                 error = msleep(&mp->mnt_taskqueue_link,
 2129                                     MNT_MTX(mp), PCATCH, "umntqw", 0);
 2130                         }
 2131                         if (error != 0) {
 2132                                 MNT_REL(mp);
 2133                                 MNT_IUNLOCK(mp);
 2134                                 return (error);
 2135                         }
 2136                 } else if (!TAILQ_EMPTY(&mp->mnt_uppers)) {
 2137                         MNT_IUNLOCK(mp);
 2138                         if (error == 0)
 2139                                 deferred_unmount_enqueue(mp, flags, true, 0);
 2140                         return (error);
 2141                 }
 2142                 MNT_IUNLOCK(mp);
 2143                 KASSERT(TAILQ_EMPTY(&mp->mnt_uppers), ("mnt_uppers not empty"));
 2144         }
 2145 
 2146         /* Allow the taskqueue to safely re-enqueue on failure */
 2147         if ((flags & MNT_DEFERRED) != 0)
 2148                 vfs_ref(mp);
 2149 
 2150         if ((coveredvp = mp->mnt_vnodecovered) != NULL) {
 2151                 mnt_gen_r = mp->mnt_gen;
 2152                 VI_LOCK(coveredvp);
 2153                 vholdl(coveredvp);
 2154                 vn_lock(coveredvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY);
 2155                 /*
 2156                  * Check for mp being unmounted while waiting for the
 2157                  * covered vnode lock.
 2158                  */
 2159                 if (coveredvp->v_mountedhere != mp ||
 2160                     coveredvp->v_mountedhere->mnt_gen != mnt_gen_r) {
 2161                         VOP_UNLOCK(coveredvp);
 2162                         vdrop(coveredvp);
 2163                         vfs_rel(mp);
 2164                         return (EBUSY);
 2165                 }
 2166         }
 2167 
 2168         vfs_op_enter(mp);
 2169 
 2170         vn_start_write(NULL, &mp, V_WAIT);
 2171         MNT_ILOCK(mp);
 2172         if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 ||
 2173             (mp->mnt_flag & MNT_UPDATE) != 0 ||
 2174             !TAILQ_EMPTY(&mp->mnt_uppers)) {
 2175                 dounmount_cleanup(mp, coveredvp, 0);
 2176                 return (EBUSY);
 2177         }
 2178         mp->mnt_kern_flag |= MNTK_UNMOUNT;
 2179         rootvp = vfs_cache_root_clear(mp);
 2180         if (coveredvp != NULL)
 2181                 vn_seqc_write_begin(coveredvp);
 2182         if (flags & MNT_NONBUSY) {
 2183                 MNT_IUNLOCK(mp);
 2184                 error = vfs_check_usecounts(mp);
 2185                 MNT_ILOCK(mp);
 2186                 if (error != 0) {
 2187                         vn_seqc_write_end(coveredvp);
 2188                         dounmount_cleanup(mp, coveredvp, MNTK_UNMOUNT);
 2189                         if (rootvp != NULL) {
 2190                                 vn_seqc_write_end(rootvp);
 2191                                 vrele(rootvp);
 2192                         }
 2193                         return (error);
 2194                 }
 2195         }
 2196         /* Allow filesystems to detect that a forced unmount is in progress. */
 2197         if (flags & MNT_FORCE) {
 2198                 mp->mnt_kern_flag |= MNTK_UNMOUNTF;
 2199                 MNT_IUNLOCK(mp);
 2200                 /*
 2201                  * Must be done after setting MNTK_UNMOUNTF and before
 2202                  * waiting for mnt_lockref to become 0.
 2203                  */
 2204                 VFS_PURGE(mp);
 2205                 MNT_ILOCK(mp);
 2206         }
 2207         error = 0;
 2208         if (mp->mnt_lockref) {
 2209                 mp->mnt_kern_flag |= MNTK_DRAINING;
 2210                 error = msleep(&mp->mnt_lockref, MNT_MTX(mp), PVFS,
 2211                     "mount drain", 0);
 2212         }
 2213         MNT_IUNLOCK(mp);
 2214         KASSERT(mp->mnt_lockref == 0,
 2215             ("%s: invalid lock refcount in the drain path @ %s:%d",
 2216             __func__, __FILE__, __LINE__));
 2217         KASSERT(error == 0,
 2218             ("%s: invalid return value for msleep in the drain path @ %s:%d",
 2219             __func__, __FILE__, __LINE__));
 2220 
 2221         /*
 2222          * We want to keep the vnode around so that we can vn_seqc_write_end
 2223          * after we are done with unmount. Downgrade our reference to a mere
 2224          * hold count so that we don't interefere with anything.
 2225          */
 2226         if (rootvp != NULL) {
 2227                 vhold(rootvp);
 2228                 vrele(rootvp);
 2229         }
 2230 
 2231         if (mp->mnt_flag & MNT_EXPUBLIC)
 2232                 vfs_setpublicfs(NULL, NULL, NULL);
 2233 
 2234         vfs_periodic(mp, MNT_WAIT);
 2235         MNT_ILOCK(mp);
 2236         async_flag = mp->mnt_flag & MNT_ASYNC;
 2237         mp->mnt_flag &= ~MNT_ASYNC;
 2238         mp->mnt_kern_flag &= ~MNTK_ASYNC;
 2239         MNT_IUNLOCK(mp);
 2240         vfs_deallocate_syncvnode(mp);
 2241         error = VFS_UNMOUNT(mp, flags);
 2242         vn_finished_write(mp);
 2243         vfs_rel(mp);
 2244         /*
 2245          * If we failed to flush the dirty blocks for this mount point,
 2246          * undo all the cdir/rdir and rootvnode changes we made above.
 2247          * Unless we failed to do so because the device is reporting that
 2248          * it doesn't exist anymore.
 2249          */
 2250         if (error && error != ENXIO) {
 2251                 MNT_ILOCK(mp);
 2252                 if ((mp->mnt_flag & MNT_RDONLY) == 0) {
 2253                         MNT_IUNLOCK(mp);
 2254                         vfs_allocate_syncvnode(mp);
 2255                         MNT_ILOCK(mp);
 2256                 }
 2257                 mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
 2258                 mp->mnt_flag |= async_flag;
 2259                 if ((mp->mnt_flag & MNT_ASYNC) != 0 &&
 2260                     (mp->mnt_kern_flag & MNTK_NOASYNC) == 0)
 2261                         mp->mnt_kern_flag |= MNTK_ASYNC;
 2262                 if (mp->mnt_kern_flag & MNTK_MWAIT) {
 2263                         mp->mnt_kern_flag &= ~MNTK_MWAIT;
 2264                         wakeup(mp);
 2265                 }
 2266                 vfs_op_exit_locked(mp);
 2267                 MNT_IUNLOCK(mp);
 2268                 if (coveredvp) {
 2269                         vn_seqc_write_end(coveredvp);
 2270                         VOP_UNLOCK(coveredvp);
 2271                         vdrop(coveredvp);
 2272                 }
 2273                 if (rootvp != NULL) {
 2274                         vn_seqc_write_end(rootvp);
 2275                         vdrop(rootvp);
 2276                 }
 2277                 return (error);
 2278         }
 2279 
 2280         mtx_lock(&mountlist_mtx);
 2281         TAILQ_REMOVE(&mountlist, mp, mnt_list);
 2282         mtx_unlock(&mountlist_mtx);
 2283         EVENTHANDLER_DIRECT_INVOKE(vfs_unmounted, mp, td);
 2284         if (coveredvp != NULL) {
 2285                 VI_LOCK(coveredvp);
 2286                 vn_irflag_unset_locked(coveredvp, VIRF_MOUNTPOINT);
 2287                 coveredvp->v_mountedhere = NULL;
 2288                 vn_seqc_write_end_locked(coveredvp);
 2289                 VI_UNLOCK(coveredvp);
 2290                 VOP_UNLOCK(coveredvp);
 2291                 vdrop(coveredvp);
 2292         }
 2293         mount_devctl_event("UNMOUNT", mp, false);
 2294         if (rootvp != NULL) {
 2295                 vn_seqc_write_end(rootvp);
 2296                 vdrop(rootvp);
 2297         }
 2298         vfs_event_signal(NULL, VQ_UNMOUNT, 0);
 2299         if (rootvnode != NULL && mp == rootvnode->v_mount) {
 2300                 vrele(rootvnode);
 2301                 rootvnode = NULL;
 2302         }
 2303         if (mp == rootdevmp)
 2304                 rootdevmp = NULL;
 2305         if ((flags & MNT_DEFERRED) != 0)
 2306                 vfs_rel(mp);
 2307         vfs_mount_destroy(mp);
 2308         return (0);
 2309 }
 2310 
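/*
 * Example (a hedged sketch): dounmount() consumes the caller's mount
 * reference on every path, mirroring kern_unmount() above:
 *
 *	vfs_ref(mp);
 *	error = dounmount(mp, MNT_FORCE, curthread);
 *	... mp must not be dereferenced here, whatever the error ...
 */
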
 2311 /*
 2312  * Report errors during filesystem mounting.
 2313  */
 2314 void
 2315 vfs_mount_error(struct mount *mp, const char *fmt, ...)
 2316 {
 2317         struct vfsoptlist *moptlist = mp->mnt_optnew;
 2318         va_list ap;
 2319         int error, len;
 2320         char *errmsg;
 2321 
 2322         error = vfs_getopt(moptlist, "errmsg", (void **)&errmsg, &len);
 2323         if (error || errmsg == NULL || len <= 0)
 2324                 return;
 2325 
 2326         va_start(ap, fmt);
 2327         vsnprintf(errmsg, (size_t)len, fmt, ap);
 2328         va_end(ap);
 2329 }
 2330 
 2331 void
 2332 vfs_opterror(struct vfsoptlist *opts, const char *fmt, ...)
 2333 {
 2334         va_list ap;
 2335         int error, len;
 2336         char *errmsg;
 2337 
 2338         error = vfs_getopt(opts, "errmsg", (void **)&errmsg, &len);
 2339         if (error || errmsg == NULL || len <= 0)
 2340                 return;
 2341 
 2342         va_start(ap, fmt);
 2343         vsnprintf(errmsg, (size_t)len, fmt, ap);
 2344         va_end(ap);
 2345 }
 2346 
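/*
 * Example (a hedged sketch): both helpers are no-ops unless the caller
 * supplied an "errmsg" buffer option, which nmount(2) users typically
 * pass so that a mount failure carries a message back to userland.
 * The message and the "bsize" variable below are illustrative:
 *
 *	vfs_mount_error(mp, "unsupported block size %d", bsize);
 *	return (EINVAL);
 */
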
 2347 /*
 2348  * ---------------------------------------------------------------------
 2349  * Functions for querying mount options/arguments from filesystems.
 2350  */
 2351 
 2352 /*
 2353  * Check that no unknown options are given
 2354  */
 2355 int
 2356 vfs_filteropt(struct vfsoptlist *opts, const char **legal)
 2357 {
 2358         struct vfsopt *opt;
 2359         char errmsg[255];
 2360         const char **t, *p, *q;
 2361         int ret = 0;
 2362 
 2363         TAILQ_FOREACH(opt, opts, link) {
 2364                 p = opt->name;
 2365                 q = NULL;
 2366                 if (p[0] == 'n' && p[1] == 'o')
 2367                         q = p + 2;
 2368                 for (t = global_opts; *t != NULL; t++) {
 2369                         if (strcmp(*t, p) == 0)
 2370                                 break;
 2371                         if (q != NULL) {
 2372                                 if (strcmp(*t, q) == 0)
 2373                                         break;
 2374                         }
 2375                 }
 2376                 if (*t != NULL)
 2377                         continue;
 2378                 for (t = legal; *t != NULL; t++) {
 2379                         if (strcmp(*t, p) == 0)
 2380                                 break;
 2381                         if (q != NULL) {
 2382                                 if (strcmp(*t, q) == 0)
 2383                                         break;
 2384                         }
 2385                 }
 2386                 if (*t != NULL)
 2387                         continue;
 2388                 snprintf(errmsg, sizeof(errmsg),
 2389                     "mount option <%s> is unknown", p);
 2390                 ret = EINVAL;
 2391         }
 2392         if (ret != 0) {
 2393                 TAILQ_FOREACH(opt, opts, link) {
 2394                         if (strcmp(opt->name, "errmsg") == 0) {
 2395                                 strncpy((char *)opt->value, errmsg, opt->len);
 2396                                 break;
 2397                         }
 2398                 }
 2399                 if (opt == NULL)
 2400                         printf("%s\n", errmsg);
 2401         }
 2402         return (ret);
 2403 }
 2404 
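/*
 * Example (a hedged sketch): a filesystem's mount handler typically
 * validates its option list up front; the option names below are
 * illustrative.  Note that a "no" prefix is accepted for every listed
 * name, as the loops above show:
 *
 *	static const char *myfs_opts[] = {
 *		"from", "export", "errmsg", NULL
 *	};
 *
 *	if (vfs_filteropt(mp->mnt_optnew, myfs_opts))
 *		return (EINVAL);
 */
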
 2405 /*
 2406  * Get a mount option by its name.
 2407  *
 2408  * Return 0 if the option was found, ENOENT otherwise.
 2409  * If len is non-NULL it will be filled with the length
 2410  * of the option. If buf is non-NULL, it will be filled
 2411  * with the address of the option.
 2412  */
 2413 int
 2414 vfs_getopt(struct vfsoptlist *opts, const char *name, void **buf, int *len)
 2415 {
 2416         struct vfsopt *opt;
 2417 
 2418         KASSERT(opts != NULL, ("vfs_getopt: caller passed 'opts' as NULL"));
 2419 
 2420         TAILQ_FOREACH(opt, opts, link) {
 2421                 if (strcmp(name, opt->name) == 0) {
 2422                         opt->seen = 1;
 2423                         if (len != NULL)
 2424                                 *len = opt->len;
 2425                         if (buf != NULL)
 2426                                 *buf = opt->value;
 2427                         return (0);
 2428                 }
 2429         }
 2430         return (ENOENT);
 2431 }
 2432 
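/*
 * Example (a hedged sketch): vfs_domount_update() above uses this to
 * obtain the raw "export" option in place; no copy is made, so the
 * buffer is only valid as long as the option list is:
 *
 *	error = vfs_getopt(mp->mnt_optnew, "export", &bufp, &len);
 *	if (error == 0)
 *		... bufp and len describe the option value ...
 */
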
 2433 int
 2434 vfs_getopt_pos(struct vfsoptlist *opts, const char *name)
 2435 {
 2436         struct vfsopt *opt;
 2437 
 2438         if (opts == NULL)
 2439                 return (-1);
 2440 
 2441         TAILQ_FOREACH(opt, opts, link) {
 2442                 if (strcmp(name, opt->name) == 0) {
 2443                         opt->seen = 1;
 2444                         return (opt->pos);
 2445                 }
 2446         }
 2447         return (-1);
 2448 }
 2449 
 2450 int
 2451 vfs_getopt_size(struct vfsoptlist *opts, const char *name, off_t *value)
 2452 {
 2453         char *opt_value, *vtp;
 2454         quad_t iv;
 2455         int error, opt_len;
 2456 
 2457         error = vfs_getopt(opts, name, (void **)&opt_value, &opt_len);
 2458         if (error != 0)
 2459                 return (error);
 2460         if (opt_len == 0 || opt_value == NULL)
 2461                 return (EINVAL);
 2462         if (opt_value[0] == '\0' || opt_value[opt_len - 1] != '\0')
 2463                 return (EINVAL);
 2464         iv = strtoq(opt_value, &vtp, 0);
 2465         if (vtp == opt_value || (vtp[0] != '\0' && vtp[1] != '\0'))
 2466                 return (EINVAL);
 2467         if (iv < 0)
 2468                 return (EINVAL);
 2469         switch (vtp[0]) {
 2470         case 't': case 'T':
 2471                 iv *= 1024;
 2472                 /* FALLTHROUGH */
 2473         case 'g': case 'G':
 2474                 iv *= 1024;
 2475                 /* FALLTHROUGH */
 2476         case 'm': case 'M':
 2477                 iv *= 1024;
 2478                 /* FALLTHROUGH */
 2479         case 'k': case 'K':
 2480                 iv *= 1024;             /* FALLTHROUGH */
 2481         case '\0':
 2482                 break;
 2483         default:
 2484                 return (EINVAL);
 2485         }
 2486         *value = iv;
 2487 
 2488         return (0);
 2489 }
 2490 
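/*
 * Example (a hedged sketch): a single k/m/g/t suffix multiplies the
 * value by successive factors of 1024 via the fallthroughs above, so
 * "16m" yields 16 * 1024 * 1024 = 16777216.  The option name is
 * illustrative:
 *
 *	off_t size;
 *
 *	if (vfs_getopt_size(mp->mnt_optnew, "maxsize", &size) == 0)
 *		... size is 16777216 for "maxsize=16m" ...
 */
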
 2491 char *
 2492 vfs_getopts(struct vfsoptlist *opts, const char *name, int *error)
 2493 {
 2494         struct vfsopt *opt;
 2495 
 2496         *error = 0;
 2497         TAILQ_FOREACH(opt, opts, link) {
 2498                 if (strcmp(name, opt->name) != 0)
 2499                         continue;
 2500                 opt->seen = 1;
 2501                 if (opt->len == 0 ||
 2502                     ((char *)opt->value)[opt->len - 1] != '\0') {
 2503                         *error = EINVAL;
 2504                         return (NULL);
 2505                 }
 2506                 return (opt->value);
 2507         }
 2508         *error = ENOENT;
 2509         return (NULL);
 2510 }
 2511 
 2512 int
 2513 vfs_flagopt(struct vfsoptlist *opts, const char *name, uint64_t *w,
 2514         uint64_t val)
 2515 {
 2516         struct vfsopt *opt;
 2517 
 2518         TAILQ_FOREACH(opt, opts, link) {
 2519                 if (strcmp(name, opt->name) == 0) {
 2520                         opt->seen = 1;
 2521                         if (w != NULL)
 2522                                 *w |= val;
 2523                         return (1);
 2524                 }
 2525         }
 2526         if (w != NULL)
 2527                 *w &= ~val;
 2528         return (0);
 2529 }
 2530 
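/*
 * Example (a hedged sketch): translating the mere presence of an option
 * into a flag bit; the bit is set in *w if "ro" was given and cleared
 * otherwise, and the return value reports the presence:
 *
 *	vfs_flagopt(mp->mnt_optnew, "ro", &flags, MNT_RDONLY);
 */
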
 2531 int
 2532 vfs_scanopt(struct vfsoptlist *opts, const char *name, const char *fmt, ...)
 2533 {
 2534         va_list ap;
 2535         struct vfsopt *opt;
 2536         int ret;
 2537 
 2538         KASSERT(opts != NULL, ("vfs_scanopt: caller passed 'opts' as NULL"));
 2539 
 2540         TAILQ_FOREACH(opt, opts, link) {
 2541                 if (strcmp(name, opt->name) != 0)
 2542                         continue;
 2543                 opt->seen = 1;
 2544                 if (opt->len == 0 || opt->value == NULL)
 2545                         return (0);
 2546                 if (((char *)opt->value)[opt->len - 1] != '\0')
 2547                         return (0);
 2548                 va_start(ap, fmt);
 2549                 ret = vsscanf(opt->value, fmt, ap);
 2550                 va_end(ap);
 2551                 return (ret);
 2552         }
 2553         return (0);
 2554 }
 2555 
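/*
 * Example (a hedged sketch): the return value is the vsscanf()
 * conversion count, not an errno, so callers check for the number of
 * fields they expect.  The option name and variables are illustrative:
 *
 *	if (vfs_scanopt(mp->mnt_optnew, "timeout", "%d", &timo) != 1)
 *		timo = MYFS_DEFAULT_TIMEOUT;
 */
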
 2556 int
 2557 vfs_setopt(struct vfsoptlist *opts, const char *name, void *value, int len)
 2558 {
 2559         struct vfsopt *opt;
 2560 
 2561         TAILQ_FOREACH(opt, opts, link) {
 2562                 if (strcmp(name, opt->name) != 0)
 2563                         continue;
 2564                 opt->seen = 1;
 2565                 if (opt->value == NULL)
 2566                         opt->len = len;
 2567                 else {
 2568                         if (opt->len != len)
 2569                                 return (EINVAL);
 2570                         bcopy(value, opt->value, len);
 2571                 }
 2572                 return (0);
 2573         }
 2574         return (ENOENT);
 2575 }
 2576 
 2577 int
 2578 vfs_setopt_part(struct vfsoptlist *opts, const char *name, void *value, int len)
 2579 {
 2580         struct vfsopt *opt;
 2581 
 2582         TAILQ_FOREACH(opt, opts, link) {
 2583                 if (strcmp(name, opt->name) != 0)
 2584                         continue;
 2585                 opt->seen = 1;
 2586                 if (opt->value == NULL)
 2587                         opt->len = len;
 2588                 else {
 2589                         if (opt->len < len)
 2590                                 return (EINVAL);
 2591                         opt->len = len;
 2592                         bcopy(value, opt->value, len);
 2593                 }
 2594                 return (0);
 2595         }
 2596         return (ENOENT);
 2597 }
 2598 
 2599 int
 2600 vfs_setopts(struct vfsoptlist *opts, const char *name, const char *value)
 2601 {
 2602         struct vfsopt *opt;
 2603 
 2604         TAILQ_FOREACH(opt, opts, link) {
 2605                 if (strcmp(name, opt->name) != 0)
 2606                         continue;
 2607                 opt->seen = 1;
 2608                 if (opt->value == NULL)
 2609                         opt->len = strlen(value) + 1;
 2610                 else if (strlcpy(opt->value, value, opt->len) >= opt->len)
 2611                         return (EINVAL);
 2612                 return (0);
 2613         }
 2614         return (ENOENT);
 2615 }
 2616 
 2617 /*
 2618  * Find and copy a mount option.
 2619  *
 2620  * The size of the buffer has to be specified
 2621  * in len; if it is not the same length as the
 2622  * mount option, EINVAL is returned.
 2623  * Returns ENOENT if the option is not found.
 2624  */
 2625 int
 2626 vfs_copyopt(struct vfsoptlist *opts, const char *name, void *dest, int len)
 2627 {
 2628         struct vfsopt *opt;
 2629 
 2630         KASSERT(opts != NULL, ("vfs_copyopt: caller passed 'opts' as NULL"));
 2631 
 2632         TAILQ_FOREACH(opt, opts, link) {
 2633                 if (strcmp(name, opt->name) == 0) {
 2634                         opt->seen = 1;
 2635                         if (len != opt->len)
 2636                                 return (EINVAL);
 2637                         bcopy(opt->value, dest, opt->len);
 2638                         return (0);
 2639                 }
 2640         }
 2641         return (ENOENT);
 2642 }
 2643 
 2644 int
 2645 __vfs_statfs(struct mount *mp, struct statfs *sbp)
 2646 {
 2647         /*
 2648          * Filesystems only fill in part of the structure for updates, we
 2649          * have to read the entirety first to get all content.
 2650          */
 2651         if (sbp != &mp->mnt_stat)
 2652                 memcpy(sbp, &mp->mnt_stat, sizeof(*sbp));
 2653 
 2654         /*
 2655          * Set these in case the underlying filesystem fails to do so.
 2656          */
 2657         sbp->f_version = STATFS_VERSION;
 2658         sbp->f_namemax = NAME_MAX;
 2659         sbp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
 2660         sbp->f_nvnodelistsize = mp->mnt_nvnodelistsize;
 2661 
 2662         return (mp->mnt_op->vfs_statfs(mp, sbp));
 2663 }
 2664 
 2665 void
 2666 vfs_mountedfrom(struct mount *mp, const char *from)
 2667 {
 2668 
 2669         bzero(mp->mnt_stat.f_mntfromname, sizeof mp->mnt_stat.f_mntfromname);
 2670         strlcpy(mp->mnt_stat.f_mntfromname, from,
 2671             sizeof mp->mnt_stat.f_mntfromname);
 2672 }
 2673 
 2674 /*
 2675  * ---------------------------------------------------------------------
 2676  * This is the API for building mount arguments and mounting filesystems
 2677  * from inside the kernel.
 2678  *
 2679  * The API works by accumulating individual arguments.  The first error
 2680  * is latched.
 2681  *
 2682  * XXX: should be documented in new manpage kernel_mount(9)
 2683  */
 2684 
 2685 /* A memory allocation which must be freed when we are done */
 2686 struct mntaarg {
 2687         SLIST_ENTRY(mntaarg)    next;
 2688 };
 2689 
 2690 /* The header for the mount arguments */
 2691 struct mntarg {
 2692         struct iovec *v;
 2693         int len;
 2694         int error;
 2695         SLIST_HEAD(, mntaarg)   list;
 2696 };
 2697 
 2698 /*
 2699  * Add a boolean argument.
 2700  *
 2701  * flag is the boolean value.
 2702  * name must start with "no".
 2703  */
 2704 struct mntarg *
 2705 mount_argb(struct mntarg *ma, int flag, const char *name)
 2706 {
 2707 
 2708         KASSERT(name[0] == 'n' && name[1] == 'o',
 2709             ("mount_argb(...,%s): name must start with 'no'", name));
 2710 
 2711         return (mount_arg(ma, name + (flag ? 2 : 0), NULL, 0));
 2712 }
 2713 
 2714 /*
 2715  * Add an argument printf style
 2716  */
 2717 struct mntarg *
 2718 mount_argf(struct mntarg *ma, const char *name, const char *fmt, ...)
 2719 {
 2720         va_list ap;
 2721         struct mntaarg *maa;
 2722         struct sbuf *sb;
 2723         int len;
 2724 
 2725         if (ma == NULL) {
 2726                 ma = malloc(sizeof *ma, M_MOUNT, M_WAITOK | M_ZERO);
 2727                 SLIST_INIT(&ma->list);
 2728         }
 2729         if (ma->error)
 2730                 return (ma);
 2731 
 2732         ma->v = realloc(ma->v, sizeof *ma->v * (ma->len + 2),
 2733             M_MOUNT, M_WAITOK);
 2734         ma->v[ma->len].iov_base = (void *)(uintptr_t)name;
 2735         ma->v[ma->len].iov_len = strlen(name) + 1;
 2736         ma->len++;
 2737 
 2738         sb = sbuf_new_auto();
 2739         va_start(ap, fmt);
 2740         sbuf_vprintf(sb, fmt, ap);
 2741         va_end(ap);
 2742         sbuf_finish(sb);
 2743         len = sbuf_len(sb) + 1;
 2744         maa = malloc(sizeof *maa + len, M_MOUNT, M_WAITOK | M_ZERO);
 2745         SLIST_INSERT_HEAD(&ma->list, maa, next);
 2746         bcopy(sbuf_data(sb), maa + 1, len);
 2747         sbuf_delete(sb);
 2748 
 2749         ma->v[ma->len].iov_base = maa + 1;
 2750         ma->v[ma->len].iov_len = len;
 2751         ma->len++;
 2752 
 2753         return (ma);
 2754 }
 2755 
 2756 /*
 2757  * Add an argument which is a userland string.
 2758  */
 2759 struct mntarg *
 2760 mount_argsu(struct mntarg *ma, const char *name, const void *val, int len)
 2761 {
 2762         struct mntaarg *maa;
 2763         char *tbuf;
 2764 
 2765         if (val == NULL)
 2766                 return (ma);
 2767         if (ma == NULL) {
 2768                 ma = malloc(sizeof *ma, M_MOUNT, M_WAITOK | M_ZERO);
 2769                 SLIST_INIT(&ma->list);
 2770         }
 2771         if (ma->error)
 2772                 return (ma);
 2773         maa = malloc(sizeof *maa + len, M_MOUNT, M_WAITOK | M_ZERO);
 2774         SLIST_INSERT_HEAD(&ma->list, maa, next);
 2775         tbuf = (void *)(maa + 1);
 2776         ma->error = copyinstr(val, tbuf, len, NULL);
 2777         return (mount_arg(ma, name, tbuf, -1));
 2778 }
 2779 
 2780 /*
 2781  * Plain argument.
 2782  *
 2783  * If length is -1, treat value as a C string.
 2784  */
 2785 struct mntarg *
 2786 mount_arg(struct mntarg *ma, const char *name, const void *val, int len)
 2787 {
 2788 
 2789         if (ma == NULL) {
 2790                 ma = malloc(sizeof *ma, M_MOUNT, M_WAITOK | M_ZERO);
 2791                 SLIST_INIT(&ma->list);
 2792         }
 2793         if (ma->error)
 2794                 return (ma);
 2795 
 2796         ma->v = realloc(ma->v, sizeof *ma->v * (ma->len + 2),
 2797             M_MOUNT, M_WAITOK);
 2798         ma->v[ma->len].iov_base = (void *)(uintptr_t)name;
 2799         ma->v[ma->len].iov_len = strlen(name) + 1;
 2800         ma->len++;
 2801 
 2802         ma->v[ma->len].iov_base = (void *)(uintptr_t)val;
 2803         if (len < 0)
 2804                 ma->v[ma->len].iov_len = strlen(val) + 1;
 2805         else
 2806                 ma->v[ma->len].iov_len = len;
 2807         ma->len++;
 2808         return (ma);
 2809 }
 2810 
 2811 /*
 2812  * Free a mntarg structure
 2813  */
 2814 static void
 2815 free_mntarg(struct mntarg *ma)
 2816 {
 2817         struct mntaarg *maa;
 2818 
 2819         while (!SLIST_EMPTY(&ma->list)) {
 2820                 maa = SLIST_FIRST(&ma->list);
 2821                 SLIST_REMOVE_HEAD(&ma->list, next);
 2822                 free(maa, M_MOUNT);
 2823         }
 2824         free(ma->v, M_MOUNT);
 2825         free(ma, M_MOUNT);
 2826 }
 2827 
 2828 /*
 2829  * Mount a filesystem
 2830  */
 2831 int
 2832 kernel_mount(struct mntarg *ma, uint64_t flags)
 2833 {
 2834         struct uio auio;
 2835         int error;
 2836 
 2837         KASSERT(ma != NULL, ("kernel_mount NULL ma"));
 2838         KASSERT(ma->error != 0 || ma->v != NULL, ("kernel_mount NULL ma->v"));
 2839         KASSERT(!(ma->len & 1), ("kernel_mount odd ma->len (%d)", ma->len));
 2840 
 2841         error = ma->error;
 2842         if (error == 0) {
 2843                 auio.uio_iov = ma->v;
 2844                 auio.uio_iovcnt = ma->len;
 2845                 auio.uio_segflg = UIO_SYSSPACE;
 2846                 error = vfs_donmount(curthread, flags, &auio);
 2847         }
 2848         free_mntarg(ma);
 2849         return (error);
 2850 }
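
Because each builder accepts ma == NULL on the first call and
short-circuits once ma->error is set, a caller can chain the calls and
test only the final return value; kernel_mount() frees the list on both
success and failure.  A minimal sketch, assuming a tmpfs mount at a
made-up path:

        struct mntarg *ma;
        int error;

        ma = mount_arg(NULL, "fstype", "tmpfs", -1);
        ma = mount_arg(ma, "fspath", "/mnt/scratch", -1);
        error = kernel_mount(ma, MNT_NOSUID);
        /* ma has been freed by kernel_mount(); only error survives. */
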
 2851 
 2852 /* Map from mount options to printable formats. */
 2853 static struct mntoptnames optnames[] = {
 2854         MNTOPT_NAMES
 2855 };
 2856 
 2857 #define DEVCTL_LEN 1024
 2858 static void
 2859 mount_devctl_event(const char *type, struct mount *mp, bool donew)
 2860 {
 2861         const uint8_t *cp;
 2862         struct mntoptnames *fp;
 2863         struct sbuf sb;
 2864         struct statfs *sfp = &mp->mnt_stat;
 2865         char *buf;
 2866 
 2867         buf = malloc(DEVCTL_LEN, M_MOUNT, M_NOWAIT);
 2868         if (buf == NULL)
 2869                 return;
 2870         sbuf_new(&sb, buf, DEVCTL_LEN, SBUF_FIXEDLEN);
 2871         sbuf_cpy(&sb, "mount-point=\"");
 2872         devctl_safe_quote_sb(&sb, sfp->f_mntonname);
 2873         sbuf_cat(&sb, "\" mount-dev=\"");
 2874         devctl_safe_quote_sb(&sb, sfp->f_mntfromname);
 2875         sbuf_cat(&sb, "\" mount-type=\"");
 2876         devctl_safe_quote_sb(&sb, sfp->f_fstypename);
 2877         sbuf_cat(&sb, "\" fsid=0x");
 2878         cp = (const uint8_t *)&sfp->f_fsid.val[0];
 2879         for (int i = 0; i < sizeof(sfp->f_fsid); i++)
 2880                 sbuf_printf(&sb, "%02x", cp[i]);
 2881         sbuf_printf(&sb, " owner=%u flags=\"", sfp->f_owner);
 2882         for (fp = optnames; fp->o_opt != 0; fp++) {
 2883                 if ((mp->mnt_flag & fp->o_opt) != 0) {
 2884                         sbuf_cat(&sb, fp->o_name);
 2885                         sbuf_putc(&sb, ';');
 2886                 }
 2887         }
 2888         sbuf_putc(&sb, '"');
 2889         sbuf_finish(&sb);
 2890 
 2891         /*
 2892          * Options are not published because the form of the options depends on
 2893          * the file system and may include binary data. In addition, they don't
 2894          * necessarily provide enough useful information to be actionable when
 2895          * devd processes them.
 2896          */
 2897 
 2898         if (sbuf_error(&sb) == 0)
 2899                 devctl_notify("VFS", "FS", type, sbuf_data(&sb));
 2900         sbuf_delete(&sb);
 2901         free(buf, M_MOUNT);
 2902 }
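
The notification surfaces in devd(8) as a VFS/FS event; for a mount it
might look roughly like the line below (all values are illustrative,
and the flag names come from the MNTOPT_NAMES table):

        !system=VFS subsystem=FS type=MOUNT mount-point="/mnt"
            mount-dev="/dev/ada0p2" mount-type="ufs"
            fsid=0x0123456789abcdef owner=0 flags="local;"
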
 2903 
 2904 /*
 2905  * Force a remount of the specified mount point to read-only.  The
 2906  * mount must be busied by the caller to avoid parallel unmount attempts.
 2907  *
 2908  * The intended use is to prevent further writes once a metadata
 2909  * inconsistency is detected.  Note that the function still flushes
 2910  * all cached metadata and data for the mount point, which might not
 2911  * always be suitable.
 2912  */
 2913 int
 2914 vfs_remount_ro(struct mount *mp)
 2915 {
 2916         struct vfsoptlist *opts;
 2917         struct vfsopt *opt;
 2918         struct vnode *vp_covered, *rootvp;
 2919         int error;
 2920 
 2921         KASSERT(mp->mnt_lockref > 0,
 2922             ("vfs_remount_ro: mp %p is not busied", mp));
 2923         KASSERT((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0,
 2924             ("vfs_remount_ro: mp %p is being unmounted (and busy?)", mp));
 2925 
 2926         rootvp = NULL;
 2927         vp_covered = mp->mnt_vnodecovered;
 2928         error = vget(vp_covered, LK_EXCLUSIVE | LK_NOWAIT);
 2929         if (error != 0)
 2930                 return (error);
 2931         VI_LOCK(vp_covered);
 2932         if ((vp_covered->v_iflag & VI_MOUNT) != 0) {
 2933                 VI_UNLOCK(vp_covered);
 2934                 vput(vp_covered);
 2935                 return (EBUSY);
 2936         }
 2937         vp_covered->v_iflag |= VI_MOUNT;
 2938         VI_UNLOCK(vp_covered);
 2939         vfs_op_enter(mp);
 2940         vn_seqc_write_begin(vp_covered);
 2941 
 2942         MNT_ILOCK(mp);
 2943         if ((mp->mnt_flag & MNT_RDONLY) != 0) {
 2944                 MNT_IUNLOCK(mp);
 2945                 error = EBUSY;
 2946                 goto out;
 2947         }
 2948         mp->mnt_flag |= MNT_UPDATE | MNT_FORCE | MNT_RDONLY;
 2949         rootvp = vfs_cache_root_clear(mp);
 2950         MNT_IUNLOCK(mp);
 2951 
 2952         opts = malloc(sizeof(struct vfsoptlist), M_MOUNT, M_WAITOK | M_ZERO);
 2953         TAILQ_INIT(opts);
 2954         opt = malloc(sizeof(struct vfsopt), M_MOUNT, M_WAITOK | M_ZERO);
 2955         opt->name = strdup("ro", M_MOUNT);
 2956         opt->value = NULL;
 2957         TAILQ_INSERT_TAIL(opts, opt, link);
 2958         vfs_mergeopts(opts, mp->mnt_opt);
 2959         mp->mnt_optnew = opts;
 2960 
 2961         error = VFS_MOUNT(mp);
 2962 
 2963         if (error == 0) {
 2964                 MNT_ILOCK(mp);
 2965                 mp->mnt_flag &= ~(MNT_UPDATE | MNT_FORCE);
 2966                 MNT_IUNLOCK(mp);
 2967                 vfs_deallocate_syncvnode(mp);
 2968                 if (mp->mnt_opt != NULL)
 2969                         vfs_freeopts(mp->mnt_opt);
 2970                 mp->mnt_opt = mp->mnt_optnew;
 2971         } else {
 2972                 MNT_ILOCK(mp);
 2973                 mp->mnt_flag &= ~(MNT_UPDATE | MNT_FORCE | MNT_RDONLY);
 2974                 MNT_IUNLOCK(mp);
 2975                 vfs_freeopts(mp->mnt_optnew);
 2976         }
 2977         mp->mnt_optnew = NULL;
 2978 
 2979 out:
 2980         vfs_op_exit(mp);
 2981         VI_LOCK(vp_covered);
 2982         vp_covered->v_iflag &= ~VI_MOUNT;
 2983         VI_UNLOCK(vp_covered);
 2984         vput(vp_covered);
 2985         vn_seqc_write_end(vp_covered);
 2986         if (rootvp != NULL) {
 2987                 vn_seqc_write_end(rootvp);
 2988                 vrele(rootvp);
 2989         }
 2990         return (error);
 2991 }
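
A sketch of the intended calling pattern, assuming a filesystem that
has just detected on-disk corruption; the vfs_busy() call satisfies the
KASSERTs above, and the error handling is illustrative:

        if (vfs_busy(mp, MBF_NOWAIT) == 0) {
                error = vfs_remount_ro(mp);
                if (error != 0)
                        printf("%s: downgrade to read-only failed: %d\n",
                            mp->mnt_stat.f_mntonname, error);
                vfs_unbusy(mp);
        }
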
 2992 
 2993 /*
 2994  * Suspend write operations on all local writable filesystems.  Does
 2995  * a full sync of them in the process.
 2996  *
 2997  * Iterate over the mount points in reverse order, suspending the most
 2998  * recently mounted filesystems first.  This handles the case where a
 2999  * filesystem mounted from a md(4) vnode-backed device must be
 3000  * suspended before the filesystem that owns the backing vnode.
 3001  */
 3002 void
 3003 suspend_all_fs(void)
 3004 {
 3005         struct mount *mp;
 3006         int error;
 3007 
 3008         mtx_lock(&mountlist_mtx);
 3009         TAILQ_FOREACH_REVERSE(mp, &mountlist, mntlist, mnt_list) {
 3010                 error = vfs_busy(mp, MBF_MNTLSTLOCK | MBF_NOWAIT);
 3011                 if (error != 0)
 3012                         continue;
 3013                 if ((mp->mnt_flag & (MNT_RDONLY | MNT_LOCAL)) != MNT_LOCAL ||
 3014                     (mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
 3015                         mtx_lock(&mountlist_mtx);
 3016                         vfs_unbusy(mp);
 3017                         continue;
 3018                 }
 3019                 error = vfs_write_suspend(mp, 0);
 3020                 if (error == 0) {
 3021                         MNT_ILOCK(mp);
 3022                         MPASS((mp->mnt_kern_flag & MNTK_SUSPEND_ALL) == 0);
 3023                         mp->mnt_kern_flag |= MNTK_SUSPEND_ALL;
 3024                         MNT_IUNLOCK(mp);
 3025                         mtx_lock(&mountlist_mtx);
 3026                 } else {
 3027                         printf("suspend of %s failed, error %d\n",
 3028                             mp->mnt_stat.f_mntonname, error);
 3029                         mtx_lock(&mountlist_mtx);
 3030                         vfs_unbusy(mp);
 3031                 }
 3032         }
 3033         mtx_unlock(&mountlist_mtx);
 3034 }
 3035 
 3036 void
 3037 resume_all_fs(void)
 3038 {
 3039         struct mount *mp;
 3040 
 3041         mtx_lock(&mountlist_mtx);
 3042         TAILQ_FOREACH(mp, &mountlist, mnt_list) {
 3043                 if ((mp->mnt_kern_flag & MNTK_SUSPEND_ALL) == 0)
 3044                         continue;
 3045                 mtx_unlock(&mountlist_mtx);
 3046                 MNT_ILOCK(mp);
 3047                 MPASS((mp->mnt_kern_flag & MNTK_SUSPEND) != 0);
 3048                 mp->mnt_kern_flag &= ~MNTK_SUSPEND_ALL;
 3049                 MNT_IUNLOCK(mp);
 3050                 vfs_write_resume(mp, 0);
 3051                 mtx_lock(&mountlist_mtx);
 3052                 vfs_unbusy(mp);
 3053         }
 3054         mtx_unlock(&mountlist_mtx);
 3055 }
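
The pair is meant to bracket an operation during which no local
filesystem may be written, such as a system sleep transition.
MNTK_SUSPEND_ALL marks exactly the mounts that suspend_all_fs()
suspended, so resume_all_fs() leaves all other mounts alone.  A sketch
with a hypothetical sleep step:

        suspend_all_fs();
        error = enter_sleep_state();    /* hypothetical platform call */
        resume_all_fs();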
