The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/fs/tmpfs/tmpfs_vfsops.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*      $NetBSD: tmpfs_vfsops.c,v 1.10 2005/12/11 12:24:29 christos Exp $       */
    2 
    3 /*-
    4  * Copyright (c) 2005 The NetBSD Foundation, Inc.
    5  * All rights reserved.
    6  *
    7  * This code is derived from software contributed to The NetBSD Foundation
    8  * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
    9  * 2005 program.
   10  *
   11  * Redistribution and use in source and binary forms, with or without
   12  * modification, are permitted provided that the following conditions
   13  * are met:
   14  * 1. Redistributions of source code must retain the above copyright
   15  *    notice, this list of conditions and the following disclaimer.
   16  * 2. Redistributions in binary form must reproduce the above copyright
   17  *    notice, this list of conditions and the following disclaimer in the
   18  *    documentation and/or other materials provided with the distribution.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   30  * POSSIBILITY OF SUCH DAMAGE.
   31  */
   32 
   33 /*
   34  * Efficient memory file system.
   35  *
   36  * tmpfs is a file system that uses FreeBSD's virtual memory
   37  * sub-system to store file data and metadata in an efficient way.
   38  * This means that it does not follow the structure of an on-disk file
   39  * system because it simply does not need to.  Instead, it uses
   40  * memory-specific data structures and algorithms to automatically
   41  * allocate and release resources.
   42  */
   43 #include <sys/cdefs.h>
   44 __FBSDID("$FreeBSD$");
   45 
   46 #include <sys/param.h>
   47 #include <sys/limits.h>
   48 #include <sys/lock.h>
   49 #include <sys/mutex.h>
   50 #include <sys/proc.h>
   51 #include <sys/jail.h>
   52 #include <sys/kernel.h>
   53 #include <sys/rwlock.h>
   54 #include <sys/stat.h>
   55 #include <sys/sx.h>
   56 #include <sys/sysctl.h>
   57 
   58 #include <vm/vm.h>
   59 #include <vm/vm_param.h>
   60 #include <vm/pmap.h>
   61 #include <vm/vm_extern.h>
   62 #include <vm/vm_map.h>
   63 #include <vm/vm_object.h>
   64 #include <vm/vm_param.h>
   65 
   66 #include <fs/tmpfs/tmpfs.h>
   67 
   68 /*
   69  * Default permission for root node
   70  */
   71 #define TMPFS_DEFAULT_ROOT_MODE (S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)
   72 
   73 MALLOC_DEFINE(M_TMPFSMNT, "tmpfs mount", "tmpfs mount structures");
   74 MALLOC_DEFINE(M_TMPFSNAME, "tmpfs name", "tmpfs file names");
   75 
   76 static int      tmpfs_mount(struct mount *);
   77 static int      tmpfs_unmount(struct mount *, int);
   78 static int      tmpfs_root(struct mount *, int flags, struct vnode **);
   79 static int      tmpfs_fhtovp(struct mount *, struct fid *, int,
   80                     struct vnode **);
   81 static int      tmpfs_statfs(struct mount *, struct statfs *);
   82 static void     tmpfs_susp_clean(struct mount *);
   83 
   84 static const char *tmpfs_opts[] = {
   85         "from", "size", "maxfilesize", "inodes", "uid", "gid", "mode", "export",
   86         "union", "nonc", NULL
   87 };
   88 
   89 static const char *tmpfs_updateopts[] = {
   90         "from", "export", "size", NULL
   91 };
   92 
   93 static int
   94 tmpfs_node_ctor(void *mem, int size, void *arg, int flags)
   95 {
   96         struct tmpfs_node *node = (struct tmpfs_node *)mem;
   97 
   98         node->tn_gen++;
   99         node->tn_size = 0;
  100         node->tn_status = 0;
  101         node->tn_flags = 0;
  102         node->tn_links = 0;
  103         node->tn_vnode = NULL;
  104         node->tn_vpstate = 0;
  105 
  106         return (0);
  107 }
  108 
  109 static void
  110 tmpfs_node_dtor(void *mem, int size, void *arg)
  111 {
  112         struct tmpfs_node *node = (struct tmpfs_node *)mem;
  113         node->tn_type = VNON;
  114 }
  115 
  116 static int
  117 tmpfs_node_init(void *mem, int size, int flags)
  118 {
  119         struct tmpfs_node *node = (struct tmpfs_node *)mem;
  120         node->tn_id = 0;
  121 
  122         mtx_init(&node->tn_interlock, "tmpfs node interlock", NULL, MTX_DEF);
  123         node->tn_gen = arc4random();
  124 
  125         return (0);
  126 }
  127 
  128 static void
  129 tmpfs_node_fini(void *mem, int size)
  130 {
  131         struct tmpfs_node *node = (struct tmpfs_node *)mem;
  132 
  133         mtx_destroy(&node->tn_interlock);
  134 }
  135 
/*
 * Handle updates of time from writes to mmaped regions.  Use
 * MNT_VNODE_FOREACH_ALL instead of MNT_VNODE_FOREACH_ACTIVE, since
 * unmap of the tmpfs-backed vnode does not call vinactive(), due to
 * vm object type is OBJT_SWAP.
 * If lazy, only handle delayed update of mtime due to the writes to
 * mapped files.
 */
static void
tmpfs_update_mtime(struct mount *mp, bool lazy)
{
	struct vnode *vp, *mvp;
	struct vm_object *obj;

	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		/* Only regular files carry tmpfs VM objects of interest. */
		if (vp->v_type != VREG) {
			VI_UNLOCK(vp);
			continue;
		}
		obj = vp->v_object;
		KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) ==
		    (OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj"));

		/*
		 * In lazy case, do unlocked read, avoid taking vnode
		 * lock if not needed.  Lost update will be handled on
		 * the next call.
		 * For non-lazy case, we must flush all pending
		 * metadata changes now.
		 */
		if (!lazy || (obj->flags & OBJ_TMPFS_DIRTY) != 0) {
			/*
			 * LK_INTERLOCK: vget() takes over the vnode
			 * interlock held by the iterator, so no
			 * VI_UNLOCK() is needed on either outcome.
			 */
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK,
			    curthread) != 0)
				continue;
			tmpfs_check_mtime(vp);
			if (!lazy)
				tmpfs_update(vp);
			vput(vp);
		} else {
			VI_UNLOCK(vp);
			continue;
		}
	}
}
  180 
/*
 * Out-parameter for tmpfs_check_rw_maps(): set by the map-scan
 * callback once a writeable mapping of a file on the mount is found.
 */
struct tmpfs_check_rw_maps_arg {
	bool found;
};
  184 
  185 static bool
  186 tmpfs_check_rw_maps_cb(struct mount *mp __unused, vm_map_t map __unused,
  187     vm_map_entry_t entry __unused, void *arg)
  188 {
  189         struct tmpfs_check_rw_maps_arg *a;
  190 
  191         a = arg;
  192         a->found = true;
  193         return (true);
  194 }
  195 
/*
 * Revoke write permissions from all mappings of regular files
 * belonging to the specified tmpfs mount.
 *
 * Map-scan callback invoked (with the map and object locked by the
 * caller, tmpfs_all_rw_maps()) on each writeable entry mapping a file
 * from the mount being downgraded to read-only.
 */
static bool
tmpfs_revoke_rw_maps_cb(struct mount *mp __unused, vm_map_t map,
    vm_map_entry_t entry, void *arg __unused)
{

	/*
	 * XXXKIB: might be invalidate the mapping
	 * instead ?  The process is not going to be
	 * happy in any case.
	 */
	entry->max_protection &= ~VM_PROT_WRITE;
	if ((entry->protection & VM_PROT_WRITE) != 0) {
		entry->protection &= ~VM_PROT_WRITE;
		/*
		 * Also strip write access from any existing hardware
		 * page-table mappings covering the entry's range.
		 */
		pmap_protect(map->pmap, entry->start, entry->end,
		    entry->protection);
	}
	/* Return false so the caller keeps scanning remaining entries. */
	return (false);
}
  218 
/*
 * Walk every process' address space looking for writeable map entries
 * that map regular files belonging to mount "mp", invoking "cb" on
 * each matching entry.  A callback returning true terminates the
 * whole scan.
 *
 * Locking: allproc_lock (shared) is dropped while a process' vmspace
 * is examined and retaken afterwards; the generation check at the
 * bottom restarts the walk if the process list changed meanwhile.
 */
static void
tmpfs_all_rw_maps(struct mount *mp, bool (*cb)(struct mount *mp, vm_map_t,
    vm_map_entry_t, void *), void *cb_arg)
{
	struct proc *p;
	struct vmspace *vm;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t object;
	struct vnode *vp;
	int gen;
	bool terminate;

	terminate = false;
	sx_slock(&allproc_lock);
again:
	gen = allproc_gen;
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		/* Skip processes without a normal, stable address space. */
		if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
		    P_SYSTEM | P_WEXIT)) != 0) {
			PROC_UNLOCK(p);
			continue;
		}
		/*
		 * Hold both the vmspace and the process so neither
		 * goes away while allproc_lock is dropped below.
		 */
		vm = vmspace_acquire_ref(p);
		_PHOLD_LITE(p);
		PROC_UNLOCK(p);
		if (vm == NULL) {
			PRELE(p);
			continue;
		}
		sx_sunlock(&allproc_lock);
		map = &vm->vm_map;

		vm_map_lock(map);
		if (map->busy)
			vm_map_wait_busy(map);
		for (entry = map->header.next; entry != &map->header;
		    entry = entry->next) {
			/*
			 * Only plain, writeable entries matter: guard
			 * entries, submaps and COW entries are filtered
			 * out, as are entries whose max protection
			 * already excludes write.
			 */
			if ((entry->eflags & (MAP_ENTRY_GUARD |
			    MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_COW)) != 0 ||
			    (entry->max_protection & VM_PROT_WRITE) == 0)
				continue;
			object = entry->object.vm_object;
			if (object == NULL || object->type != OBJT_SWAP ||
			    (object->flags & OBJ_TMPFS_NODE) == 0)
				continue;
			/*
			 * No need to dig into shadow chain, mapping
			 * of the object not at top is readonly.
			 */

			VM_OBJECT_RLOCK(object);
			if (object->type == OBJT_DEAD) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}
			MPASS(object->ref_count > 1);
			/* Re-check the flags now that the object is locked. */
			if ((object->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) !=
			    (OBJ_TMPFS_NODE | OBJ_TMPFS)) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}
			/* Only act on files belonging to this mount. */
			vp = object->un_pager.swp.swp_tmpfs;
			if (vp->v_mount != mp) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}

			terminate = cb(mp, map, entry, cb_arg);
			VM_OBJECT_RUNLOCK(object);
			if (terminate)
				break;
		}
		vm_map_unlock(map);

		vmspace_free(vm);
		sx_slock(&allproc_lock);
		PRELE(p);
		if (terminate)
			break;
	}
	/* Restart if processes were added/removed while unlocked. */
	if (!terminate && gen != allproc_gen)
		goto again;
	sx_sunlock(&allproc_lock);
}
  305 
  306 static bool
  307 tmpfs_check_rw_maps(struct mount *mp)
  308 {
  309         struct tmpfs_check_rw_maps_arg ca;
  310 
  311         ca.found = false;
  312         tmpfs_all_rw_maps(mp, tmpfs_check_rw_maps_cb, &ca);
  313         return (ca.found);
  314 }
  315 
/*
 * Downgrade a mounted tmpfs from read-write to read-only.  Unless the
 * downgrade is forced (MNT_FORCE), the presence of any writeable
 * mapping of the mount's files yields EBUSY.  Otherwise, write access
 * is revoked from all mappings and dirty state is flushed before the
 * switch is committed.
 */
static int
tmpfs_rw_to_ro(struct mount *mp)
{
	int error, flags;
	bool forced;

	forced = (mp->mnt_flag & MNT_FORCE) != 0;
	flags = WRITECLOSE | (forced ? FORCECLOSE : 0);

	if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
		return (error);
	error = vfs_write_suspend_umnt(mp);
	if (error != 0)
		return (error);
	if (!forced && tmpfs_check_rw_maps(mp)) {
		error = EBUSY;
		goto out;
	}
	/* Commit the switch before revoking mappings. */
	VFS_TO_TMPFS(mp)->tm_ronly = 1;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_RDONLY;
	MNT_IUNLOCK(mp);
	for (;;) {
		/*
		 * Revoke write permission from existing mappings,
		 * flush mmap-driven mtime updates and vnodes, and
		 * repeat until no new writeable mappings raced in.
		 */
		tmpfs_all_rw_maps(mp, tmpfs_revoke_rw_maps_cb, NULL);
		tmpfs_update_mtime(mp, false);
		error = vflush(mp, 0, flags, curthread);
		if (error != 0) {
			/* Roll the mount back to read-write on failure. */
			VFS_TO_TMPFS(mp)->tm_ronly = 0;
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_RDONLY;
			MNT_IUNLOCK(mp);
			goto out;
		}
		if (!tmpfs_check_rw_maps(mp))
			break;
	}
out:
	vfs_write_resume(mp, 0);
	return (error);
}
  356 
  357 static int
  358 tmpfs_mount(struct mount *mp)
  359 {
  360         const size_t nodes_per_page = howmany(PAGE_SIZE,
  361             sizeof(struct tmpfs_dirent) + sizeof(struct tmpfs_node));
  362         struct tmpfs_mount *tmp;
  363         struct tmpfs_node *root;
  364         struct thread *td = curthread;
  365         int error;
  366         bool nonc;
  367         /* Size counters. */
  368         u_quad_t pages;
  369         off_t nodes_max, size_max, maxfilesize;
  370 
  371         /* Root node attributes. */
  372         uid_t root_uid;
  373         gid_t root_gid;
  374         mode_t root_mode;
  375 
  376         struct vattr va;
  377 
  378         if (!prison_allow(td->td_ucred, PR_ALLOW_MOUNT_TMPFS))
  379                 return (EPERM);
  380 
  381         if (vfs_filteropt(mp->mnt_optnew, tmpfs_opts))
  382                 return (EINVAL);
  383 
  384         if (mp->mnt_flag & MNT_UPDATE) {
  385                 /* Only support update mounts for certain options. */
  386                 if (vfs_filteropt(mp->mnt_optnew, tmpfs_updateopts) != 0)
  387                         return (EOPNOTSUPP);
  388                 if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) == 0) {
  389                         /*
  390                          * On-the-fly resizing is not supported (yet). We still
  391                          * need to have "size" listed as "supported", otherwise
  392                          * trying to update fs that is listed in fstab with size
  393                          * parameter, say trying to change rw to ro or vice
  394                          * versa, would cause vfs_filteropt() to bail.
  395                          */
  396                         if (size_max != VFS_TO_TMPFS(mp)->tm_size_max)
  397                                 return (EOPNOTSUPP);
  398                 }
  399                 if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) &&
  400                     !(VFS_TO_TMPFS(mp)->tm_ronly)) {
  401                         /* RW -> RO */
  402                         return (tmpfs_rw_to_ro(mp));
  403                 } else if (!vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) &&
  404                     VFS_TO_TMPFS(mp)->tm_ronly) {
  405                         /* RO -> RW */
  406                         VFS_TO_TMPFS(mp)->tm_ronly = 0;
  407                         MNT_ILOCK(mp);
  408                         mp->mnt_flag &= ~MNT_RDONLY;
  409                         MNT_IUNLOCK(mp);
  410                 }
  411                 return (0);
  412         }
  413 
  414         vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY);
  415         error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred);
  416         VOP_UNLOCK(mp->mnt_vnodecovered, 0);
  417         if (error)
  418                 return (error);
  419 
  420         if (mp->mnt_cred->cr_ruid != 0 ||
  421             vfs_scanopt(mp->mnt_optnew, "gid", "%d", &root_gid) != 1)
  422                 root_gid = va.va_gid;
  423         if (mp->mnt_cred->cr_ruid != 0 ||
  424             vfs_scanopt(mp->mnt_optnew, "uid", "%d", &root_uid) != 1)
  425                 root_uid = va.va_uid;
  426         if (mp->mnt_cred->cr_ruid != 0 ||
  427             vfs_scanopt(mp->mnt_optnew, "mode", "%ho", &root_mode) != 1)
  428                 root_mode = va.va_mode;
  429         if (vfs_getopt_size(mp->mnt_optnew, "inodes", &nodes_max) != 0)
  430                 nodes_max = 0;
  431         if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) != 0)
  432                 size_max = 0;
  433         if (vfs_getopt_size(mp->mnt_optnew, "maxfilesize", &maxfilesize) != 0)
  434                 maxfilesize = 0;
  435         nonc = vfs_getopt(mp->mnt_optnew, "nonc", NULL, NULL) == 0;
  436 
  437         /* Do not allow mounts if we do not have enough memory to preserve
  438          * the minimum reserved pages. */
  439         if (tmpfs_mem_avail() < TMPFS_PAGES_MINRESERVED)
  440                 return (ENOSPC);
  441 
  442         /* Get the maximum number of memory pages this file system is
  443          * allowed to use, based on the maximum size the user passed in
  444          * the mount structure.  A value of zero is treated as if the
  445          * maximum available space was requested. */
  446         if (size_max == 0 || size_max > OFF_MAX - PAGE_SIZE ||
  447             (SIZE_MAX < OFF_MAX && size_max / PAGE_SIZE >= SIZE_MAX))
  448                 pages = SIZE_MAX;
  449         else {
  450                 size_max = roundup(size_max, PAGE_SIZE);
  451                 pages = howmany(size_max, PAGE_SIZE);
  452         }
  453         MPASS(pages > 0);
  454 
  455         if (nodes_max <= 3) {
  456                 if (pages < INT_MAX / nodes_per_page)
  457                         nodes_max = pages * nodes_per_page;
  458                 else
  459                         nodes_max = INT_MAX;
  460         }
  461         if (nodes_max > INT_MAX)
  462                 nodes_max = INT_MAX;
  463         MPASS(nodes_max >= 3);
  464 
  465         /* Allocate the tmpfs mount structure and fill it. */
  466         tmp = (struct tmpfs_mount *)malloc(sizeof(struct tmpfs_mount),
  467             M_TMPFSMNT, M_WAITOK | M_ZERO);
  468 
  469         mtx_init(&tmp->tm_allnode_lock, "tmpfs allnode lock", NULL, MTX_DEF);
  470         tmp->tm_nodes_max = nodes_max;
  471         tmp->tm_nodes_inuse = 0;
  472         tmp->tm_refcount = 1;
  473         tmp->tm_maxfilesize = maxfilesize > 0 ? maxfilesize : OFF_MAX;
  474         LIST_INIT(&tmp->tm_nodes_used);
  475 
  476         tmp->tm_size_max = size_max;
  477         tmp->tm_pages_max = pages;
  478         tmp->tm_pages_used = 0;
  479         tmp->tm_ino_unr = new_unrhdr(2, INT_MAX, &tmp->tm_allnode_lock);
  480         tmp->tm_dirent_pool = uma_zcreate("TMPFS dirent",
  481             sizeof(struct tmpfs_dirent), NULL, NULL, NULL, NULL,
  482             UMA_ALIGN_PTR, 0);
  483         tmp->tm_node_pool = uma_zcreate("TMPFS node",
  484             sizeof(struct tmpfs_node), tmpfs_node_ctor, tmpfs_node_dtor,
  485             tmpfs_node_init, tmpfs_node_fini, UMA_ALIGN_PTR, 0);
  486         tmp->tm_ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
  487         tmp->tm_nonc = nonc;
  488 
  489         /* Allocate the root node. */
  490         error = tmpfs_alloc_node(mp, tmp, VDIR, root_uid, root_gid,
  491             root_mode & ALLPERMS, NULL, NULL, VNOVAL, &root);
  492 
  493         if (error != 0 || root == NULL) {
  494                 uma_zdestroy(tmp->tm_node_pool);
  495                 uma_zdestroy(tmp->tm_dirent_pool);
  496                 delete_unrhdr(tmp->tm_ino_unr);
  497                 free(tmp, M_TMPFSMNT);
  498                 return (error);
  499         }
  500         KASSERT(root->tn_id == 2,
  501             ("tmpfs root with invalid ino: %ju", (uintmax_t)root->tn_id));
  502         tmp->tm_root = root;
  503 
  504         MNT_ILOCK(mp);
  505         mp->mnt_flag |= MNT_LOCAL;
  506         mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED;
  507         MNT_IUNLOCK(mp);
  508 
  509         mp->mnt_data = tmp;
  510         mp->mnt_stat.f_namemax = MAXNAMLEN;
  511         vfs_getnewfsid(mp);
  512         vfs_mountedfrom(mp, "tmpfs");
  513 
  514         return 0;
  515 }
  516 
/* ARGSUSED2 */
/*
 * VFS unmount operation.  Write activity is suspended first, which
 * guarantees no other thread can destroy nodes while the vnode list
 * is flushed and the node list is torn down.
 */
static int
tmpfs_unmount(struct mount *mp, int mntflags)
{
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	int error, flags;

	flags = (mntflags & MNT_FORCE) != 0 ? FORCECLOSE : 0;
	tmp = VFS_TO_TMPFS(mp);

	/* Stop writers */
	error = vfs_write_suspend_umnt(mp);
	if (error != 0)
		return (error);
	/*
	 * At this point, nodes cannot be destroyed by any other
	 * thread because write suspension is started.
	 */

	for (;;) {
		error = vflush(mp, 0, flags, curthread);
		if (error != 0) {
			vfs_write_resume(mp, VR_START_WRITE);
			return (error);
		}
		MNT_ILOCK(mp);
		if (mp->mnt_nvnodelistsize == 0) {
			MNT_IUNLOCK(mp);
			break;
		}
		MNT_IUNLOCK(mp);
		/* Without MNT_FORCE, lingering vnodes mean EBUSY. */
		if ((mntflags & MNT_FORCE) == 0) {
			vfs_write_resume(mp, VR_START_WRITE);
			return (EBUSY);
		}
	}

	TMPFS_LOCK(tmp);
	while ((node = LIST_FIRST(&tmp->tm_nodes_used)) != NULL) {
		TMPFS_NODE_LOCK(node);
		if (node->tn_type == VDIR)
			tmpfs_dir_destroy(tmp, node);
		/*
		 * tmpfs_free_node_locked() returns true when it freed
		 * the node, having dropped the tmpfs lock in the
		 * process — hence the re-lock.  Otherwise only the
		 * node lock is released here.  (Semantics per
		 * tmpfs_subr.c; not visible in this file.)
		 */
		if (tmpfs_free_node_locked(tmp, node, true))
			TMPFS_LOCK(tmp);
		else
			TMPFS_NODE_UNLOCK(node);
	}

	mp->mnt_data = NULL;
	/* Drops the tmpfs lock and, on last reference, frees "tmp". */
	tmpfs_free_tmp(tmp);
	vfs_write_resume(mp, VR_START_WRITE);

	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);

	return (0);
}
  576 
/*
 * Drop one reference on the tmpfs mount structure; the last reference
 * destroys it.  Called with the tmpfs mount lock held (note the
 * unmatched TMPFS_UNLOCK below); the lock is always released before
 * returning.
 */
void
tmpfs_free_tmp(struct tmpfs_mount *tmp)
{

	MPASS(tmp->tm_refcount > 0);
	tmp->tm_refcount--;
	if (tmp->tm_refcount > 0) {
		TMPFS_UNLOCK(tmp);
		return;
	}
	TMPFS_UNLOCK(tmp);

	/* Last reference: release the zones, ino allocator and lock. */
	uma_zdestroy(tmp->tm_dirent_pool);
	uma_zdestroy(tmp->tm_node_pool);
	delete_unrhdr(tmp->tm_ino_unr);

	mtx_destroy(&tmp->tm_allnode_lock);
	MPASS(tmp->tm_pages_used == 0);
	MPASS(tmp->tm_nodes_inuse == 0);

	free(tmp, M_TMPFSMNT);
}
  599 
  600 static int
  601 tmpfs_root(struct mount *mp, int flags, struct vnode **vpp)
  602 {
  603         int error;
  604 
  605         error = tmpfs_alloc_vp(mp, VFS_TO_TMPFS(mp)->tm_root, flags, vpp);
  606         if (error == 0)
  607                 (*vpp)->v_vflag |= VV_ROOT;
  608         return (error);
  609 }
  610 
/*
 * VFS fhtovp operation: translate an NFS-style file handle into a
 * vnode.  Both the inode number and the generation count stored in
 * the handle must match a live node; a generation mismatch means the
 * handle refers to a recycled node and is stale.
 */
static int
tmpfs_fhtovp(struct mount *mp, struct fid *fhp, int flags,
    struct vnode **vpp)
{
	struct tmpfs_fid *tfhp;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	int error;

	tmp = VFS_TO_TMPFS(mp);

	tfhp = (struct tmpfs_fid *)fhp;
	if (tfhp->tf_len != sizeof(struct tmpfs_fid))
		return (EINVAL);

	if (tfhp->tf_id >= tmp->tm_nodes_max)
		return (EINVAL);

	TMPFS_LOCK(tmp);
	LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
		if (node->tn_id == tfhp->tf_id &&
		    node->tn_gen == tfhp->tf_gen) {
			/* Reference keeps the node alive across unlock. */
			tmpfs_ref_node(node);
			break;
		}
	}
	TMPFS_UNLOCK(tmp);

	/* node is NULL iff the loop ran to completion without a match. */
	if (node != NULL) {
		error = tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, vpp);
		tmpfs_free_node(tmp, node);
	} else
		error = EINVAL;
	return (error);
}
  646 
  647 /* ARGSUSED2 */
  648 static int
  649 tmpfs_statfs(struct mount *mp, struct statfs *sbp)
  650 {
  651         struct tmpfs_mount *tmp;
  652         size_t used;
  653 
  654         tmp = VFS_TO_TMPFS(mp);
  655 
  656         sbp->f_iosize = PAGE_SIZE;
  657         sbp->f_bsize = PAGE_SIZE;
  658 
  659         used = tmpfs_pages_used(tmp);
  660         if (tmp->tm_pages_max != ULONG_MAX)
  661                  sbp->f_blocks = tmp->tm_pages_max;
  662         else
  663                  sbp->f_blocks = used + tmpfs_mem_avail();
  664         if (sbp->f_blocks <= used)
  665                 sbp->f_bavail = 0;
  666         else
  667                 sbp->f_bavail = sbp->f_blocks - used;
  668         sbp->f_bfree = sbp->f_bavail;
  669         used = tmp->tm_nodes_inuse;
  670         sbp->f_files = tmp->tm_nodes_max;
  671         if (sbp->f_files <= used)
  672                 sbp->f_ffree = 0;
  673         else
  674                 sbp->f_ffree = sbp->f_files - used;
  675         /* sbp->f_owner = tmp->tn_uid; */
  676 
  677         return 0;
  678 }
  679 
/*
 * VFS sync operation.  tmpfs holds all data in memory (see the header
 * comment), so there is nothing to write to stable storage.
 * MNT_SUSPEND marks the mount suspended for the write-suspension
 * machinery; MNT_LAZY performs the deferred mtime update for files
 * written through mmap.
 */
static int
tmpfs_sync(struct mount *mp, int waitfor)
{

	if (waitfor == MNT_SUSPEND) {
		MNT_ILOCK(mp);
		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
		MNT_IUNLOCK(mp);
	} else if (waitfor == MNT_LAZY) {
		tmpfs_update_mtime(mp, true);
	}
	return (0);
}
  693 
/*
 * The presence of a susp_clean method tells the VFS to track writes.
 * The body is intentionally empty: only the method's existence
 * matters here.
 */
static void
tmpfs_susp_clean(struct mount *mp __unused)
{
}
  701 
/*
 * tmpfs vfs operations.
 *
 * Operations not listed here fall back to the VFS defaults.
 */

struct vfsops tmpfs_vfsops = {
	.vfs_mount =			tmpfs_mount,
	.vfs_unmount =			tmpfs_unmount,
	.vfs_root =			tmpfs_root,
	.vfs_statfs =			tmpfs_statfs,
	.vfs_fhtovp =			tmpfs_fhtovp,
	.vfs_sync =			tmpfs_sync,
	.vfs_susp_clean =		tmpfs_susp_clean,
};
/* Register the file system; VFCF_JAIL permits mounting inside jails. */
VFS_SET(tmpfs_vfsops, tmpfs, VFCF_JAIL);

Cache object: 660bdd8337d0589cc5075e88ce9921b5


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.