FreeBSD/Linux Kernel Cross Reference
sys/fs/tmpfs/tmpfs_subr.c

    1 /*      $NetBSD: tmpfs_subr.c,v 1.35 2007/07/09 21:10:50 ad Exp $       */
    2 
    3 /*-
    4  * SPDX-License-Identifier: BSD-2-Clause-NetBSD
    5  *
    6  * Copyright (c) 2005 The NetBSD Foundation, Inc.
    7  * All rights reserved.
    8  *
    9  * This code is derived from software contributed to The NetBSD Foundation
   10  * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
   11  * 2005 program.
   12  *
   13  * Redistribution and use in source and binary forms, with or without
   14  * modification, are permitted provided that the following conditions
   15  * are met:
   16  * 1. Redistributions of source code must retain the above copyright
   17  *    notice, this list of conditions and the following disclaimer.
   18  * 2. Redistributions in binary form must reproduce the above copyright
   19  *    notice, this list of conditions and the following disclaimer in the
   20  *    documentation and/or other materials provided with the distribution.
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   24  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   25  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   32  * POSSIBILITY OF SUCH DAMAGE.
   33  */
   34 
   35 /*
   36  * Efficient memory file system supporting functions.
   37  */
   38 #include <sys/cdefs.h>
   39 __FBSDID("$FreeBSD$");
   40 
   41 #include <sys/param.h>
   42 #include <sys/systm.h>
   43 #include <sys/dirent.h>
   44 #include <sys/fnv_hash.h>
   45 #include <sys/lock.h>
   46 #include <sys/limits.h>
   47 #include <sys/mount.h>
   48 #include <sys/namei.h>
   49 #include <sys/priv.h>
   50 #include <sys/proc.h>
   51 #include <sys/random.h>
   52 #include <sys/refcount.h>
   53 #include <sys/rwlock.h>
   54 #include <sys/smr.h>
   55 #include <sys/stat.h>
   56 #include <sys/sysctl.h>
   57 #include <sys/vnode.h>
   58 #include <sys/vmmeter.h>
   59 
   60 #include <vm/vm.h>
   61 #include <vm/vm_param.h>
   62 #include <vm/vm_object.h>
   63 #include <vm/vm_page.h>
   64 #include <vm/vm_pageout.h>
   65 #include <vm/vm_pager.h>
   66 #include <vm/vm_extern.h>
   67 #include <vm/swap_pager.h>
   68 
   69 #include <fs/tmpfs/tmpfs.h>
   70 #include <fs/tmpfs/tmpfs_fifoops.h>
   71 #include <fs/tmpfs/tmpfs_vnops.h>
   72 
   73 SYSCTL_NODE(_vfs, OID_AUTO, tmpfs, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
   74     "tmpfs file system");
   75 
   76 static long tmpfs_pages_reserved = TMPFS_PAGES_MINRESERVED;
   77 
   78 MALLOC_DEFINE(M_TMPFSDIR, "tmpfs dir", "tmpfs dirent structure");
   79 static uma_zone_t tmpfs_node_pool;
   80 VFS_SMR_DECLARE;
   81 
   82 static int
   83 tmpfs_node_ctor(void *mem, int size, void *arg, int flags)
   84 {
   85         struct tmpfs_node *node;
   86 
   87         node = mem;
   88         node->tn_gen++;
   89         node->tn_size = 0;
   90         node->tn_status = 0;
   91         node->tn_accessed = false;
   92         node->tn_flags = 0;
   93         node->tn_links = 0;
   94         node->tn_vnode = NULL;
   95         node->tn_vpstate = 0;
   96         return (0);
   97 }
   98 
   99 static void
  100 tmpfs_node_dtor(void *mem, int size, void *arg)
  101 {
  102         struct tmpfs_node *node;
  103 
  104         node = mem;
  105         node->tn_type = VNON;
  106 }
  107 
  108 static int
  109 tmpfs_node_init(void *mem, int size, int flags)
  110 {
  111         struct tmpfs_node *node;
  112 
  113         node = mem;
  114         node->tn_id = 0;
  115         mtx_init(&node->tn_interlock, "tmpfsni", NULL, MTX_DEF);
  116         node->tn_gen = arc4random();
  117         return (0);
  118 }
  119 
  120 static void
  121 tmpfs_node_fini(void *mem, int size)
  122 {
  123         struct tmpfs_node *node;
  124 
  125         node = mem;
  126         mtx_destroy(&node->tn_interlock);
  127 }
  128 
  129 void
  130 tmpfs_subr_init(void)
  131 {
  132         tmpfs_node_pool = uma_zcreate("TMPFS node",
  133             sizeof(struct tmpfs_node), tmpfs_node_ctor, tmpfs_node_dtor,
  134             tmpfs_node_init, tmpfs_node_fini, UMA_ALIGN_PTR, 0);
  135         VFS_SMR_ZONE_SET(tmpfs_node_pool);
  136 }
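/*
 * Note on the zone callbacks above: UMA runs tmpfs_node_init()/
 * tmpfs_node_fini() only when an item is imported into or released from
 * the zone, while tmpfs_node_ctor()/tmpfs_node_dtor() run on every
 * uma_zalloc_smr()/uma_zfree_smr().  The ctor increments tn_gen, so a
 * recycled node never reuses the generation number of its previous
 * incarnation; init merely seeds tn_gen with arc4random() once.
 */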
  137 
  138 void
  139 tmpfs_subr_uninit(void)
  140 {
  141         uma_zdestroy(tmpfs_node_pool);
  142 }
  143 
  144 static int
  145 sysctl_mem_reserved(SYSCTL_HANDLER_ARGS)
  146 {
  147         int error;
  148         long pages, bytes;
  149 
  150         pages = *(long *)arg1;
  151         bytes = pages * PAGE_SIZE;
  152 
  153         error = sysctl_handle_long(oidp, &bytes, 0, req);
  154         if (error || !req->newptr)
  155                 return (error);
  156 
  157         pages = bytes / PAGE_SIZE;
  158         if (pages < TMPFS_PAGES_MINRESERVED)
  159                 return (EINVAL);
  160 
  161         *(long *)arg1 = pages;
  162         return (0);
  163 }
  164 
  165 SYSCTL_PROC(_vfs_tmpfs, OID_AUTO, memory_reserved,
  166     CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &tmpfs_pages_reserved, 0,
  167     sysctl_mem_reserved, "L",
  168     "Amount of available memory and swap below which tmpfs growth stops");
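/*
 * Illustrative userland sketch (not part of this file): the handler
 * above stores a page count internally but exports bytes, so readers
 * see the value already scaled by PAGE_SIZE.  A minimal consumer,
 * assuming only the standard sysctlbyname(3) interface:
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        long bytes;
        size_t len = sizeof(bytes);

        if (sysctlbyname("vfs.tmpfs.memory_reserved", &bytes, &len,
            NULL, 0) == -1) {
                perror("sysctlbyname");
                return (1);
        }
        /* The kernel converted its page count to bytes for us. */
        printf("tmpfs reserve: %ld bytes\n", bytes);
        return (0);
}
#endif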
  169 
  170 static __inline int tmpfs_dirtree_cmp(struct tmpfs_dirent *a,
  171     struct tmpfs_dirent *b);
  172 RB_PROTOTYPE_STATIC(tmpfs_dir, tmpfs_dirent, uh.td_entries, tmpfs_dirtree_cmp);
  173 
  174 size_t
  175 tmpfs_mem_avail(void)
  176 {
  177         size_t avail;
  178         long reserved;
  179 
  180         avail = swap_pager_avail + vm_free_count();
  181         reserved = atomic_load_long(&tmpfs_pages_reserved);
  182         if (__predict_false(avail < reserved))
  183                 return (0);
  184         return (avail - reserved);
  185 }
  186 
  187 size_t
  188 tmpfs_pages_used(struct tmpfs_mount *tmp)
  189 {
  190         const size_t node_size = sizeof(struct tmpfs_node) +
  191             sizeof(struct tmpfs_dirent);
  192         size_t meta_pages;
  193 
  194         meta_pages = howmany((uintmax_t)tmp->tm_nodes_inuse * node_size,
  195             PAGE_SIZE);
  196         return (meta_pages + tmp->tm_pages_used);
  197 }
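/*
 * Worked example (hypothetical sizes): tmpfs_pages_used() charges one
 * tmpfs_node plus one tmpfs_dirent of metadata per node, rounded up to
 * whole pages.  With, say, 320 bytes of metadata per node and 4096-byte
 * pages, 100000 nodes cost howmany(100000 * 320, 4096) = 7813 pages on
 * top of tm_pages_used, the pages backing actual file data.
 */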
  198 
  199 static size_t
  200 tmpfs_pages_check_avail(struct tmpfs_mount *tmp, size_t req_pages)
  201 {
  202         if (tmpfs_mem_avail() < req_pages)
  203                 return (0);
  204 
  205         if (tmp->tm_pages_max != ULONG_MAX &&
  206             tmp->tm_pages_max < req_pages + tmpfs_pages_used(tmp))
  207                 return (0);
  208 
  209         return (1);
  210 }
  211 
  212 void
  213 tmpfs_ref_node(struct tmpfs_node *node)
  214 {
  215 #ifdef INVARIANTS
  216         u_int old;
  217 
  218         old =
  219 #endif
  220         refcount_acquire(&node->tn_refcount);
  221 #ifdef INVARIANTS
  222         KASSERT(old > 0, ("node %p zero refcount", node));
  223 #endif
  224 }
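/*
 * Note: refcount_acquire(9) returns the pre-increment value; the
 * INVARIANTS dance above captures it only when assertions are compiled
 * in, so a plain build evaluates the call for its side effect alone.
 */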
  225 
  226 /*
  227  * Allocates a new node of type 'type' inside the 'tmp' mount point, with
  228  * its owner set to 'uid', its group to 'gid' and its mode set to
  229  * 'mode'.
  230  *
  231  * If the node type is set to 'VDIR', then the parent parameter must point
  232  * to the parent directory of the node being created.  It may only be NULL
  233  * while allocating the root node.
  234  *
  235  * If the node type is set to 'VBLK' or 'VCHR', then the rdev parameter
  236  * specifies the device the node represents.
  237  *
  238  * If the node type is set to 'VLNK', then the parameter target specifies
  239  * the file name of the target file for the symbolic link that is being
  240  * created.
  241  *
  242  * Note that new nodes are allocated from the tmpfs node pool (a UMA
  243  * zone), and only as long as there is enough space in the file
  244  * system to create them.
  245  *
  246  * Returns zero on success or an appropriate error code on failure.
  247  */
  248 int
  249 tmpfs_alloc_node(struct mount *mp, struct tmpfs_mount *tmp, enum vtype type,
  250     uid_t uid, gid_t gid, mode_t mode, struct tmpfs_node *parent,
  251     const char *target, dev_t rdev, struct tmpfs_node **node)
  252 {
  253         struct tmpfs_node *nnode;
  254         vm_object_t obj;
  255         char *symlink;
  256         char symlink_smr;
  257 
  258         /* If the root directory of the 'tmp' file system is not yet
  259          * allocated, this must be the request to do it. */
  260         MPASS(IMPLIES(tmp->tm_root == NULL, parent == NULL && type == VDIR));
  261 
  262         MPASS(IFF(type == VLNK, target != NULL));
  263         MPASS(IFF(type == VBLK || type == VCHR, rdev != VNOVAL));
  264 
  265         if (tmp->tm_nodes_inuse >= tmp->tm_nodes_max)
  266                 return (ENOSPC);
  267         if (tmpfs_pages_check_avail(tmp, 1) == 0)
  268                 return (ENOSPC);
  269 
  270         if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) {
  271                 /*
  272                  * When a new tmpfs node is created for a fully
  273                  * constructed mount point, there must be a parent
  274                  * node, whose vnode is locked exclusively.  As a
  275                  * consequence, if the unmount is executing in
  276                  * parallel, vflush() cannot reclaim the parent vnode.
  277                  * Due to this, the check for the MNTK_UNMOUNT flag is
  278                  * not racy: if we did not see MNTK_UNMOUNT, then tmp
  279                  * cannot be destroyed until node construction is
  280                  * finished and the parent vnode is unlocked.
  281                  *
  282                  * Tmpfs does not need to instantiate new nodes during
  283                  * unmount.
  284                  */
  285                 return (EBUSY);
  286         }
  287         if ((mp->mnt_flag & MNT_RDONLY) != 0)
  288                 return (EROFS);
  289 
  290         nnode = uma_zalloc_smr(tmpfs_node_pool, M_WAITOK);
  291 
  292         /* Generic initialization. */
  293         nnode->tn_type = type;
  294         vfs_timestamp(&nnode->tn_atime);
  295         nnode->tn_birthtime = nnode->tn_ctime = nnode->tn_mtime =
  296             nnode->tn_atime;
  297         nnode->tn_uid = uid;
  298         nnode->tn_gid = gid;
  299         nnode->tn_mode = mode;
  300         nnode->tn_id = alloc_unr64(&tmp->tm_ino_unr);
  301         nnode->tn_refcount = 1;
  302 
  303         /* Type-specific initialization. */
  304         switch (nnode->tn_type) {
  305         case VBLK:
  306         case VCHR:
  307                 nnode->tn_rdev = rdev;
  308                 break;
  309 
  310         case VDIR:
  311                 RB_INIT(&nnode->tn_dir.tn_dirhead);
  312                 LIST_INIT(&nnode->tn_dir.tn_dupindex);
  313                 MPASS(parent != nnode);
  314                 MPASS(IMPLIES(parent == NULL, tmp->tm_root == NULL));
  315                 nnode->tn_dir.tn_parent = (parent == NULL) ? nnode : parent;
  316                 nnode->tn_dir.tn_readdir_lastn = 0;
  317                 nnode->tn_dir.tn_readdir_lastp = NULL;
  318                 nnode->tn_links++;
  319                 TMPFS_NODE_LOCK(nnode->tn_dir.tn_parent);
  320                 nnode->tn_dir.tn_parent->tn_links++;
  321                 TMPFS_NODE_UNLOCK(nnode->tn_dir.tn_parent);
  322                 break;
  323 
  324         case VFIFO:
  325                 /* FALLTHROUGH */
  326         case VSOCK:
  327                 break;
  328 
  329         case VLNK:
  330                 MPASS(strlen(target) < MAXPATHLEN);
  331                 nnode->tn_size = strlen(target);
  332 
  333                 symlink = NULL;
  334                 if (!tmp->tm_nonc) {
  335                         symlink = cache_symlink_alloc(nnode->tn_size + 1, M_WAITOK);
  336                         symlink_smr = true;
  337                 }
  338                 if (symlink == NULL) {
  339                         symlink = malloc(nnode->tn_size + 1, M_TMPFSNAME, M_WAITOK);
  340                         symlink_smr = false;
  341                 }
  342                 memcpy(symlink, target, nnode->tn_size + 1);
  343 
  344                 /*
  345                  * Allow safe symlink resolving for lockless lookup.
  346                  * tmpfs_fplookup_symlink references this comment.
  347                  *
  348                  * 1. nnode is not yet visible to the world
  349                  * 2. both tn_link_target and tn_link_smr get populated
  350                  * 3. release fence publishes their content
  351                  * 4. tn_link_target content is immutable until node destruction,
  352                  *    where the pointer gets set to NULL
  353                  * 5. tn_link_smr is never changed once set
  354                  *
  355                  * As a result it is sufficient to issue load consume on the node
  356                  * pointer to also get the above content in a stable manner.
  357                  * In the worst case tn_link_smr may be seen as true despite
  358                  * being stale, while the target buffer is already cleared out.
  359                  */
  360                 atomic_store_ptr(&nnode->tn_link_target, symlink);
  361                 atomic_store_char((char *)&nnode->tn_link_smr, symlink_smr);
  362                 atomic_thread_fence_rel();
  363                 break;
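                /*
                 * Illustrative reader-side sketch of the pairing
                 * described above (schematic; the real consumer is
                 * tmpfs_fplookup_symlink()):
                 */
#if 0
                node = atomic_load_consume_ptr(&vp->v_data);
                symlink = atomic_load_ptr(&node->tn_link_target);
                if (atomic_load_char(&node->tn_link_smr) && symlink != NULL) {
                        /* Target contents are stable until node destruction. */
                }
#endif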
  364 
  365         case VREG:
  366                 obj = nnode->tn_reg.tn_aobj =
  367                     vm_pager_allocate(OBJT_SWAP, NULL, 0, VM_PROT_DEFAULT, 0,
  368                         NULL /* XXXKIB - tmpfs needs swap reservation */);
  369                 VM_OBJECT_WLOCK(obj);
  370                 /* OBJ_TMPFS is set together with the setting of vp->v_object */
  371                 vm_object_set_flag(obj, OBJ_TMPFS_NODE);
  372                 VM_OBJECT_WUNLOCK(obj);
  373                 nnode->tn_reg.tn_tmp = tmp;
  374                 break;
  375 
  376         default:
  377                 panic("tmpfs_alloc_node: type %p %d", nnode,
  378                     (int)nnode->tn_type);
  379         }
  380 
  381         TMPFS_LOCK(tmp);
  382         LIST_INSERT_HEAD(&tmp->tm_nodes_used, nnode, tn_entries);
  383         nnode->tn_attached = true;
  384         tmp->tm_nodes_inuse++;
  385         tmp->tm_refcount++;
  386         TMPFS_UNLOCK(tmp);
  387 
  388         *node = nnode;
  389         return (0);
  390 }
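/*
 * Illustrative caller sketch, modeled on the mount path: the root
 * directory is the one case where 'parent' may be NULL, and nodes that
 * are not devices pass VNOVAL for 'rdev' (the root_* variables here are
 * assumptions):
 */
#if 0
        struct tmpfs_node *root;
        int error;

        error = tmpfs_alloc_node(mp, tmp, VDIR, root_uid, root_gid,
            root_mode & ALLPERMS, NULL, NULL, VNOVAL, &root);
        if (error != 0)
                return (error);
#endif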
  391 
  392 /*
  393  * Destroys the node pointed to by 'node' from the file system 'tmp'.
  394  * If the node references a directory, it must have no entries.
  395  */
  396 void
  397 tmpfs_free_node(struct tmpfs_mount *tmp, struct tmpfs_node *node)
  398 {
  399         if (refcount_release_if_not_last(&node->tn_refcount))
  400                 return;
  401 
  402         TMPFS_LOCK(tmp);
  403         TMPFS_NODE_LOCK(node);
  404         if (!tmpfs_free_node_locked(tmp, node, false)) {
  405                 TMPFS_NODE_UNLOCK(node);
  406                 TMPFS_UNLOCK(tmp);
  407         }
  408 }
  409 
  410 bool
  411 tmpfs_free_node_locked(struct tmpfs_mount *tmp, struct tmpfs_node *node,
  412     bool detach)
  413 {
  414         vm_object_t uobj;
  415         char *symlink;
  416         bool last;
  417 
  418         TMPFS_MP_ASSERT_LOCKED(tmp);
  419         TMPFS_NODE_ASSERT_LOCKED(node);
  420 
  421         last = refcount_release(&node->tn_refcount);
  422         if (node->tn_attached && (detach || last)) {
  423                 MPASS(tmp->tm_nodes_inuse > 0);
  424                 tmp->tm_nodes_inuse--;
  425                 LIST_REMOVE(node, tn_entries);
  426                 node->tn_attached = false;
  427         }
  428         if (!last)
  429                 return (false);
  430 
  431 #ifdef INVARIANTS
  432         MPASS(node->tn_vnode == NULL);
  433         MPASS((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0);
  434 #endif
  435         TMPFS_NODE_UNLOCK(node);
  436         TMPFS_UNLOCK(tmp);
  437 
  438         switch (node->tn_type) {
  439         case VBLK:
  440                 /* FALLTHROUGH */
  441         case VCHR:
  442                 /* FALLTHROUGH */
  443         case VDIR:
  444                 /* FALLTHROUGH */
  445         case VFIFO:
  446                 /* FALLTHROUGH */
  447         case VSOCK:
  448                 break;
  449 
  450         case VLNK:
  451                 symlink = node->tn_link_target;
  452                 atomic_store_ptr(&node->tn_link_target, NULL);
  453                 if (atomic_load_char(&node->tn_link_smr)) {
  454                         cache_symlink_free(symlink, node->tn_size + 1);
  455                 } else {
  456                         free(symlink, M_TMPFSNAME);
  457                 }
  458                 break;
  459 
  460         case VREG:
  461                 uobj = node->tn_reg.tn_aobj;
  462                 if (uobj != NULL) {
  463                         if (uobj->size != 0)
  464                                 atomic_subtract_long(&tmp->tm_pages_used, uobj->size);
  465                         KASSERT((uobj->flags & OBJ_TMPFS) == 0,
  466                             ("leaked OBJ_TMPFS node %p vm_obj %p", node, uobj));
  467                         vm_object_deallocate(uobj);
  468                 }
  469                 break;
  470 
  471         default:
  472                 panic("tmpfs_free_node: type %p %d", node, (int)node->tn_type);
  473         }
  474 
  475         uma_zfree_smr(tmpfs_node_pool, node);
  476         TMPFS_LOCK(tmp);
  477         tmpfs_free_tmp(tmp);
  478         return (true);
  479 }
  480 
  481 static __inline uint32_t
  482 tmpfs_dirent_hash(const char *name, u_int len)
  483 {
  484         uint32_t hash;
  485 
  486         hash = fnv_32_buf(name, len, FNV1_32_INIT + len) & TMPFS_DIRCOOKIE_MASK;
  487 #ifdef TMPFS_DEBUG_DIRCOOKIE_DUP
  488         hash &= 0xf;
  489 #endif
  490         if (hash < TMPFS_DIRCOOKIE_MIN)
  491                 hash += TMPFS_DIRCOOKIE_MIN;
  492 
  493         return (hash);
  494 }
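/*
 * The masking above confines hashes to the cookie bits reserved for
 * tree entries, and the TMPFS_DIRCOOKIE_MIN bump keeps them clear of
 * the fixed '.', '..' and EOF cookies, so every tmpfs_dirent cookie is
 * distinguishable from the synthetic offsets used by readdir.
 */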
  495 
  496 static __inline off_t
  497 tmpfs_dirent_cookie(struct tmpfs_dirent *de)
  498 {
  499         if (de == NULL)
  500                 return (TMPFS_DIRCOOKIE_EOF);
  501 
  502         MPASS(de->td_cookie >= TMPFS_DIRCOOKIE_MIN);
  503 
  504         return (de->td_cookie);
  505 }
  506 
  507 static __inline boolean_t
  508 tmpfs_dirent_dup(struct tmpfs_dirent *de)
  509 {
  510         return ((de->td_cookie & TMPFS_DIRCOOKIE_DUP) != 0);
  511 }
  512 
  513 static __inline boolean_t
  514 tmpfs_dirent_duphead(struct tmpfs_dirent *de)
  515 {
  516         return ((de->td_cookie & TMPFS_DIRCOOKIE_DUPHEAD) != 0);
  517 }
  518 
  519 void
  520 tmpfs_dirent_init(struct tmpfs_dirent *de, const char *name, u_int namelen)
  521 {
  522         de->td_hash = de->td_cookie = tmpfs_dirent_hash(name, namelen);
  523         memcpy(de->ud.td_name, name, namelen);
  524         de->td_namelen = namelen;
  525 }
  526 
  527 /*
  528  * Allocates a new directory entry for the node 'node' with the name 'name'.
  529  * The new directory entry is returned in *de.
  530  *
  531  * The link count of node is increased by one to reflect the new object
  532  * referencing it.
  533  *
  534  * Returns zero on success or an appropriate error code on failure.
  535  */
  536 int
  537 tmpfs_alloc_dirent(struct tmpfs_mount *tmp, struct tmpfs_node *node,
  538     const char *name, u_int len, struct tmpfs_dirent **de)
  539 {
  540         struct tmpfs_dirent *nde;
  541 
  542         nde = malloc(sizeof(*nde), M_TMPFSDIR, M_WAITOK);
  543         nde->td_node = node;
  544         if (name != NULL) {
  545                 nde->ud.td_name = malloc(len, M_TMPFSNAME, M_WAITOK);
  546                 tmpfs_dirent_init(nde, name, len);
  547         } else
  548                 nde->td_namelen = 0;
  549         if (node != NULL)
  550                 node->tn_links++;
  551 
  552         *de = nde;
  553 
  554         return (0);
  555 }
  556 
  557 /*
  558  * Frees a directory entry.  It is the caller's responsibility to destroy
  559  * the node referenced by it if needed.
  560  *
  561  * The link count of the node is decreased by one to reflect the removal
  562  * of an object that referenced it.  This only happens if the entry still
  563  * points to a node; otherwise the function will not access the node, as
  564  * it may already have been released from the outside.
  565  */
  566 void
  567 tmpfs_free_dirent(struct tmpfs_mount *tmp, struct tmpfs_dirent *de)
  568 {
  569         struct tmpfs_node *node;
  570 
  571         node = de->td_node;
  572         if (node != NULL) {
  573                 MPASS(node->tn_links > 0);
  574                 node->tn_links--;
  575         }
  576         if (!tmpfs_dirent_duphead(de) && de->ud.td_name != NULL)
  577                 free(de->ud.td_name, M_TMPFSNAME);
  578         free(de, M_TMPFSDIR);
  579 }
  580 
  581 void
  582 tmpfs_destroy_vobject(struct vnode *vp, vm_object_t obj)
  583 {
  584 
  585         ASSERT_VOP_ELOCKED(vp, "tmpfs_destroy_vobject");
  586         if (vp->v_type != VREG || obj == NULL)
  587                 return;
  588 
  589         VM_OBJECT_WLOCK(obj);
  590         VI_LOCK(vp);
  591         vm_object_clear_flag(obj, OBJ_TMPFS);
  592         obj->un_pager.swp.swp_tmpfs = NULL;
  593         if (vp->v_writecount < 0)
  594                 vp->v_writecount = 0;
  595         VI_UNLOCK(vp);
  596         VM_OBJECT_WUNLOCK(obj);
  597 }
  598 
  599 /*
  600  * Need to clear v_object for insmntque failure.
  601  */
  602 static void
  603 tmpfs_insmntque_dtr(struct vnode *vp, void *dtr_arg)
  604 {
  605 
  606         tmpfs_destroy_vobject(vp, vp->v_object);
  607         vp->v_object = NULL;
  608         vp->v_data = NULL;
  609         vp->v_op = &dead_vnodeops;
  610         vgone(vp);
  611         vput(vp);
  612 }
  613 
  614 /*
  615  * Allocates a new vnode for the node 'node', or returns a new reference
  616  * to an existing one if the node already had a vnode referencing it.  The
  617  * resulting locked vnode is returned in *vpp.
  618  *
  619  * Returns zero on success or an appropriate error code on failure.
  620  */
  621 int
  622 tmpfs_alloc_vp(struct mount *mp, struct tmpfs_node *node, int lkflag,
  623     struct vnode **vpp)
  624 {
  625         struct vnode *vp;
  626         enum vgetstate vs;
  627         struct tmpfs_mount *tm;
  628         vm_object_t object;
  629         int error;
  630 
  631         error = 0;
  632         tm = VFS_TO_TMPFS(mp);
  633         TMPFS_NODE_LOCK(node);
  634         tmpfs_ref_node(node);
  635 loop:
  636         TMPFS_NODE_ASSERT_LOCKED(node);
  637         if ((vp = node->tn_vnode) != NULL) {
  638                 MPASS((node->tn_vpstate & TMPFS_VNODE_DOOMED) == 0);
  639                 if ((node->tn_type == VDIR && node->tn_dir.tn_parent == NULL) ||
  640                     (VN_IS_DOOMED(vp) &&
  641                      (lkflag & LK_NOWAIT) != 0)) {
  642                         TMPFS_NODE_UNLOCK(node);
  643                         error = ENOENT;
  644                         vp = NULL;
  645                         goto out;
  646                 }
  647                 if (VN_IS_DOOMED(vp)) {
  648                         node->tn_vpstate |= TMPFS_VNODE_WRECLAIM;
  649                         while ((node->tn_vpstate & TMPFS_VNODE_WRECLAIM) != 0) {
  650                                 msleep(&node->tn_vnode, TMPFS_NODE_MTX(node),
  651                                     0, "tmpfsE", 0);
  652                         }
  653                         goto loop;
  654                 }
  655                 vs = vget_prep(vp);
  656                 TMPFS_NODE_UNLOCK(node);
  657                 error = vget_finish(vp, lkflag, vs);
  658                 if (error == ENOENT) {
  659                         TMPFS_NODE_LOCK(node);
  660                         goto loop;
  661                 }
  662                 if (error != 0) {
  663                         vp = NULL;
  664                         goto out;
  665                 }
  666 
  667                 /*
  668                  * Make sure the vnode is still there after
  669                  * getting the interlock to avoid racing a free.
  670                  */
  671                 if (node->tn_vnode == NULL || node->tn_vnode != vp) {
  672                         vput(vp);
  673                         TMPFS_NODE_LOCK(node);
  674                         goto loop;
  675                 }
  676 
  677                 goto out;
  678         }
  679 
  680         if ((node->tn_vpstate & TMPFS_VNODE_DOOMED) ||
  681             (node->tn_type == VDIR && node->tn_dir.tn_parent == NULL)) {
  682                 TMPFS_NODE_UNLOCK(node);
  683                 error = ENOENT;
  684                 vp = NULL;
  685                 goto out;
  686         }
  687 
  688         /*
  689          * Otherwise, mark the node as being allocated so that only one
  690          * thread calls getnewvnode(), since that can block.
  691          */
  692         if (node->tn_vpstate & TMPFS_VNODE_ALLOCATING) {
  693                 node->tn_vpstate |= TMPFS_VNODE_WANT;
  694                 error = msleep((caddr_t) &node->tn_vpstate,
  695                     TMPFS_NODE_MTX(node), 0, "tmpfs_alloc_vp", 0);
  696                 if (error != 0)
  697                         goto out;
  698                 goto loop;
  699         } else
  700                 node->tn_vpstate |= TMPFS_VNODE_ALLOCATING;
  701 
  702         TMPFS_NODE_UNLOCK(node);
  703 
  704         /* Get a new vnode and associate it with our node. */
  705         error = getnewvnode("tmpfs", mp, VFS_TO_TMPFS(mp)->tm_nonc ?
  706             &tmpfs_vnodeop_nonc_entries : &tmpfs_vnodeop_entries, &vp);
  707         if (error != 0)
  708                 goto unlock;
  709         MPASS(vp != NULL);
  710 
  711         /* lkflag is ignored, the lock is exclusive */
  712         (void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
  713 
  714         vp->v_data = node;
  715         vp->v_type = node->tn_type;
  716 
  717         /* Type-specific initialization. */
  718         switch (node->tn_type) {
  719         case VBLK:
  720                 /* FALLTHROUGH */
  721         case VCHR:
  722                 /* FALLTHROUGH */
  723         case VLNK:
  724                 /* FALLTHROUGH */
  725         case VSOCK:
  726                 break;
  727         case VFIFO:
  728                 vp->v_op = &tmpfs_fifoop_entries;
  729                 break;
  730         case VREG:
  731                 object = node->tn_reg.tn_aobj;
  732                 VM_OBJECT_WLOCK(object);
  733                 VI_LOCK(vp);
  734                 KASSERT(vp->v_object == NULL, ("Not NULL v_object in tmpfs"));
  735                 vp->v_object = object;
  736                 object->un_pager.swp.swp_tmpfs = vp;
  737                 vm_object_set_flag(object, OBJ_TMPFS);
  738                 vn_irflag_set_locked(vp, VIRF_PGREAD);
  739                 VI_UNLOCK(vp);
  740                 VM_OBJECT_WUNLOCK(object);
  741                 break;
  742         case VDIR:
  743                 MPASS(node->tn_dir.tn_parent != NULL);
  744                 if (node->tn_dir.tn_parent == node)
  745                         vp->v_vflag |= VV_ROOT;
  746                 break;
  747 
  748         default:
  749                 panic("tmpfs_alloc_vp: type %p %d", node, (int)node->tn_type);
  750         }
  751         if (vp->v_type != VFIFO)
  752                 VN_LOCK_ASHARE(vp);
  753 
  754         error = insmntque1(vp, mp, tmpfs_insmntque_dtr, NULL);
  755         if (error != 0)
  756                 vp = NULL;
  757 
  758 unlock:
  759         TMPFS_NODE_LOCK(node);
  760 
  761         MPASS(node->tn_vpstate & TMPFS_VNODE_ALLOCATING);
  762         node->tn_vpstate &= ~TMPFS_VNODE_ALLOCATING;
  763         node->tn_vnode = vp;
  764 
  765         if (node->tn_vpstate & TMPFS_VNODE_WANT) {
  766                 node->tn_vpstate &= ~TMPFS_VNODE_WANT;
  767                 TMPFS_NODE_UNLOCK(node);
  768                 wakeup((caddr_t) &node->tn_vpstate);
  769         } else
  770                 TMPFS_NODE_UNLOCK(node);
  771 
  772 out:
  773         if (error == 0) {
  774                 *vpp = vp;
  775 
  776 #ifdef INVARIANTS
  777                 MPASS(*vpp != NULL && VOP_ISLOCKED(*vpp));
  778                 TMPFS_NODE_LOCK(node);
  779                 MPASS(*vpp == node->tn_vnode);
  780                 TMPFS_NODE_UNLOCK(node);
  781 #endif
  782         }
  783         tmpfs_free_node(tm, node);
  784 
  785         return (error);
  786 }
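/*
 * Illustrative caller sketch (schematic, modeled on the lookup path):
 * on success *vpp is returned locked and referenced, so the caller can
 * hand it straight back as the lookup result:
 */
#if 0
        error = tmpfs_alloc_vp(dvp->v_mount, de->td_node, cnp->cn_lkflags,
            vpp);
#endif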
  787 
  788 /*
  789  * Destroys the association between the vnode vp and the node it
  790  * references.
  791  */
  792 void
  793 tmpfs_free_vp(struct vnode *vp)
  794 {
  795         struct tmpfs_node *node;
  796 
  797         node = VP_TO_TMPFS_NODE(vp);
  798 
  799         TMPFS_NODE_ASSERT_LOCKED(node);
  800         node->tn_vnode = NULL;
  801         if ((node->tn_vpstate & TMPFS_VNODE_WRECLAIM) != 0)
  802                 wakeup(&node->tn_vnode);
  803         node->tn_vpstate &= ~TMPFS_VNODE_WRECLAIM;
  804         vp->v_data = NULL;
  805 }
  806 
  807 /*
  808  * Allocates a new file of type 'type' and adds it to the parent directory
  809  * 'dvp'; this addition is done using the component name given in 'cnp'.
  810  * The ownership of the new file is automatically assigned based on the
  811  * credentials of the caller (through 'cnp'), the group is set based on
  812  * the parent directory and the mode is determined from the 'vap' argument.
  813  * If successful, *vpp holds a vnode to the newly created file and zero
  814  * is returned.  Otherwise *vpp is NULL and the function returns an
  815  * appropriate error code.
  816  */
  817 int
  818 tmpfs_alloc_file(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
  819     struct componentname *cnp, const char *target)
  820 {
  821         int error;
  822         struct tmpfs_dirent *de;
  823         struct tmpfs_mount *tmp;
  824         struct tmpfs_node *dnode;
  825         struct tmpfs_node *node;
  826         struct tmpfs_node *parent;
  827 
  828         ASSERT_VOP_ELOCKED(dvp, "tmpfs_alloc_file");
  829         MPASS(cnp->cn_flags & HASBUF);
  830 
  831         tmp = VFS_TO_TMPFS(dvp->v_mount);
  832         dnode = VP_TO_TMPFS_DIR(dvp);
  833         *vpp = NULL;
  834 
  835         /* If the entry we are creating is a directory, make sure we do
  836          * not overflow the link count of its parent, because it will
  837          * gain a new link for the new directory's dot-dot entry. */
  838         if (vap->va_type == VDIR) {
  839                 /* Ensure that we do not overflow the maximum number of links
  840                  * imposed by the system. */
  841                 MPASS(dnode->tn_links <= TMPFS_LINK_MAX);
  842                 if (dnode->tn_links == TMPFS_LINK_MAX) {
  843                         return (EMLINK);
  844                 }
  845 
  846                 parent = dnode;
  847                 MPASS(parent != NULL);
  848         } else
  849                 parent = NULL;
  850 
  851         /* Allocate a node that represents the new file. */
  852         error = tmpfs_alloc_node(dvp->v_mount, tmp, vap->va_type,
  853             cnp->cn_cred->cr_uid, dnode->tn_gid, vap->va_mode, parent,
  854             target, vap->va_rdev, &node);
  855         if (error != 0)
  856                 return (error);
  857 
  858         /* Allocate a directory entry that points to the new file. */
  859         error = tmpfs_alloc_dirent(tmp, node, cnp->cn_nameptr, cnp->cn_namelen,
  860             &de);
  861         if (error != 0) {
  862                 tmpfs_free_node(tmp, node);
  863                 return (error);
  864         }
  865 
  866         /* Allocate a vnode for the new file. */
  867         error = tmpfs_alloc_vp(dvp->v_mount, node, LK_EXCLUSIVE, vpp);
  868         if (error != 0) {
  869                 tmpfs_free_dirent(tmp, de);
  870                 tmpfs_free_node(tmp, node);
  871                 return (error);
  872         }
  873 
  874         /* Now that all required items are allocated, we can proceed to
  875          * insert the new node into the directory, an operation that
  876          * cannot fail. */
  877         if (cnp->cn_flags & ISWHITEOUT)
  878                 tmpfs_dir_whiteout_remove(dvp, cnp);
  879         tmpfs_dir_attach(dvp, de);
  880         return (0);
  881 }
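/*
 * Illustrative caller sketch: the VOP_CREATE and VOP_MKDIR handlers
 * reduce to this helper (schematic; 'ap' is the usual vop argument
 * structure, and only symlink creation passes a non-NULL target):
 */
#if 0
        error = tmpfs_alloc_file(ap->a_dvp, ap->a_vpp, ap->a_vap,
            ap->a_cnp, NULL);
#endif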
  882 
  883 struct tmpfs_dirent *
  884 tmpfs_dir_first(struct tmpfs_node *dnode, struct tmpfs_dir_cursor *dc)
  885 {
  886         struct tmpfs_dirent *de;
  887 
  888         de = RB_MIN(tmpfs_dir, &dnode->tn_dir.tn_dirhead);
  889         dc->tdc_tree = de;
  890         if (de != NULL && tmpfs_dirent_duphead(de))
  891                 de = LIST_FIRST(&de->ud.td_duphead);
  892         dc->tdc_current = de;
  893 
  894         return (dc->tdc_current);
  895 }
  896 
  897 struct tmpfs_dirent *
  898 tmpfs_dir_next(struct tmpfs_node *dnode, struct tmpfs_dir_cursor *dc)
  899 {
  900         struct tmpfs_dirent *de;
  901 
  902         MPASS(dc->tdc_tree != NULL);
  903         if (tmpfs_dirent_dup(dc->tdc_current)) {
  904                 dc->tdc_current = LIST_NEXT(dc->tdc_current, uh.td_dup.entries);
  905                 if (dc->tdc_current != NULL)
  906                         return (dc->tdc_current);
  907         }
  908         dc->tdc_tree = dc->tdc_current = RB_NEXT(tmpfs_dir,
  909             &dnode->tn_dir.tn_dirhead, dc->tdc_tree);
  910         if ((de = dc->tdc_current) != NULL && tmpfs_dirent_duphead(de)) {
  911                 dc->tdc_current = LIST_FIRST(&de->ud.td_duphead);
  912                 MPASS(dc->tdc_current != NULL);
  913         }
  914 
  915         return (dc->tdc_current);
  916 }
  917 
  918 /* Look up a directory entry in the RB tree.  May return a duphead entry. */
  919 static struct tmpfs_dirent *
  920 tmpfs_dir_xlookup_hash(struct tmpfs_node *dnode, uint32_t hash)
  921 {
  922         struct tmpfs_dirent *de, dekey;
  923 
  924         dekey.td_hash = hash;
  925         de = RB_FIND(tmpfs_dir, &dnode->tn_dir.tn_dirhead, &dekey);
  926         return (de);
  927 }
  928 
  929 /* Look up a directory entry by cookie; initialize the cursor accordingly. */
  930 static struct tmpfs_dirent *
  931 tmpfs_dir_lookup_cookie(struct tmpfs_node *node, off_t cookie,
  932     struct tmpfs_dir_cursor *dc)
  933 {
  934         struct tmpfs_dir *dirhead = &node->tn_dir.tn_dirhead;
  935         struct tmpfs_dirent *de, dekey;
  936 
  937         MPASS(cookie >= TMPFS_DIRCOOKIE_MIN);
  938 
  939         if (cookie == node->tn_dir.tn_readdir_lastn &&
  940             (de = node->tn_dir.tn_readdir_lastp) != NULL) {
  941                 /* Protect against a possible race: tn_readdir_last[pn]
  942                  * may be updated with only the shared vnode lock held. */
  943                 if (cookie == tmpfs_dirent_cookie(de))
  944                         goto out;
  945         }
  946 
  947         if ((cookie & TMPFS_DIRCOOKIE_DUP) != 0) {
  948                 LIST_FOREACH(de, &node->tn_dir.tn_dupindex,
  949                     uh.td_dup.index_entries) {
  950                         MPASS(tmpfs_dirent_dup(de));
  951                         if (de->td_cookie == cookie)
  952                                 goto out;
  953                         /* dupindex list is sorted. */
  954                         if (de->td_cookie < cookie) {
  955                                 de = NULL;
  956                                 goto out;
  957                         }
  958                 }
  959                 MPASS(de == NULL);
  960                 goto out;
  961         }
  962 
  963         if ((cookie & TMPFS_DIRCOOKIE_MASK) != cookie) {
  964                 de = NULL;
  965         } else {
  966                 dekey.td_hash = cookie;
  967                 /* Recover if direntry for cookie was removed */
  968                 de = RB_NFIND(tmpfs_dir, dirhead, &dekey);
  969         }
  970         dc->tdc_tree = de;
  971         dc->tdc_current = de;
  972         if (de != NULL && tmpfs_dirent_duphead(de)) {
  973                 dc->tdc_current = LIST_FIRST(&de->ud.td_duphead);
  974                 MPASS(dc->tdc_current != NULL);
  975         }
  976         return (dc->tdc_current);
  977 
  978 out:
  979         dc->tdc_tree = de;
  980         dc->tdc_current = de;
  981         if (de != NULL && tmpfs_dirent_dup(de))
  982                 dc->tdc_tree = tmpfs_dir_xlookup_hash(node,
  983                     de->td_hash);
  984         return (dc->tdc_current);
  985 }
  986 
  987 /*
  988  * Looks for a directory entry in the directory represented by node.
  989  * 'cnp' describes the name of the entry to look for.  Note that the .
  990  * and .. components are not allowed as they do not physically exist
  991  * within directories.
  992  *
  993  * Returns a pointer to the entry when found, otherwise NULL.
  994  */
  995 struct tmpfs_dirent *
  996 tmpfs_dir_lookup(struct tmpfs_node *node, struct tmpfs_node *f,
  997     struct componentname *cnp)
  998 {
  999         struct tmpfs_dir_duphead *duphead;
 1000         struct tmpfs_dirent *de;
 1001         uint32_t hash;
 1002 
 1003         MPASS(IMPLIES(cnp->cn_namelen == 1, cnp->cn_nameptr[0] != '.'));
 1004         MPASS(IMPLIES(cnp->cn_namelen == 2, !(cnp->cn_nameptr[0] == '.' &&
 1005             cnp->cn_nameptr[1] == '.')));
 1006         TMPFS_VALIDATE_DIR(node);
 1007 
 1008         hash = tmpfs_dirent_hash(cnp->cn_nameptr, cnp->cn_namelen);
 1009         de = tmpfs_dir_xlookup_hash(node, hash);
 1010         if (de != NULL && tmpfs_dirent_duphead(de)) {
 1011                 duphead = &de->ud.td_duphead;
 1012                 LIST_FOREACH(de, duphead, uh.td_dup.entries) {
 1013                         if (TMPFS_DIRENT_MATCHES(de, cnp->cn_nameptr,
 1014                             cnp->cn_namelen))
 1015                                 break;
 1016                 }
 1017         } else if (de != NULL) {
 1018                 if (!TMPFS_DIRENT_MATCHES(de, cnp->cn_nameptr,
 1019                     cnp->cn_namelen))
 1020                         de = NULL;
 1021         }
 1022         if (de != NULL && f != NULL && de->td_node != f)
 1023                 de = NULL;
 1024 
 1025         return (de);
 1026 }
 1027 
 1028 /*
 1029  * Attach the duplicate-cookie directory entry nde to dnode and insert it
 1030  * into the dupindex list, allocating a new cookie value.
 1031  */
 1032 static void
 1033 tmpfs_dir_attach_dup(struct tmpfs_node *dnode,
 1034     struct tmpfs_dir_duphead *duphead, struct tmpfs_dirent *nde)
 1035 {
 1036         struct tmpfs_dir_duphead *dupindex;
 1037         struct tmpfs_dirent *de, *pde;
 1038 
 1039         dupindex = &dnode->tn_dir.tn_dupindex;
 1040         de = LIST_FIRST(dupindex);
 1041         if (de == NULL || de->td_cookie < TMPFS_DIRCOOKIE_DUP_MAX) {
 1042                 if (de == NULL)
 1043                         nde->td_cookie = TMPFS_DIRCOOKIE_DUP_MIN;
 1044                 else
 1045                         nde->td_cookie = de->td_cookie + 1;
 1046                 MPASS(tmpfs_dirent_dup(nde));
 1047                 LIST_INSERT_HEAD(dupindex, nde, uh.td_dup.index_entries);
 1048                 LIST_INSERT_HEAD(duphead, nde, uh.td_dup.entries);
 1049                 return;
 1050         }
 1051 
 1052         /*
 1053          * Cookie numbers are near exhaustion. Scan dupindex list for unused
 1054          * numbers. dupindex list is sorted in descending order. Keep it so
 1055          * after inserting nde.
 1056          */
 1057         while (1) {
 1058                 pde = de;
 1059                 de = LIST_NEXT(de, uh.td_dup.index_entries);
 1060                 if (de == NULL && pde->td_cookie != TMPFS_DIRCOOKIE_DUP_MIN) {
 1061                         /*
 1062                          * Last element of the index doesn't have minimal cookie
 1063                          * value, use it.
 1064                          */
 1065                         nde->td_cookie = TMPFS_DIRCOOKIE_DUP_MIN;
 1066                         LIST_INSERT_AFTER(pde, nde, uh.td_dup.index_entries);
 1067                         LIST_INSERT_HEAD(duphead, nde, uh.td_dup.entries);
 1068                         return;
 1069                 } else if (de == NULL) {
 1070                         /*
 1071                          * We are so lucky to have 2^30 hash duplicates in a
 1072                          * single directory :)  Return the largest possible
 1073                          * cookie value.  It should be fine except for possible
 1074                          * issues with VOP_READDIR restart.
 1075                          */
 1076                         nde->td_cookie = TMPFS_DIRCOOKIE_DUP_MAX;
 1077                         LIST_INSERT_HEAD(dupindex, nde,
 1078                             uh.td_dup.index_entries);
 1079                         LIST_INSERT_HEAD(duphead, nde, uh.td_dup.entries);
 1080                         return;
 1081                 }
 1082                 if (de->td_cookie + 1 == pde->td_cookie ||
 1083                     de->td_cookie >= TMPFS_DIRCOOKIE_DUP_MAX)
 1084                         continue;       /* No hole or invalid cookie. */
 1085                 nde->td_cookie = de->td_cookie + 1;
 1086                 MPASS(tmpfs_dirent_dup(nde));
 1087                 MPASS(pde->td_cookie > nde->td_cookie);
 1088                 MPASS(nde->td_cookie > de->td_cookie);
 1089                 LIST_INSERT_BEFORE(de, nde, uh.td_dup.index_entries);
 1090                 LIST_INSERT_HEAD(duphead, nde, uh.td_dup.entries);
 1091                 return;
 1092         }
 1093 }
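/*
 * Worked example for the scan above: with a descending dupindex of
 * cookies {9, 8, 6} (values schematic), the walk stops at the hole
 * between 8 and 6, assigns the new entry cookie 7, and inserts it
 * before the entry holding 6, keeping the list sorted descending.
 */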
 1094 
 1095 /*
 1096  * Attaches the directory entry de to the directory represented by vp.
 1097  * Note that this does not change the link count of the node pointed to
 1098  * by the directory entry, as this is done by tmpfs_alloc_dirent.
 1099  */
 1100 void
 1101 tmpfs_dir_attach(struct vnode *vp, struct tmpfs_dirent *de)
 1102 {
 1103         struct tmpfs_node *dnode;
 1104         struct tmpfs_dirent *xde, *nde;
 1105 
 1106         ASSERT_VOP_ELOCKED(vp, __func__);
 1107         MPASS(de->td_namelen > 0);
 1108         MPASS(de->td_hash >= TMPFS_DIRCOOKIE_MIN);
 1109         MPASS(de->td_cookie == de->td_hash);
 1110 
 1111         dnode = VP_TO_TMPFS_DIR(vp);
 1112         dnode->tn_dir.tn_readdir_lastn = 0;
 1113         dnode->tn_dir.tn_readdir_lastp = NULL;
 1114 
 1115         MPASS(!tmpfs_dirent_dup(de));
 1116         xde = RB_INSERT(tmpfs_dir, &dnode->tn_dir.tn_dirhead, de);
 1117         if (xde != NULL && tmpfs_dirent_duphead(xde))
 1118                 tmpfs_dir_attach_dup(dnode, &xde->ud.td_duphead, de);
 1119         else if (xde != NULL) {
 1120                 /*
 1121                  * Allocate new duphead. Swap xde with duphead to avoid
 1122                  * adding/removing elements with the same hash.
 1123                  */
 1124                 MPASS(!tmpfs_dirent_dup(xde));
 1125                 tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), NULL, NULL, 0,
 1126                     &nde);
 1127                 /* *nde = *xde; XXX gcc 4.2.1 may generate invalid code. */
 1128                 memcpy(nde, xde, sizeof(*xde));
 1129                 xde->td_cookie |= TMPFS_DIRCOOKIE_DUPHEAD;
 1130                 LIST_INIT(&xde->ud.td_duphead);
 1131                 xde->td_namelen = 0;
 1132                 xde->td_node = NULL;
 1133                 tmpfs_dir_attach_dup(dnode, &xde->ud.td_duphead, nde);
 1134                 tmpfs_dir_attach_dup(dnode, &xde->ud.td_duphead, de);
 1135         }
 1136         dnode->tn_size += sizeof(struct tmpfs_dirent);
 1137         dnode->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
 1138         dnode->tn_accessed = true;
 1139         tmpfs_update(vp);
 1140 }
 1141 
 1142 /*
 1143  * Detaches the directory entry de from the directory represented by vp.
 1144  * Note that this does not change the link count of the node pointed to
 1145  * by the directory entry, as this is done by tmpfs_free_dirent.
 1146  */
 1147 void
 1148 tmpfs_dir_detach(struct vnode *vp, struct tmpfs_dirent *de)
 1149 {
 1150         struct tmpfs_mount *tmp;
 1151         struct tmpfs_dir *head;
 1152         struct tmpfs_node *dnode;
 1153         struct tmpfs_dirent *xde;
 1154 
 1155         ASSERT_VOP_ELOCKED(vp, __func__);
 1156 
 1157         dnode = VP_TO_TMPFS_DIR(vp);
 1158         head = &dnode->tn_dir.tn_dirhead;
 1159         dnode->tn_dir.tn_readdir_lastn = 0;
 1160         dnode->tn_dir.tn_readdir_lastp = NULL;
 1161 
 1162         if (tmpfs_dirent_dup(de)) {
 1163                 /* Remove duphead if de was last entry. */
 1164                 if (LIST_NEXT(de, uh.td_dup.entries) == NULL) {
 1165                         xde = tmpfs_dir_xlookup_hash(dnode, de->td_hash);
 1166                         MPASS(tmpfs_dirent_duphead(xde));
 1167                 } else
 1168                         xde = NULL;
 1169                 LIST_REMOVE(de, uh.td_dup.entries);
 1170                 LIST_REMOVE(de, uh.td_dup.index_entries);
 1171                 if (xde != NULL) {
 1172                         if (LIST_EMPTY(&xde->ud.td_duphead)) {
 1173                                 RB_REMOVE(tmpfs_dir, head, xde);
 1174                                 tmp = VFS_TO_TMPFS(vp->v_mount);
 1175                                 MPASS(xde->td_node == NULL);
 1176                                 tmpfs_free_dirent(tmp, xde);
 1177                         }
 1178                 }
 1179                 de->td_cookie = de->td_hash;
 1180         } else
 1181                 RB_REMOVE(tmpfs_dir, head, de);
 1182 
 1183         dnode->tn_size -= sizeof(struct tmpfs_dirent);
 1184         dnode->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
 1185         dnode->tn_accessed = true;
 1186         tmpfs_update(vp);
 1187 }
 1188 
 1189 void
 1190 tmpfs_dir_destroy(struct tmpfs_mount *tmp, struct tmpfs_node *dnode)
 1191 {
 1192         struct tmpfs_dirent *de, *dde, *nde;
 1193 
 1194         RB_FOREACH_SAFE(de, tmpfs_dir, &dnode->tn_dir.tn_dirhead, nde) {
 1195                 RB_REMOVE(tmpfs_dir, &dnode->tn_dir.tn_dirhead, de);
 1196                 /* Node may already be destroyed. */
 1197                 de->td_node = NULL;
 1198                 if (tmpfs_dirent_duphead(de)) {
 1199                         while ((dde = LIST_FIRST(&de->ud.td_duphead)) != NULL) {
 1200                                 LIST_REMOVE(dde, uh.td_dup.entries);
 1201                                 dde->td_node = NULL;
 1202                                 tmpfs_free_dirent(tmp, dde);
 1203                         }
 1204                 }
 1205                 tmpfs_free_dirent(tmp, de);
 1206         }
 1207 }
 1208 
 1209 /*
 1210  * Helper function for tmpfs_readdir.  Creates a '.' entry for the given
 1211  * directory and returns it in the uio space.  The function returns 0
 1212  * on success, EJUSTRETURN if there was not enough space in the uio
 1213  * structure to hold the directory entry, or an appropriate error code
 1214  * if another error happens.
 1215  */
 1216 static int
 1217 tmpfs_dir_getdotdent(struct tmpfs_mount *tm, struct tmpfs_node *node,
 1218     struct uio *uio)
 1219 {
 1220         int error;
 1221         struct dirent dent;
 1222 
 1223         TMPFS_VALIDATE_DIR(node);
 1224         MPASS(uio->uio_offset == TMPFS_DIRCOOKIE_DOT);
 1225 
 1226         dent.d_fileno = node->tn_id;
 1227         dent.d_off = TMPFS_DIRCOOKIE_DOTDOT;
 1228         dent.d_type = DT_DIR;
 1229         dent.d_namlen = 1;
 1230         dent.d_name[0] = '.';
 1231         dent.d_reclen = GENERIC_DIRSIZ(&dent);
 1232         dirent_terminate(&dent);
 1233 
 1234         if (dent.d_reclen > uio->uio_resid)
 1235                 error = EJUSTRETURN;
 1236         else
 1237                 error = uiomove(&dent, dent.d_reclen, uio);
 1238 
 1239         tmpfs_set_accessed(tm, node);
 1240 
 1241         return (error);
 1242 }
 1243 
 1244 /*
 1245  * Helper function for tmpfs_readdir.  Creates a '..' entry for the given
 1246  * directory and returns it in the uio space.  The function returns 0
 1247  * on success, EJUSTRETURN if there was not enough space in the uio
 1248  * structure to hold the directory entry, or an appropriate error code
 1249  * if another error happens.
 1250  */
 1251 static int
 1252 tmpfs_dir_getdotdotdent(struct tmpfs_mount *tm, struct tmpfs_node *node,
 1253     struct uio *uio, off_t next)
 1254 {
 1255         struct tmpfs_node *parent;
 1256         struct dirent dent;
 1257         int error;
 1258 
 1259         TMPFS_VALIDATE_DIR(node);
 1260         MPASS(uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT);
 1261 
 1262         /*
 1263          * Return ENOENT if the current node is already removed.
 1264          */
 1265         TMPFS_ASSERT_LOCKED(node);
 1266         parent = node->tn_dir.tn_parent;
 1267         if (parent == NULL)
 1268                 return (ENOENT);
 1269 
 1270         TMPFS_NODE_LOCK(parent);
 1271         dent.d_fileno = parent->tn_id;
 1272         TMPFS_NODE_UNLOCK(parent);
 1273 
 1274         dent.d_off = next;
 1275         dent.d_type = DT_DIR;
 1276         dent.d_namlen = 2;
 1277         dent.d_name[0] = '.';
 1278         dent.d_name[1] = '.';
 1279         dent.d_reclen = GENERIC_DIRSIZ(&dent);
 1280         dirent_terminate(&dent);
 1281 
 1282         if (dent.d_reclen > uio->uio_resid)
 1283                 error = EJUSTRETURN;
 1284         else
 1285                 error = uiomove(&dent, dent.d_reclen, uio);
 1286 
 1287         tmpfs_set_accessed(tm, node);
 1288 
 1289         return (error);
 1290 }
 1291 
 1292 /*
 1293  * Helper function for tmpfs_readdir.  Returns as many directory entries
 1294  * as can fit in the uio space.  The read starts at uio->uio_offset.
 1295  * The function returns 0 on success, EJUSTRETURN if there was not
 1296  * enough space in the uio structure to hold the directory entry, or an
 1297  * appropriate error code if another error happens.
 1298  */
 1299 int
 1300 tmpfs_dir_getdents(struct tmpfs_mount *tm, struct tmpfs_node *node,
 1301     struct uio *uio, int maxcookies, u_long *cookies, int *ncookies)
 1302 {
 1303         struct tmpfs_dir_cursor dc;
 1304         struct tmpfs_dirent *de, *nde;
 1305         off_t off;
 1306         int error;
 1307 
 1308         TMPFS_VALIDATE_DIR(node);
 1309 
 1310         off = 0;
 1311 
 1312         /*
 1313          * Lookup the node from the current offset.  The starting offset of
 1314          * 0 will lookup both '.' and '..', and then the first real entry,
 1315          * or EOF if there are none.  Then find all entries for the dir that
 1316          * fit into the buffer.  Once no more entries are found (de == NULL),
 1317          * the offset is set to TMPFS_DIRCOOKIE_EOF, which will cause the next
 1318          * call to return 0.
 1319          */
 1320         switch (uio->uio_offset) {
 1321         case TMPFS_DIRCOOKIE_DOT:
 1322                 error = tmpfs_dir_getdotdent(tm, node, uio);
 1323                 if (error != 0)
 1324                         return (error);
 1325                 uio->uio_offset = off = TMPFS_DIRCOOKIE_DOTDOT;
 1326                 if (cookies != NULL)
 1327                         cookies[(*ncookies)++] = off;
 1328                 /* FALLTHROUGH */
 1329         case TMPFS_DIRCOOKIE_DOTDOT:
 1330                 de = tmpfs_dir_first(node, &dc);
 1331                 off = tmpfs_dirent_cookie(de);
 1332                 error = tmpfs_dir_getdotdotdent(tm, node, uio, off);
 1333                 if (error != 0)
 1334                         return (error);
 1335                 uio->uio_offset = off;
 1336                 if (cookies != NULL)
 1337                         cookies[(*ncookies)++] = off;
 1338                 /* EOF. */
 1339                 if (de == NULL)
 1340                         return (0);
 1341                 break;
 1342         case TMPFS_DIRCOOKIE_EOF:
 1343                 return (0);
 1344         default:
 1345                 de = tmpfs_dir_lookup_cookie(node, uio->uio_offset, &dc);
 1346                 if (de == NULL)
 1347                         return (EINVAL);
 1348                 if (cookies != NULL)
 1349                         off = tmpfs_dirent_cookie(de);
 1350         }
 1351 
 1352         /*
 1353          * Read as many entries as possible; i.e., until we reach the end of the
 1354          * directory or we exhaust uio space.
 1355          */
 1356         do {
 1357                 struct dirent d;
 1358 
 1359                 /*
 1360                  * Create a dirent structure representing the current tmpfs_node
 1361                  * and fill it.
 1362                  */
 1363                 if (de->td_node == NULL) {
 1364                         d.d_fileno = 1;
 1365                         d.d_type = DT_WHT;
 1366                 } else {
 1367                         d.d_fileno = de->td_node->tn_id;
 1368                         switch (de->td_node->tn_type) {
 1369                         case VBLK:
 1370                                 d.d_type = DT_BLK;
 1371                                 break;
 1372 
 1373                         case VCHR:
 1374                                 d.d_type = DT_CHR;
 1375                                 break;
 1376 
 1377                         case VDIR:
 1378                                 d.d_type = DT_DIR;
 1379                                 break;
 1380 
 1381                         case VFIFO:
 1382                                 d.d_type = DT_FIFO;
 1383                                 break;
 1384 
 1385                         case VLNK:
 1386                                 d.d_type = DT_LNK;
 1387                                 break;
 1388 
 1389                         case VREG:
 1390                                 d.d_type = DT_REG;
 1391                                 break;
 1392 
 1393                         case VSOCK:
 1394                                 d.d_type = DT_SOCK;
 1395                                 break;
 1396 
 1397                         default:
 1398                                 panic("tmpfs_dir_getdents: type %p %d",
 1399                                     de->td_node, (int)de->td_node->tn_type);
 1400                         }
 1401                 }
 1402                 d.d_namlen = de->td_namelen;
 1403                 MPASS(de->td_namelen < sizeof(d.d_name));
 1404                 (void)memcpy(d.d_name, de->ud.td_name, de->td_namelen);
 1405                 d.d_reclen = GENERIC_DIRSIZ(&d);
 1406 
 1407                 /*
 1408                  * Stop reading if the directory entry we are treating is bigger
 1409                  * than the amount of data that can be returned.
 1410                  */
 1411                 if (d.d_reclen > uio->uio_resid) {
 1412                         error = EJUSTRETURN;
 1413                         break;
 1414                 }
 1415 
 1416                 nde = tmpfs_dir_next(node, &dc);
 1417                 d.d_off = tmpfs_dirent_cookie(nde);
 1418                 dirent_terminate(&d);
 1419 
 1420                 /*
 1421                  * Copy the new dirent structure into the output buffer and
 1422                  * advance pointers.
 1423                  */
 1424                 error = uiomove(&d, d.d_reclen, uio);
 1425                 if (error == 0) {
 1426                         de = nde;
 1427                         if (cookies != NULL) {
 1428                                 off = tmpfs_dirent_cookie(de);
 1429                                 MPASS(*ncookies < maxcookies);
 1430                                 cookies[(*ncookies)++] = off;
 1431                         }
 1432                 }
 1433         } while (error == 0 && uio->uio_resid > 0 && de != NULL);
 1434 
 1435         /* Skip setting off when using cookies as it is already done above. */
 1436         if (cookies == NULL)
 1437                 off = tmpfs_dirent_cookie(de);
 1438 
 1439         /* Update the offset and cache. */
 1440         uio->uio_offset = off;
 1441         node->tn_dir.tn_readdir_lastn = off;
 1442         node->tn_dir.tn_readdir_lastp = de;
 1443 
 1444         tmpfs_set_accessed(tm, node);
 1445         return (error);
 1446 }
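/*
 * Editor's sketch (not part of tmpfs_subr.c): the function above packs
 * dirent records into the uio buffer and publishes resume cookies via
 * d_off, so a consumer can restart a partially read directory.  A
 * minimal userland reader built on getdirentries(2) might look like the
 * following; list_dir is a hypothetical name.
 */
#include <sys/types.h>

#include <dirent.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int
list_dir(const char *path)
{
        char buf[4096];
        struct dirent *d;
        ssize_t nread;
        off_t base;
        char *p;
        int fd;

        fd = open(path, O_RDONLY | O_DIRECTORY);
        if (fd == -1)
                return (-1);
        /* Each call resumes from the cookie saved in the open file. */
        while ((nread = getdirentries(fd, buf, sizeof(buf), &base)) > 0) {
                for (p = buf; p < buf + nread; p += d->d_reclen) {
                        d = (struct dirent *)p;
                        printf("%ju %s\n", (uintmax_t)d->d_fileno, d->d_name);
                }
        }
        close(fd);
        return (nread < 0 ? -1 : 0);
}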
 1447 
 1448 int
 1449 tmpfs_dir_whiteout_add(struct vnode *dvp, struct componentname *cnp)
 1450 {
 1451         struct tmpfs_dirent *de;
 1452         int error;
 1453 
 1454         error = tmpfs_alloc_dirent(VFS_TO_TMPFS(dvp->v_mount), NULL,
 1455             cnp->cn_nameptr, cnp->cn_namelen, &de);
 1456         if (error != 0)
 1457                 return (error);
 1458         tmpfs_dir_attach(dvp, de);
 1459         return (0);
 1460 }
 1461 
 1462 void
 1463 tmpfs_dir_whiteout_remove(struct vnode *dvp, struct componentname *cnp)
 1464 {
 1465         struct tmpfs_dirent *de;
 1466 
 1467         de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(dvp), NULL, cnp);
 1468         MPASS(de != NULL && de->td_node == NULL);
 1469         tmpfs_dir_detach(dvp, de);
 1470         tmpfs_free_dirent(VFS_TO_TMPFS(dvp->v_mount), de);
 1471 }
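/*
 * Editor's note: a whiteout is a name with no node behind it
 * (td_node == NULL); the readdir loop above reports such entries as
 * DT_WHT with a fixed fileno.  A hypothetical userland check:
 */
#include <dirent.h>

static int
dirent_is_whiteout(const struct dirent *d)
{
        return (d->d_type == DT_WHT);
}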
 1472 
 1473 /*
 1474  * Resizes the aobj associated with the regular file pointed to by 'vp' to the
 1475  * size 'newsize'.  'vp' must point to a vnode that represents a regular file.
 1476  * 'newsize' must be non-negative.
 1477  *
 1478  * Returns zero on success or an appropriate error code on failure.
 1479  */
 1480 int
 1481 tmpfs_reg_resize(struct vnode *vp, off_t newsize, boolean_t ignerr)
 1482 {
 1483         struct tmpfs_mount *tmp;
 1484         struct tmpfs_node *node;
 1485         vm_object_t uobj;
 1486         vm_page_t m;
 1487         vm_pindex_t idx, newpages, oldpages;
 1488         off_t oldsize;
 1489         int base, rv;
 1490 
 1491         MPASS(vp->v_type == VREG);
 1492         MPASS(newsize >= 0);
 1493 
 1494         node = VP_TO_TMPFS_NODE(vp);
 1495         uobj = node->tn_reg.tn_aobj;
 1496         tmp = VFS_TO_TMPFS(vp->v_mount);
 1497 
 1498         /*
 1499          * Convert the old and new sizes to the number of pages needed to
 1500          * store them.  It may happen that we do not need to do anything
 1501          * because the last allocated page can accommodate the change on
 1502          * its own.
 1503          */
 1504         oldsize = node->tn_size;
 1505         oldpages = OFF_TO_IDX(oldsize + PAGE_MASK);
 1506         MPASS(oldpages == uobj->size);
 1507         newpages = OFF_TO_IDX(newsize + PAGE_MASK);
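        /*
         * Editor's worked example, assuming PAGE_SIZE == 4096 (so
         * PAGE_MASK == 4095 and OFF_TO_IDX(x) == x >> 12):
         *   oldsize 5000 -> oldpages = OFF_TO_IDX(9095)  = 2
         *   newsize 6000 -> newpages = OFF_TO_IDX(10095) = 2
         * Growing from 5000 to 6000 bytes stays within the same two
         * pages, so the fast path below only updates tn_size.
         */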
 1508 
 1509         if (__predict_true(newpages == oldpages && newsize >= oldsize)) {
 1510                 node->tn_size = newsize;
 1511                 return (0);
 1512         }
 1513 
 1514         if (newpages > oldpages &&
 1515             tmpfs_pages_check_avail(tmp, newpages - oldpages) == 0)
 1516                 return (ENOSPC);
 1517 
 1518         VM_OBJECT_WLOCK(uobj);
 1519         if (newsize < oldsize) {
 1520                 /*
 1521                  * Zero the truncated part of the last page.
 1522                  */
 1523                 base = newsize & PAGE_MASK;
 1524                 if (base != 0) {
 1525                         idx = OFF_TO_IDX(newsize);
 1526 retry:
 1527                         m = vm_page_grab(uobj, idx, VM_ALLOC_NOCREAT);
 1528                         if (m != NULL) {
 1529                                 MPASS(vm_page_all_valid(m));
 1530                         } else if (vm_pager_has_page(uobj, idx, NULL, NULL)) {
 1531                                 m = vm_page_alloc(uobj, idx, VM_ALLOC_NORMAL |
 1532                                     VM_ALLOC_WAITFAIL);
 1533                                 if (m == NULL)
 1534                                         goto retry;
 1535                                 vm_object_pip_add(uobj, 1);
 1536                                 VM_OBJECT_WUNLOCK(uobj);
 1537                                 rv = vm_pager_get_pages(uobj, &m, 1, NULL,
 1538                                     NULL);
 1539                                 VM_OBJECT_WLOCK(uobj);
 1540                                 vm_object_pip_wakeup(uobj);
 1541                                 if (rv == VM_PAGER_OK) {
 1542                                         /*
 1543                                          * Since the page was not resident,
 1544                                          * and therefore not recently
 1545                                          * accessed, immediately enqueue it
 1546                                          * for asynchronous laundering.  The
 1547                                          * current operation is not regarded
 1548                                          * as an access.
 1549                                          */
 1550                                         vm_page_launder(m);
 1551                                 } else {
 1552                                         vm_page_free(m);
 1553                                         if (ignerr)
 1554                                                 m = NULL;
 1555                                         else {
 1556                                                 VM_OBJECT_WUNLOCK(uobj);
 1557                                                 return (EIO);
 1558                                         }
 1559                                 }
 1560                         }
 1561                         if (m != NULL) {
 1562                                 pmap_zero_page_area(m, base, PAGE_SIZE - base);
 1563                                 vm_page_set_dirty(m);
 1564                                 vm_page_xunbusy(m);
 1565                         }
 1566                 }
 1567 
 1568                 /*
 1569                  * Release any swap space and free any whole pages.
 1570                  */
 1571                 if (newpages < oldpages)
 1572                         vm_object_page_remove(uobj, newpages, 0, 0);
 1573         }
 1574         uobj->size = newpages;
 1575         VM_OBJECT_WUNLOCK(uobj);
 1576 
 1577         atomic_add_long(&tmp->tm_pages_used, newpages - oldpages);
 1578 
 1579         node->tn_size = newsize;
 1580         return (0);
 1581 }
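/*
 * Editor's sketch (not part of tmpfs_subr.c): tmpfs_reg_resize() is
 * ultimately driven by truncation requests such as ftruncate(2).  The
 * path below is an assumption for the example; any file on a tmpfs
 * mount would do.
 */
#include <fcntl.h>
#include <unistd.h>

static int
resize_demo(void)
{
        int fd;

        fd = open("/tmp/demo", O_RDWR | O_CREAT, 0644);
        if (fd == -1)
                return (-1);
        /* Grow: newly exposed bytes read back as zeroes. */
        if (ftruncate(fd, 6000) == -1)
                goto fail;
        /* Shrink: the tail of the last remaining page is zeroed above. */
        if (ftruncate(fd, 100) == -1)
                goto fail;
        return (close(fd));
fail:
        close(fd);
        return (-1);
}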
 1582 
 1583 void
 1584 tmpfs_check_mtime(struct vnode *vp)
 1585 {
 1586         struct tmpfs_node *node;
 1587         struct vm_object *obj;
 1588 
 1589         ASSERT_VOP_ELOCKED(vp, "check_mtime");
 1590         if (vp->v_type != VREG)
 1591                 return;
 1592         obj = vp->v_object;
 1593         KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) ==
 1594             (OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj"));
 1595         /* unlocked read */
 1596         if (obj->generation != obj->cleangeneration) {
 1597                 VM_OBJECT_WLOCK(obj);
 1598                 if (obj->generation != obj->cleangeneration) {
 1599                         obj->cleangeneration = obj->generation;
 1600                         node = VP_TO_TMPFS_NODE(vp);
 1601                         node->tn_status |= TMPFS_NODE_MODIFIED |
 1602                             TMPFS_NODE_CHANGED;
 1603                 }
 1604                 VM_OBJECT_WUNLOCK(obj);
 1605         }
 1606 }
 1607 
 1608 /*
 1609  * Change flags of the given vnode.
 1610  * Caller should execute tmpfs_update on vp after a successful execution.
 1611  * The vnode must be locked on entry and remain locked on exit.
 1612  */
 1613 int
 1614 tmpfs_chflags(struct vnode *vp, u_long flags, struct ucred *cred,
 1615     struct thread *p)
 1616 {
 1617         int error;
 1618         struct tmpfs_node *node;
 1619 
 1620         ASSERT_VOP_ELOCKED(vp, "chflags");
 1621 
 1622         node = VP_TO_TMPFS_NODE(vp);
 1623 
 1624         if ((flags & ~(SF_APPEND | SF_ARCHIVED | SF_IMMUTABLE | SF_NOUNLINK |
 1625             UF_APPEND | UF_ARCHIVE | UF_HIDDEN | UF_IMMUTABLE | UF_NODUMP |
 1626             UF_NOUNLINK | UF_OFFLINE | UF_OPAQUE | UF_READONLY | UF_REPARSE |
 1627             UF_SPARSE | UF_SYSTEM)) != 0)
 1628                 return (EOPNOTSUPP);
 1629 
 1630         /* Disallow this operation if the file system is mounted read-only. */
 1631         if (vp->v_mount->mnt_flag & MNT_RDONLY)
 1632                 return (EROFS);
 1633 
 1634         /*
 1635          * Callers may only modify the file flags on objects they
 1636          * have VADMIN rights for.
 1637          */
 1638         if ((error = VOP_ACCESS(vp, VADMIN, cred, p)))
 1639                 return (error);
 1640         /*
 1641          * Unprivileged processes are not permitted to unset system
 1642          * flags, or modify flags if any system flags are set.
 1643          */
 1644         if (!priv_check_cred(cred, PRIV_VFS_SYSFLAGS)) {
 1645                 if (node->tn_flags &
 1646                     (SF_NOUNLINK | SF_IMMUTABLE | SF_APPEND)) {
 1647                         error = securelevel_gt(cred, 0);
 1648                         if (error)
 1649                                 return (error);
 1650                 }
 1651         } else {
 1652                 if (node->tn_flags &
 1653                     (SF_NOUNLINK | SF_IMMUTABLE | SF_APPEND) ||
 1654                     ((flags ^ node->tn_flags) & SF_SETTABLE))
 1655                         return (EPERM);
 1656         }
 1657         node->tn_flags = flags;
 1658         node->tn_status |= TMPFS_NODE_CHANGED;
 1659 
 1660         ASSERT_VOP_ELOCKED(vp, "chflags2");
 1661 
 1662         return (0);
 1663 }
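/*
 * Editor's sketch: chflags(2) reaches tmpfs_chflags() through
 * VOP_SETATTR.  Per the checks above, UF_* flags are available to the
 * file owner while SF_* flags require PRIV_VFS_SYSFLAGS.  chflags(2)
 * replaces the whole flag word, so a careful caller preserves st_flags:
 */
#include <sys/stat.h>

#include <unistd.h>

static int
set_nodump(const char *path)
{
        struct stat sb;

        if (stat(path, &sb) == -1)
                return (-1);
        return (chflags(path, sb.st_flags | UF_NODUMP));
}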
 1664 
 1665 /*
 1666  * Change access mode on the given vnode.
 1667  * Caller should execute tmpfs_update on vp after a successful execution.
 1668  * The vnode must be locked on entry and remain locked on exit.
 1669  */
 1670 int
 1671 tmpfs_chmod(struct vnode *vp, mode_t mode, struct ucred *cred, struct thread *p)
 1672 {
 1673         int error;
 1674         struct tmpfs_node *node;
 1675         mode_t newmode;
 1676 
 1677         ASSERT_VOP_ELOCKED(vp, "chmod");
 1678         ASSERT_VOP_IN_SEQC(vp);
 1679 
 1680         node = VP_TO_TMPFS_NODE(vp);
 1681 
 1682         /* Disallow this operation if the file system is mounted read-only. */
 1683         if (vp->v_mount->mnt_flag & MNT_RDONLY)
 1684                 return EROFS;
 1685 
 1686         /* Immutable or append-only files cannot be modified, either. */
 1687         if (node->tn_flags & (IMMUTABLE | APPEND))
 1688                 return EPERM;
 1689 
 1690         /*
 1691          * To modify the permissions on a file, must possess VADMIN
 1692          * for that file.
 1693          */
 1694         if ((error = VOP_ACCESS(vp, VADMIN, cred, p)))
 1695                 return (error);
 1696 
 1697         /*
 1698          * Privileged processes may set the sticky bit on non-directories,
 1699          * as well as set the setgid bit on a file with a group that the
 1700          * process is not a member of.
 1701          */
 1702         if (vp->v_type != VDIR && (mode & S_ISTXT)) {
 1703                 if (priv_check_cred(cred, PRIV_VFS_STICKYFILE))
 1704                         return (EFTYPE);
 1705         }
 1706         if (!groupmember(node->tn_gid, cred) && (mode & S_ISGID)) {
 1707                 error = priv_check_cred(cred, PRIV_VFS_SETGID);
 1708                 if (error)
 1709                         return (error);
 1710         }
 1711 
 1712         newmode = node->tn_mode & ~ALLPERMS;
 1713         newmode |= mode & ALLPERMS;
 1714         atomic_store_short(&node->tn_mode, newmode);
 1715 
 1716         node->tn_status |= TMPFS_NODE_CHANGED;
 1717 
 1718         ASSERT_VOP_ELOCKED(vp, "chmod2");
 1719 
 1720         return (0);
 1721 }
 1722 
 1723 /*
 1724  * Change ownership of the given vnode.  At least one of uid or gid must
 1725  * be different from VNOVAL.  If one is set to that value, the attribute
 1726  * is unchanged.
 1727  * Caller should execute tmpfs_update on vp after a successful execution.
 1728  * The vnode must be locked on entry and remain locked on exit.
 1729  */
 1730 int
 1731 tmpfs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred,
 1732     struct thread *p)
 1733 {
 1734         int error;
 1735         struct tmpfs_node *node;
 1736         uid_t ouid;
 1737         gid_t ogid;
 1738         mode_t newmode;
 1739 
 1740         ASSERT_VOP_ELOCKED(vp, "chown");
 1741         ASSERT_VOP_IN_SEQC(vp);
 1742 
 1743         node = VP_TO_TMPFS_NODE(vp);
 1744 
 1745         /* Assign default values if they are unknown. */
 1746         MPASS(uid != VNOVAL || gid != VNOVAL);
 1747         if (uid == VNOVAL)
 1748                 uid = node->tn_uid;
 1749         if (gid == VNOVAL)
 1750                 gid = node->tn_gid;
 1751         MPASS(uid != VNOVAL && gid != VNOVAL);
 1752 
 1753         /* Disallow this operation if the file system is mounted read-only. */
 1754         if (vp->v_mount->mnt_flag & MNT_RDONLY)
 1755                 return (EROFS);
 1756 
 1757         /* Immutable or append-only files cannot be modified, either. */
 1758         if (node->tn_flags & (IMMUTABLE | APPEND))
 1759                 return (EPERM);
 1760 
 1761         /*
 1762          * To modify the ownership of a file, must possess VADMIN for that
 1763          * file.
 1764          */
 1765         if ((error = VOP_ACCESS(vp, VADMIN, cred, p)))
 1766                 return (error);
 1767 
 1768         /*
 1769          * To change the owner of a file, or change the group of a file to a
 1770          * group of which we are not a member, the caller must have
 1771          * privilege.
 1772          */
 1773         if ((uid != node->tn_uid ||
 1774             (gid != node->tn_gid && !groupmember(gid, cred))) &&
 1775             (error = priv_check_cred(cred, PRIV_VFS_CHOWN)))
 1776                 return (error);
 1777 
 1778         ogid = node->tn_gid;
 1779         ouid = node->tn_uid;
 1780 
 1781         node->tn_uid = uid;
 1782         node->tn_gid = gid;
 1783 
 1784         node->tn_status |= TMPFS_NODE_CHANGED;
 1785 
 1786         if ((node->tn_mode & (S_ISUID | S_ISGID)) && (ouid != uid || ogid != gid)) {
 1787                 if (priv_check_cred(cred, PRIV_VFS_RETAINSUGID)) {
 1788                         newmode = node->tn_mode & ~(S_ISUID | S_ISGID);
 1789                         atomic_store_short(&node->tn_mode, newmode);
 1790                 }
 1791         }
 1792 
 1793         ASSERT_VOP_ELOCKED(vp, "chown2");
 1794 
 1795         return (0);
 1796 }
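/*
 * Editor's sketch: chown(2) arrives here via VOP_SETATTR, with an ID of
 * -1 mapping to VNOVAL for "unchanged".  As the code above shows,
 * changing either ID strips S_ISUID/S_ISGID unless the caller holds
 * PRIV_VFS_RETAINSUGID.
 */
#include <sys/stat.h>

#include <unistd.h>

static int
chgrp_demo(const char *path, gid_t gid)
{
        /* Owner unchanged; (uid_t)-1 becomes VNOVAL in the kernel. */
        return (chown(path, (uid_t)-1, gid));
}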
 1797 
 1798 /*
 1799  * Change size of the given vnode.
 1800  * Caller should execute tmpfs_update on vp after a successful execution.
 1801  * The vnode must be locked on entry and remain locked on exit.
 1802  */
 1803 int
 1804 tmpfs_chsize(struct vnode *vp, u_quad_t size, struct ucred *cred,
 1805     struct thread *p)
 1806 {
 1807         int error;
 1808         struct tmpfs_node *node;
 1809 
 1810         ASSERT_VOP_ELOCKED(vp, "chsize");
 1811 
 1812         node = VP_TO_TMPFS_NODE(vp);
 1813 
 1814         /* Decide whether this is a valid operation based on the file type. */
 1815         error = 0;
 1816         switch (vp->v_type) {
 1817         case VDIR:
 1818                 return (EISDIR);
 1819 
 1820         case VREG:
 1821                 if (vp->v_mount->mnt_flag & MNT_RDONLY)
 1822                         return (EROFS);
 1823                 break;
 1824 
 1825         case VBLK:
 1826                 /* FALLTHROUGH */
 1827         case VCHR:
 1828                 /* FALLTHROUGH */
 1829         case VFIFO:
 1830                 /*
 1831          * Allow modifications of special files even if the file
 1832                  * system is mounted read-only (we are not modifying the
 1833                  * files themselves, but the objects they represent).
 1834                  */
 1835                 return (0);
 1836 
 1837         default:
 1838                 /* Anything else is unsupported. */
 1839                 return (EOPNOTSUPP);
 1840         }
 1841 
 1842         /* Immutable or append-only files cannot be modified, either. */
 1843         if (node->tn_flags & (IMMUTABLE | APPEND))
 1844                 return (EPERM);
 1845 
 1846         error = tmpfs_truncate(vp, size);
 1847         /*
 1848          * tmpfs_truncate will raise the NOTE_EXTEND and NOTE_ATTRIB kevents
 1849          * for us, as well as update tn_status; no need to do that here.
 1850          */
 1851 
 1852         ASSERT_VOP_ELOCKED(vp, "chsize2");
 1853 
 1854         return (error);
 1855 }
 1856 
 1857 /*
 1858  * Change access and modification times of the given vnode.
 1859  * Caller should execute tmpfs_update on vp after a successful execution.
 1860  * The vnode must be locked on entry and remain locked on exit.
 1861  */
 1862 int
 1863 tmpfs_chtimes(struct vnode *vp, struct vattr *vap,
 1864     struct ucred *cred, struct thread *l)
 1865 {
 1866         int error;
 1867         struct tmpfs_node *node;
 1868 
 1869         ASSERT_VOP_ELOCKED(vp, "chtimes");
 1870 
 1871         node = VP_TO_TMPFS_NODE(vp);
 1872 
 1873         /* Disallow this operation if the file system is mounted read-only. */
 1874         if (vp->v_mount->mnt_flag & MNT_RDONLY)
 1875                 return (EROFS);
 1876 
 1877         /* Immutable or append-only files cannot be modified, either. */
 1878         if (node->tn_flags & (IMMUTABLE | APPEND))
 1879                 return (EPERM);
 1880 
 1881         error = vn_utimes_perm(vp, vap, cred, l);
 1882         if (error != 0)
 1883                 return (error);
 1884 
 1885         if (vap->va_atime.tv_sec != VNOVAL)
 1886                 node->tn_accessed = true;
 1887 
 1888         if (vap->va_mtime.tv_sec != VNOVAL)
 1889                 node->tn_status |= TMPFS_NODE_MODIFIED;
 1890 
 1891         if (vap->va_birthtime.tv_sec != VNOVAL)
 1892                 node->tn_status |= TMPFS_NODE_MODIFIED;
 1893 
 1894         tmpfs_itimes(vp, &vap->va_atime, &vap->va_mtime);
 1895 
 1896         if (vap->va_birthtime.tv_sec != VNOVAL)
 1897                 node->tn_birthtime = vap->va_birthtime;
 1898         ASSERT_VOP_ELOCKED(vp, "chtimes2");
 1899 
 1900         return (0);
 1901 }
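/*
 * Editor's sketch: utimensat(2) feeds this function through
 * VOP_SETATTR; a timestamp the caller omits arrives as VNOVAL and is
 * left untouched by the checks above.
 */
#include <sys/stat.h>

#include <fcntl.h>

static int
touch_mtime_only(const char *path)
{
        struct timespec ts[2] = {
                { .tv_nsec = UTIME_OMIT },      /* atime: stays VNOVAL */
                { .tv_nsec = UTIME_NOW },       /* mtime: set to "now" */
        };

        return (utimensat(AT_FDCWD, path, ts, 0));
}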
 1902 
 1903 void
 1904 tmpfs_set_status(struct tmpfs_mount *tm, struct tmpfs_node *node, int status)
 1905 {
 1906 
 1907         if ((node->tn_status & status) == status || tm->tm_ronly)
 1908                 return;
 1909         TMPFS_NODE_LOCK(node);
 1910         node->tn_status |= status;
 1911         TMPFS_NODE_UNLOCK(node);
 1912 }
 1913 
 1914 void
 1915 tmpfs_set_accessed(struct tmpfs_mount *tm, struct tmpfs_node *node)
 1916 {
 1917         if (node->tn_accessed || tm->tm_ronly)
 1918                 return;
 1919         atomic_store_8(&node->tn_accessed, true);
 1920 }
 1921 
 1922 /* Sync timestamps */
 1923 void
 1924 tmpfs_itimes(struct vnode *vp, const struct timespec *acc,
 1925     const struct timespec *mod)
 1926 {
 1927         struct tmpfs_node *node;
 1928         struct timespec now;
 1929 
 1930         ASSERT_VOP_LOCKED(vp, "tmpfs_itimes");
 1931         node = VP_TO_TMPFS_NODE(vp);
 1932 
 1933         if (!node->tn_accessed &&
 1934             (node->tn_status & (TMPFS_NODE_MODIFIED | TMPFS_NODE_CHANGED)) == 0)
 1935                 return;
 1936 
 1937         vfs_timestamp(&now);
 1938         TMPFS_NODE_LOCK(node);
 1939         if (node->tn_accessed) {
 1940                 if (acc == NULL)
 1941                          acc = &now;
 1942                 node->tn_atime = *acc;
 1943         }
 1944         if (node->tn_status & TMPFS_NODE_MODIFIED) {
 1945                 if (mod == NULL)
 1946                         mod = &now;
 1947                 node->tn_mtime = *mod;
 1948         }
 1949         if (node->tn_status & TMPFS_NODE_CHANGED)
 1950                 node->tn_ctime = now;
 1951         node->tn_status &= ~(TMPFS_NODE_MODIFIED | TMPFS_NODE_CHANGED);
 1952         node->tn_accessed = false;
 1953         TMPFS_NODE_UNLOCK(node);
 1954 
 1955         /* XXX: FIX? The entropy here is desirable, but the harvesting may be expensive */
 1956         random_harvest_queue(node, sizeof(*node), RANDOM_FS_ATIME);
 1957 }
 1958 
 1959 int
 1960 tmpfs_truncate(struct vnode *vp, off_t length)
 1961 {
 1962         int error;
 1963         struct tmpfs_node *node;
 1964 
 1965         node = VP_TO_TMPFS_NODE(vp);
 1966 
 1967         if (length < 0) {
 1968                 error = EINVAL;
 1969                 goto out;
 1970         }
 1971 
 1972         if (node->tn_size == length) {
 1973                 error = 0;
 1974                 goto out;
 1975         }
 1976 
 1977         if (length > VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize)
 1978                 return (EFBIG);
 1979 
 1980         error = tmpfs_reg_resize(vp, length, FALSE);
 1981         if (error == 0)
 1982                 node->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
 1983 
 1984 out:
 1985         tmpfs_update(vp);
 1986 
 1987         return (error);
 1988 }
 1989 
 1990 static __inline int
 1991 tmpfs_dirtree_cmp(struct tmpfs_dirent *a, struct tmpfs_dirent *b)
 1992 {
 1993         if (a->td_hash > b->td_hash)
 1994                 return (1);
 1995         else if (a->td_hash < b->td_hash)
 1996                 return (-1);
 1997         return (0);
 1998 }
 1999 
 2000 RB_GENERATE_STATIC(tmpfs_dir, tmpfs_dirent, uh.td_entries, tmpfs_dirtree_cmp);
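/*
 * Editor's sketch: the comparator above orders directory entries by
 * their 32-bit name hash, which this file derives with the FNV hash
 * (note the <sys/fnv_hash.h> include).  A userland approximation of
 * 32-bit FNV-1 follows; the kernel additionally keeps the value out of
 * the reserved dot/dotdot/EOF cookie range (a detail assumed here; see
 * tmpfs_dirent_hash() earlier in the file).
 */
#include <stddef.h>
#include <stdint.h>

static uint32_t
name_hash_fnv1(const char *name, size_t len)
{
        uint32_t h = 2166136261U;       /* FNV-1 32-bit offset basis */
        size_t i;

        for (i = 0; i < len; i++) {
                h *= 16777619U;         /* FNV-1 32-bit prime */
                h ^= (unsigned char)name[i];
        }
        return (h);
}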
