FreeBSD/Linux Kernel Cross Reference
sys/kern/uipc_shm.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
    5  * All rights reserved.
    6  *
    7  * Portions of this software were developed by BAE Systems, the University of
    8  * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
    9  * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
   10  * Computing (TC) research program.
   11  *
   12  * Redistribution and use in source and binary forms, with or without
   13  * modification, are permitted provided that the following conditions
   14  * are met:
   15  * 1. Redistributions of source code must retain the above copyright
   16  *    notice, this list of conditions and the following disclaimer.
   17  * 2. Redistributions in binary form must reproduce the above copyright
   18  *    notice, this list of conditions and the following disclaimer in the
   19  *    documentation and/or other materials provided with the distribution.
   20  *
   21  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   31  * SUCH DAMAGE.
   32  */
   33 
   34 /*
   35  * Support for shared swap-backed anonymous memory objects via
   36  * shm_open(2) and shm_unlink(2).  While most of the implementation is
   37  * here, vm_mmap.c contains mapping logic changes.
   38  *
   39  * posixshmcontrol(1) allows users to inspect the state of the memory
    40  * objects.  The per-uid swap resource limit controls the total amount
    41  * of memory that a user can consume for anonymous objects, including
    42  * shared ones.
   43  */
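
A minimal userland sketch of the lifecycle described above (the object
name "/demo" is arbitrary; error handling abbreviated):

    #include <sys/mman.h>
    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    int
    main(void)
    {
            /* Create or open a named, swap-backed memory object. */
            int fd = shm_open("/demo", O_RDWR | O_CREAT, 0600);
            if (fd < 0)
                    return (1);
            /* Size it (this reaches shm_dotruncate()) and map it. */
            if (ftruncate(fd, 4096) != 0)
                    return (1);
            char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                MAP_SHARED, fd, 0);
            if (p == MAP_FAILED)
                    return (1);
            strcpy(p, "hello");     /* Stores go to the swap object. */
            munmap(p, 4096);
            close(fd);
            shm_unlink("/demo");    /* Drop the name; the object dies
                                       with its last reference. */
            return (0);
    }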
   44 
   45 #include <sys/cdefs.h>
   46 __FBSDID("$FreeBSD$");
   47 
   48 #include "opt_capsicum.h"
   49 #include "opt_ktrace.h"
   50 
   51 #include <sys/param.h>
   52 #include <sys/capsicum.h>
   53 #include <sys/conf.h>
   54 #include <sys/fcntl.h>
   55 #include <sys/file.h>
   56 #include <sys/filedesc.h>
   57 #include <sys/filio.h>
   58 #include <sys/fnv_hash.h>
   59 #include <sys/kernel.h>
   60 #include <sys/limits.h>
   61 #include <sys/uio.h>
   62 #include <sys/signal.h>
   63 #include <sys/jail.h>
   64 #include <sys/ktrace.h>
   65 #include <sys/lock.h>
   66 #include <sys/malloc.h>
   67 #include <sys/mman.h>
   68 #include <sys/mutex.h>
   69 #include <sys/priv.h>
   70 #include <sys/proc.h>
   71 #include <sys/refcount.h>
   72 #include <sys/resourcevar.h>
   73 #include <sys/rwlock.h>
   74 #include <sys/sbuf.h>
   75 #include <sys/stat.h>
   76 #include <sys/syscallsubr.h>
   77 #include <sys/sysctl.h>
   78 #include <sys/sysproto.h>
   79 #include <sys/systm.h>
   80 #include <sys/sx.h>
   81 #include <sys/time.h>
   82 #include <sys/vnode.h>
   83 #include <sys/unistd.h>
   84 #include <sys/user.h>
   85 
   86 #include <security/audit/audit.h>
   87 #include <security/mac/mac_framework.h>
   88 
   89 #include <vm/vm.h>
   90 #include <vm/vm_param.h>
   91 #include <vm/pmap.h>
   92 #include <vm/vm_extern.h>
   93 #include <vm/vm_map.h>
   94 #include <vm/vm_kern.h>
   95 #include <vm/vm_object.h>
   96 #include <vm/vm_page.h>
   97 #include <vm/vm_pageout.h>
   98 #include <vm/vm_pager.h>
   99 #include <vm/swap_pager.h>
  100 
  101 struct shm_mapping {
  102         char            *sm_path;
  103         Fnv32_t         sm_fnv;
  104         struct shmfd    *sm_shmfd;
  105         LIST_ENTRY(shm_mapping) sm_link;
  106 };
  107 
  108 static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
  109 static LIST_HEAD(, shm_mapping) *shm_dictionary;
  110 static struct sx shm_dict_lock;
  111 static struct mtx shm_timestamp_lock;
  112 static u_long shm_hash;
  113 static struct unrhdr64 shm_ino_unr;
  114 static dev_t shm_dev_ino;
  115 
  116 #define SHM_HASH(fnv)   (&shm_dictionary[(fnv) & shm_hash])
  117 
  118 static void     shm_init(void *arg);
  119 static void     shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
  120 static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
  121 static int      shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
  122 
  123 static fo_rdwr_t        shm_read;
  124 static fo_rdwr_t        shm_write;
  125 static fo_truncate_t    shm_truncate;
  126 static fo_ioctl_t       shm_ioctl;
  127 static fo_stat_t        shm_stat;
  128 static fo_close_t       shm_close;
  129 static fo_chmod_t       shm_chmod;
  130 static fo_chown_t       shm_chown;
  131 static fo_seek_t        shm_seek;
  132 static fo_fill_kinfo_t  shm_fill_kinfo;
  133 static fo_mmap_t        shm_mmap;
  134 
  135 /* File descriptor operations. */
  136 struct fileops shm_ops = {
  137         .fo_read = shm_read,
  138         .fo_write = shm_write,
  139         .fo_truncate = shm_truncate,
  140         .fo_ioctl = shm_ioctl,
  141         .fo_poll = invfo_poll,
  142         .fo_kqfilter = invfo_kqfilter,
  143         .fo_stat = shm_stat,
  144         .fo_close = shm_close,
  145         .fo_chmod = shm_chmod,
  146         .fo_chown = shm_chown,
  147         .fo_sendfile = vn_sendfile,
  148         .fo_seek = shm_seek,
  149         .fo_fill_kinfo = shm_fill_kinfo,
  150         .fo_mmap = shm_mmap,
  151         .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
  152 };
  153 
  154 FEATURE(posix_shm, "POSIX shared memory");
  155 
  156 static int
  157 uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
  158 {
  159         vm_page_t m;
  160         vm_pindex_t idx;
  161         size_t tlen;
  162         int error, offset, rv;
  163 
  164         idx = OFF_TO_IDX(uio->uio_offset);
  165         offset = uio->uio_offset & PAGE_MASK;
  166         tlen = MIN(PAGE_SIZE - offset, len);
  167 
  168         VM_OBJECT_WLOCK(obj);
  169 
  170         /*
  171          * Read I/O without either a corresponding resident page or swap
  172          * page: use zero_region.  This is intended to avoid instantiating
  173          * pages on read from a sparse region.
  174          */
  175         if (uio->uio_rw == UIO_READ && vm_page_lookup(obj, idx) == NULL &&
  176             !vm_pager_has_page(obj, idx, NULL, NULL)) {
  177                 VM_OBJECT_WUNLOCK(obj);
  178                 return (uiomove(__DECONST(void *, zero_region), tlen, uio));
  179         }
  180 
  181         /*
  182          * Parallel reads of the page content from disk are prevented
  183          * by exclusive busy.
  184          *
  185          * Although the tmpfs vnode lock is held here, it is
  186          * nonetheless safe to sleep waiting for a free page.  The
  187          * pageout daemon does not need to acquire the tmpfs vnode
   188  * lock to page out tobj's pages because tobj is an OBJT_SWAP
  189          * type object.
  190          */
  191         m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
  192         if (m->valid != VM_PAGE_BITS_ALL) {
  193                 vm_page_xbusy(m);
  194                 if (vm_pager_has_page(obj, idx, NULL, NULL)) {
  195                         rv = vm_pager_get_pages(obj, &m, 1, NULL, NULL);
  196                         if (rv != VM_PAGER_OK) {
  197                                 printf(
  198             "uiomove_object: vm_obj %p idx %jd valid %x pager error %d\n",
  199                                     obj, idx, m->valid, rv);
  200                                 vm_page_lock(m);
  201                                 vm_page_free(m);
  202                                 vm_page_unlock(m);
  203                                 VM_OBJECT_WUNLOCK(obj);
  204                                 return (EIO);
  205                         }
  206                 } else
  207                         vm_page_zero_invalid(m, TRUE);
  208                 vm_page_xunbusy(m);
  209         }
  210         vm_page_lock(m);
  211         vm_page_hold(m);
  212         if (vm_page_active(m))
  213                 vm_page_reference(m);
  214         else
  215                 vm_page_activate(m);
  216         vm_page_unlock(m);
  217         VM_OBJECT_WUNLOCK(obj);
  218         error = uiomove_fromphys(&m, offset, tlen, uio);
  219         if (uio->uio_rw == UIO_WRITE && error == 0) {
  220                 VM_OBJECT_WLOCK(obj);
  221                 vm_page_dirty(m);
  222                 vm_pager_page_unswapped(m);
  223                 VM_OBJECT_WUNLOCK(obj);
  224         }
  225         vm_page_lock(m);
  226         vm_page_unhold(m);
  227         vm_page_unlock(m);
  228 
  229         return (error);
  230 }
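
The zero_region shortcut above is visible from userland: reading a range
of a shm object that was never written returns zeroes without
instantiating pages.  A small sketch using FreeBSD's SHM_ANON:

    #include <sys/mman.h>
    #include <assert.h>
    #include <fcntl.h>
    #include <unistd.h>

    int
    main(void)
    {
            char buf[512];

            int fd = shm_open(SHM_ANON, O_RDWR, 0600);
            assert(fd >= 0);
            assert(ftruncate(fd, 1 << 20) == 0);    /* 1 MiB of holes. */
            /* No resident or swapped page backs this range, so the
             * kernel copies out of zero_region instead of allocating. */
            assert(pread(fd, buf, sizeof(buf), 4096) ==
                (ssize_t)sizeof(buf));
            for (size_t i = 0; i < sizeof(buf); i++)
                    assert(buf[i] == 0);
            close(fd);
            return (0);
    }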
  231 
  232 int
  233 uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
  234 {
  235         ssize_t resid;
  236         size_t len;
  237         int error;
  238 
  239         error = 0;
  240         while ((resid = uio->uio_resid) > 0) {
  241                 if (obj_size <= uio->uio_offset)
  242                         break;
  243                 len = MIN(obj_size - uio->uio_offset, resid);
  244                 if (len == 0)
  245                         break;
  246                 error = uiomove_object_page(obj, len, uio);
  247                 if (error != 0 || resid == uio->uio_resid)
  248                         break;
  249         }
  250         return (error);
  251 }
  252 
  253 static int
  254 shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
  255 {
  256         struct shmfd *shmfd;
  257         off_t foffset;
  258         int error;
  259 
  260         shmfd = fp->f_data;
  261         foffset = foffset_lock(fp, 0);
  262         error = 0;
  263         switch (whence) {
  264         case L_INCR:
  265                 if (foffset < 0 ||
  266                     (offset > 0 && foffset > OFF_MAX - offset)) {
  267                         error = EOVERFLOW;
  268                         break;
  269                 }
  270                 offset += foffset;
  271                 break;
  272         case L_XTND:
  273                 if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
  274                         error = EOVERFLOW;
  275                         break;
  276                 }
  277                 offset += shmfd->shm_size;
  278                 break;
  279         case L_SET:
  280                 break;
  281         default:
  282                 error = EINVAL;
  283         }
  284         if (error == 0) {
  285                 if (offset < 0 || offset > shmfd->shm_size)
  286                         error = EINVAL;
  287                 else
  288                         td->td_uretoff.tdu_off = offset;
  289         }
  290         foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
  291         return (error);
  292 }
  293 
  294 static int
  295 shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
  296     int flags, struct thread *td)
  297 {
  298         struct shmfd *shmfd;
  299         void *rl_cookie;
  300         int error;
  301 
  302         shmfd = fp->f_data;
  303 #ifdef MAC
  304         error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
  305         if (error)
  306                 return (error);
  307 #endif
  308         foffset_lock_uio(fp, uio, flags);
  309         rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
  310             uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
  311         error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
  312         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
  313         foffset_unlock_uio(fp, uio, flags);
  314         return (error);
  315 }
  316 
  317 static int
  318 shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
  319     int flags, struct thread *td)
  320 {
  321         struct shmfd *shmfd;
  322         void *rl_cookie;
  323         int error;
  324 
  325         shmfd = fp->f_data;
  326 #ifdef MAC
  327         error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
  328         if (error)
  329                 return (error);
  330 #endif
  331         foffset_lock_uio(fp, uio, flags);
  332         if ((flags & FOF_OFFSET) == 0) {
  333                 rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
  334                     &shmfd->shm_mtx);
  335         } else {
  336                 rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
  337                     uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
  338         }
  339 
  340         error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
  341         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
  342         foffset_unlock_uio(fp, uio, flags);
  343         return (error);
  344 }
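
Because shm_read() and shm_write() take the range lock over just the
affected byte range when an explicit offset is supplied (FOF_OFFSET),
ordinary pread(2)/pwrite(2) work on shm descriptors much as they do on
regular files.  A hedged sketch:

    #include <sys/mman.h>
    #include <assert.h>
    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    int
    main(void)
    {
            char buf[6] = "";

            int fd = shm_open(SHM_ANON, O_RDWR, 0600);
            assert(fd >= 0);
            assert(ftruncate(fd, 4096) == 0);
            /* Only the range [100, 105) is write-locked here. */
            assert(pwrite(fd, "hello", 5, 100) == 5);
            assert(pread(fd, buf, 5, 100) == 5);
            assert(strcmp(buf, "hello") == 0);
            close(fd);
            return (0);
    }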
  345 
  346 static int
  347 shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
  348     struct thread *td)
  349 {
  350         struct shmfd *shmfd;
  351 #ifdef MAC
  352         int error;
  353 #endif
  354 
  355         shmfd = fp->f_data;
  356 #ifdef MAC
  357         error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
  358         if (error)
  359                 return (error);
  360 #endif
  361         return (shm_dotruncate(shmfd, length));
  362 }
  363 
   364 static int
  365 shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
  366     struct thread *td)
  367 {
  368 
  369         switch (com) {
  370         case FIONBIO:
  371         case FIOASYNC:
  372                 /*
  373                  * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work,
  374                  * just like it would on an unlinked regular file
  375                  */
  376                 return (0);
  377         default:
  378                 return (ENOTTY);
  379         }
  380 }
  381 
  382 static int
  383 shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
  384     struct thread *td)
  385 {
  386         struct shmfd *shmfd;
  387 #ifdef MAC
  388         int error;
  389 #endif
  390 
  391         shmfd = fp->f_data;
  392 
  393 #ifdef MAC
  394         error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
  395         if (error)
  396                 return (error);
  397 #endif
  398         
  399         /*
  400          * Attempt to return sanish values for fstat() on a memory file
  401          * descriptor.
  402          */
  403         bzero(sb, sizeof(*sb));
  404         sb->st_blksize = PAGE_SIZE;
  405         sb->st_size = shmfd->shm_size;
  406         sb->st_blocks = howmany(sb->st_size, sb->st_blksize);
  407         mtx_lock(&shm_timestamp_lock);
  408         sb->st_atim = shmfd->shm_atime;
  409         sb->st_ctim = shmfd->shm_ctime;
  410         sb->st_mtim = shmfd->shm_mtime;
  411         sb->st_birthtim = shmfd->shm_birthtime;
  412         sb->st_mode = S_IFREG | shmfd->shm_mode;                /* XXX */
  413         sb->st_uid = shmfd->shm_uid;
  414         sb->st_gid = shmfd->shm_gid;
  415         mtx_unlock(&shm_timestamp_lock);
  416         sb->st_dev = shm_dev_ino;
  417         sb->st_ino = shmfd->shm_ino;
  418         sb->st_nlink = shmfd->shm_object->ref_count;
  419 
  420         return (0);
  421 }
  422 
  423 static int
  424 shm_close(struct file *fp, struct thread *td)
  425 {
  426         struct shmfd *shmfd;
  427 
  428         shmfd = fp->f_data;
  429         fp->f_data = NULL;
  430         shm_drop(shmfd);
  431 
  432         return (0);
  433 }
  434 
  435 int
  436 shm_dotruncate(struct shmfd *shmfd, off_t length)
  437 {
  438         vm_object_t object;
  439         vm_page_t m;
  440         vm_pindex_t idx, nobjsize;
  441         vm_ooffset_t delta;
  442         int base, rv;
  443 
  444         KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
  445         object = shmfd->shm_object;
  446         VM_OBJECT_WLOCK(object);
  447         if (length == shmfd->shm_size) {
  448                 VM_OBJECT_WUNLOCK(object);
  449                 return (0);
  450         }
  451         nobjsize = OFF_TO_IDX(length + PAGE_MASK);
  452 
  453         /* Are we shrinking?  If so, trim the end. */
  454         if (length < shmfd->shm_size) {
  455                 /*
  456                  * Disallow any requests to shrink the size if this
  457                  * object is mapped into the kernel.
  458                  */
  459                 if (shmfd->shm_kmappings > 0) {
  460                         VM_OBJECT_WUNLOCK(object);
  461                         return (EBUSY);
  462                 }
  463 
  464                 /*
  465                  * Zero the truncated part of the last page.
  466                  */
  467                 base = length & PAGE_MASK;
  468                 if (base != 0) {
  469                         idx = OFF_TO_IDX(length);
  470 retry:
  471                         m = vm_page_lookup(object, idx);
  472                         if (m != NULL) {
  473                                 if (vm_page_sleep_if_busy(m, "shmtrc"))
  474                                         goto retry;
  475                         } else if (vm_pager_has_page(object, idx, NULL, NULL)) {
  476                                 m = vm_page_alloc(object, idx,
  477                                     VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
  478                                 if (m == NULL)
  479                                         goto retry;
  480                                 rv = vm_pager_get_pages(object, &m, 1, NULL,
  481                                     NULL);
  482                                 vm_page_lock(m);
  483                                 if (rv == VM_PAGER_OK) {
  484                                         /*
  485                                          * Since the page was not resident,
  486                                          * and therefore not recently
  487                                          * accessed, immediately enqueue it
  488                                          * for asynchronous laundering.  The
  489                                          * current operation is not regarded
  490                                          * as an access.
  491                                          */
  492                                         vm_page_launder(m);
  493                                         vm_page_unlock(m);
  494                                         vm_page_xunbusy(m);
  495                                 } else {
  496                                         vm_page_free(m);
  497                                         vm_page_unlock(m);
  498                                         VM_OBJECT_WUNLOCK(object);
  499                                         return (EIO);
  500                                 }
  501                         }
  502                         if (m != NULL) {
  503                                 pmap_zero_page_area(m, base, PAGE_SIZE - base);
  504                                 KASSERT(m->valid == VM_PAGE_BITS_ALL,
  505                                     ("shm_dotruncate: page %p is invalid", m));
  506                                 vm_page_dirty(m);
  507                                 vm_pager_page_unswapped(m);
  508                         }
  509                 }
  510                 delta = IDX_TO_OFF(object->size - nobjsize);
  511 
  512                 /* Toss in memory pages. */
  513                 if (nobjsize < object->size)
  514                         vm_object_page_remove(object, nobjsize, object->size,
  515                             0);
  516 
  517                 /* Toss pages from swap. */
  518                 if (object->type == OBJT_SWAP)
  519                         swap_pager_freespace(object, nobjsize, delta);
  520 
  521                 /* Free the swap accounted for shm */
  522                 swap_release_by_cred(delta, object->cred);
  523                 object->charge -= delta;
  524         } else {
  525                 /* Try to reserve additional swap space. */
  526                 delta = IDX_TO_OFF(nobjsize - object->size);
  527                 if (!swap_reserve_by_cred(delta, object->cred)) {
  528                         VM_OBJECT_WUNLOCK(object);
  529                         return (ENOMEM);
  530                 }
  531                 object->charge += delta;
  532         }
  533         shmfd->shm_size = length;
  535         mtx_lock(&shm_timestamp_lock);
  536         vfs_timestamp(&shmfd->shm_ctime);
  537         shmfd->shm_mtime = shmfd->shm_ctime;
  538         mtx_unlock(&shm_timestamp_lock);
  539         object->size = nobjsize;
  540         VM_OBJECT_WUNLOCK(object);
  541         return (0);
  542 }
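
shm_dotruncate() is reached from userland via ftruncate(2).  The sketch
below exercises the shrink path's partial-page zeroing: shrinking to a
mid-page length zeroes the tail of the last page, so old data beyond
the new length does not reappear when the object is grown again.

    #include <sys/mman.h>
    #include <assert.h>
    #include <fcntl.h>
    #include <unistd.h>

    int
    main(void)
    {
            char c = 'x', r;

            int fd = shm_open(SHM_ANON, O_RDWR, 0600);
            assert(fd >= 0);
            assert(ftruncate(fd, 4096) == 0);   /* Grow: swap reserved. */
            assert(pwrite(fd, &c, 1, 100) == 1);
            assert(ftruncate(fd, 50) == 0);     /* Mid-page shrink: the
                                                   tail of page 0 is
                                                   zeroed. */
            assert(ftruncate(fd, 4096) == 0);   /* Grow again. */
            assert(pread(fd, &r, 1, 100) == 1);
            assert(r == 0);                     /* Old byte is gone. */
            close(fd);
            return (0);
    }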
  543 
  544 /*
  545  * shmfd object management including creation and reference counting
  546  * routines.
  547  */
  548 struct shmfd *
  549 shm_alloc(struct ucred *ucred, mode_t mode)
  550 {
  551         struct shmfd *shmfd;
  552 
  553         shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
  554         shmfd->shm_size = 0;
  555         shmfd->shm_uid = ucred->cr_uid;
  556         shmfd->shm_gid = ucred->cr_gid;
  557         shmfd->shm_mode = mode;
  558         shmfd->shm_object = vm_pager_allocate(OBJT_SWAP, NULL,
  559             shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
  560         KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
  561         shmfd->shm_object->pg_color = 0;
  562         VM_OBJECT_WLOCK(shmfd->shm_object);
  563         vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING);
  564         vm_object_set_flag(shmfd->shm_object, OBJ_COLORED | OBJ_NOSPLIT);
  565         VM_OBJECT_WUNLOCK(shmfd->shm_object);
  566         vfs_timestamp(&shmfd->shm_birthtime);
  567         shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
  568             shmfd->shm_birthtime;
  569         shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
  570         refcount_init(&shmfd->shm_refs, 1);
  571         mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
  572         rangelock_init(&shmfd->shm_rl);
  573 #ifdef MAC
  574         mac_posixshm_init(shmfd);
  575         mac_posixshm_create(ucred, shmfd);
  576 #endif
  577 
  578         return (shmfd);
  579 }
  580 
  581 struct shmfd *
  582 shm_hold(struct shmfd *shmfd)
  583 {
  584 
  585         refcount_acquire(&shmfd->shm_refs);
  586         return (shmfd);
  587 }
  588 
  589 void
  590 shm_drop(struct shmfd *shmfd)
  591 {
  592 
  593         if (refcount_release(&shmfd->shm_refs)) {
  594 #ifdef MAC
  595                 mac_posixshm_destroy(shmfd);
  596 #endif
  597                 rangelock_destroy(&shmfd->shm_rl);
  598                 mtx_destroy(&shmfd->shm_mtx);
  599                 vm_object_deallocate(shmfd->shm_object);
  600                 free(shmfd, M_SHMFD);
  601         }
  602 }
  603 
  604 /*
  605  * Determine if the credentials have sufficient permissions for a
  606  * specified combination of FREAD and FWRITE.
  607  */
  608 int
  609 shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
  610 {
  611         accmode_t accmode;
  612         int error;
  613 
  614         accmode = 0;
  615         if (flags & FREAD)
  616                 accmode |= VREAD;
  617         if (flags & FWRITE)
  618                 accmode |= VWRITE;
  619         mtx_lock(&shm_timestamp_lock);
  620         error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
  621             accmode, ucred, NULL);
  622         mtx_unlock(&shm_timestamp_lock);
  623         return (error);
  624 }
  625 
  626 static void
  627 shm_init(void *arg)
  628 {
  629 
  630         mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
  631         sx_init(&shm_dict_lock, "shm dictionary");
  632         shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
  633         new_unrhdr64(&shm_ino_unr, 1);
  634         shm_dev_ino = devfs_alloc_cdp_inode();
  635         KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
  636 }
  637 SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);
  638 
  639 /*
  640  * Dictionary management.  We maintain an in-kernel dictionary to map
  641  * paths to shmfd objects.  We use the FNV hash on the path to store
  642  * the mappings in a hash table.
  643  */
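
For reference, a standalone sketch of the 32-bit FNV-1 string hash this
dictionary relies on (modeled on sys/fnv_hash.h; the offset-basis value
below is the canonical FNV-1 constant and is an assumption here — the
kernel takes its value from FNV1_32_INIT):

    #include <stdint.h>

    #define FNV_32_PRIME    16777619U
    #define FNV_32_BASIS    2166136261U     /* assumed offset basis */

    static uint32_t
    fnv_32_str_sketch(const char *str)
    {
            uint32_t hval = FNV_32_BASIS;

            while (*str != '\0') {
                    hval *= FNV_32_PRIME;    /* Multiply by the prime, */
                    hval ^= (uint8_t)*str++; /* then fold in the byte. */
            }
            return (hval);
    }

    /* Bucket selection then matches SHM_HASH(): the hash is masked
     * with shm_hash, the power-of-two mask from hashinit(). */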
  644 static struct shmfd *
  645 shm_lookup(char *path, Fnv32_t fnv)
  646 {
  647         struct shm_mapping *map;
  648 
  649         LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
  650                 if (map->sm_fnv != fnv)
  651                         continue;
  652                 if (strcmp(map->sm_path, path) == 0)
  653                         return (map->sm_shmfd);
  654         }
  655 
  656         return (NULL);
  657 }
  658 
  659 static void
  660 shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
  661 {
  662         struct shm_mapping *map;
  663 
  664         map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
  665         map->sm_path = path;
  666         map->sm_fnv = fnv;
  667         map->sm_shmfd = shm_hold(shmfd);
  668         shmfd->shm_path = path;
  669         LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
  670 }
  671 
  672 static int
  673 shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
  674 {
  675         struct shm_mapping *map;
  676         int error;
  677 
  678         LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
  679                 if (map->sm_fnv != fnv)
  680                         continue;
  681                 if (strcmp(map->sm_path, path) == 0) {
  682 #ifdef MAC
  683                         error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
  684                         if (error)
  685                                 return (error);
  686 #endif
  687                         error = shm_access(map->sm_shmfd, ucred,
  688                             FREAD | FWRITE);
  689                         if (error)
  690                                 return (error);
  691                         map->sm_shmfd->shm_path = NULL;
  692                         LIST_REMOVE(map, sm_link);
  693                         shm_drop(map->sm_shmfd);
  694                         free(map->sm_path, M_SHMFD);
  695                         free(map, M_SHMFD);
  696                         return (0);
  697                 }
  698         }
  699 
  700         return (ENOENT);
  701 }
  702 
  703 int
  704 kern_shm_open(struct thread *td, const char *userpath, int flags, mode_t mode,
  705     struct filecaps *fcaps)
  706 {
  707         struct filedesc *fdp;
  708         struct shmfd *shmfd;
  709         struct file *fp;
  710         char *path;
  711         const char *pr_path;
  712         size_t pr_pathlen;
  713         Fnv32_t fnv;
  714         mode_t cmode;
  715         int fd, error;
  716 
  717 #ifdef CAPABILITY_MODE
  718         /*
  719          * shm_open(2) is only allowed for anonymous objects.
  720          */
  721         if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
  722                 return (ECAPMODE);
  723 #endif
  724 
  725         AUDIT_ARG_FFLAGS(flags);
  726         AUDIT_ARG_MODE(mode);
  727 
  728         if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
  729                 return (EINVAL);
  730 
  731         if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
  732                 return (EINVAL);
  733 
  734         fdp = td->td_proc->p_fd;
  735         cmode = (mode & ~fdp->fd_cmask) & ACCESSPERMS;
  736 
  737         /*
  738          * shm_open(2) created shm should always have O_CLOEXEC set, as mandated
  739          * by POSIX.  We allow it to be unset here so that an in-kernel
  740          * interface may be written as a thin layer around shm, optionally not
  741          * setting CLOEXEC.  For shm_open(2), O_CLOEXEC is set unconditionally
  742          * in sys_shm_open() to keep this implementation compliant.
  743          */
  744         error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps);
  745         if (error)
  746                 return (error);
  747 
  748         /* A SHM_ANON path pointer creates an anonymous object. */
  749         if (userpath == SHM_ANON) {
  750                 /* A read-only anonymous object is pointless. */
  751                 if ((flags & O_ACCMODE) == O_RDONLY) {
  752                         fdclose(td, fp, fd);
  753                         fdrop(fp, td);
  754                         return (EINVAL);
  755                 }
  756                 shmfd = shm_alloc(td->td_ucred, cmode);
  757         } else {
  758                 path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
  759                 pr_path = td->td_ucred->cr_prison->pr_path;
  760 
  761                 /* Construct a full pathname for jailed callers. */
  762                 pr_pathlen = strcmp(pr_path, "/") == 0 ? 0
  763                     : strlcpy(path, pr_path, MAXPATHLEN);
  764                 error = copyinstr(userpath, path + pr_pathlen,
  765                     MAXPATHLEN - pr_pathlen, NULL);
  766 #ifdef KTRACE
  767                 if (error == 0 && KTRPOINT(curthread, KTR_NAMEI))
  768                         ktrnamei(path);
  769 #endif
  770                 /* Require paths to start with a '/' character. */
  771                 if (error == 0 && path[pr_pathlen] != '/')
  772                         error = EINVAL;
  773                 if (error) {
  774                         fdclose(td, fp, fd);
  775                         fdrop(fp, td);
  776                         free(path, M_SHMFD);
  777                         return (error);
  778                 }
  779 
  780                 AUDIT_ARG_UPATH1_CANON(path);
  781                 fnv = fnv_32_str(path, FNV1_32_INIT);
  782                 sx_xlock(&shm_dict_lock);
  783                 shmfd = shm_lookup(path, fnv);
  784                 if (shmfd == NULL) {
  785                         /* Object does not yet exist, create it if requested. */
  786                         if (flags & O_CREAT) {
  787 #ifdef MAC
  788                                 error = mac_posixshm_check_create(td->td_ucred,
  789                                     path);
  790                                 if (error == 0) {
  791 #endif
  792                                         shmfd = shm_alloc(td->td_ucred, cmode);
  793                                         shm_insert(path, fnv, shmfd);
  794 #ifdef MAC
  795                                 }
  796 #endif
  797                         } else {
  798                                 free(path, M_SHMFD);
  799                                 error = ENOENT;
  800                         }
  801                 } else {
  802                         /*
  803                          * Object already exists, obtain a new
  804                          * reference if requested and permitted.
  805                          */
  806                         free(path, M_SHMFD);
  807                         if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
  808                                 error = EEXIST;
  809                         else {
  810 #ifdef MAC
  811                                 error = mac_posixshm_check_open(td->td_ucred,
  812                                     shmfd, FFLAGS(flags & O_ACCMODE));
  813                                 if (error == 0)
  814 #endif
  815                                 error = shm_access(shmfd, td->td_ucred,
  816                                     FFLAGS(flags & O_ACCMODE));
  817                         }
  818 
  819                         /*
  820                          * Truncate the file back to zero length if
  821                          * O_TRUNC was specified and the object was
  822                          * opened with read/write.
  823                          */
  824                         if (error == 0 &&
  825                             (flags & (O_ACCMODE | O_TRUNC)) ==
  826                             (O_RDWR | O_TRUNC)) {
  827 #ifdef MAC
  828                                 error = mac_posixshm_check_truncate(
  829                                         td->td_ucred, fp->f_cred, shmfd);
  830                                 if (error == 0)
  831 #endif
  832                                         shm_dotruncate(shmfd, 0);
  833                         }
  834                         if (error == 0)
  835                                 shm_hold(shmfd);
  836                 }
  837                 sx_xunlock(&shm_dict_lock);
  838 
  839                 if (error) {
  840                         fdclose(td, fp, fd);
  841                         fdrop(fp, td);
  842                         return (error);
  843                 }
  844         }
  845 
  846         finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);
  847 
  848         td->td_retval[0] = fd;
  849         fdrop(fp, td);
  850 
  851         return (0);
  852 }
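
The O_CREAT | O_EXCL handling above gives named objects atomic
create-or-fail semantics under shm_dict_lock.  A userland sketch (the
name "/excl_demo" is arbitrary):

    #include <sys/mman.h>
    #include <assert.h>
    #include <errno.h>
    #include <fcntl.h>
    #include <unistd.h>

    int
    main(void)
    {
            shm_unlink("/excl_demo");   /* Start from a clean slate. */
            int fd = shm_open("/excl_demo", O_RDWR | O_CREAT | O_EXCL,
                0600);
            assert(fd >= 0);
            /* A second exclusive create of the same path must fail. */
            assert(shm_open("/excl_demo", O_RDWR | O_CREAT | O_EXCL,
                0600) == -1);
            assert(errno == EEXIST);
            close(fd);
            shm_unlink("/excl_demo");
            return (0);
    }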
  853 
  854 /* System calls. */
  855 int
  856 sys_shm_open(struct thread *td, struct shm_open_args *uap)
  857 {
  858 
  859         return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC, uap->mode,
  860             NULL));
  861 }
  862 
  863 int
  864 sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
  865 {
  866         char *path;
  867         const char *pr_path;
  868         size_t pr_pathlen;
  869         Fnv32_t fnv;
  870         int error;
  871 
  872         path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
  873         pr_path = td->td_ucred->cr_prison->pr_path;
  874         pr_pathlen = strcmp(pr_path, "/") == 0 ? 0
  875             : strlcpy(path, pr_path, MAXPATHLEN);
  876         error = copyinstr(uap->path, path + pr_pathlen, MAXPATHLEN - pr_pathlen,
  877             NULL);
  878         if (error) {
  879                 free(path, M_TEMP);
  880                 return (error);
  881         }
  882 #ifdef KTRACE
  883         if (KTRPOINT(curthread, KTR_NAMEI))
  884                 ktrnamei(path);
  885 #endif
  886         AUDIT_ARG_UPATH1_CANON(path);
  887         fnv = fnv_32_str(path, FNV1_32_INIT);
  888         sx_xlock(&shm_dict_lock);
  889         error = shm_remove(path, fnv, td->td_ucred);
  890         sx_xunlock(&shm_dict_lock);
  891         free(path, M_TEMP);
  892 
  893         return (error);
  894 }
  895 
   896 static int
  897 shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
  898     vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
  899     vm_ooffset_t foff, struct thread *td)
  900 {
  901         struct shmfd *shmfd;
  902         vm_prot_t maxprot;
  903         int error;
  904         bool writecnt;
  905 
  906         shmfd = fp->f_data;
  907         maxprot = VM_PROT_NONE;
  908 
  909         /* FREAD should always be set. */
  910         if ((fp->f_flag & FREAD) != 0)
  911                 maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
  912         if ((fp->f_flag & FWRITE) != 0)
  913                 maxprot |= VM_PROT_WRITE;
  914 
  915         writecnt = (flags & MAP_SHARED) != 0 && (prot & VM_PROT_WRITE) != 0;
  916 
  917         /* Don't permit shared writable mappings on read-only descriptors. */
  918         if (writecnt && (maxprot & VM_PROT_WRITE) == 0)
  919                 return (EACCES);
  920         maxprot &= cap_maxprot;
  921 
  922         /* See comment in vn_mmap(). */
  923         if (
  924 #ifdef _LP64
  925             objsize > OFF_MAX ||
  926 #endif
  927             foff < 0 || foff > OFF_MAX - objsize)
  928                 return (EINVAL);
  929 
  930 #ifdef MAC
  931         error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
  932         if (error != 0)
  933                 return (error);
  934 #endif
  935         
  936         mtx_lock(&shm_timestamp_lock);
  937         vfs_timestamp(&shmfd->shm_atime);
  938         mtx_unlock(&shm_timestamp_lock);
  939         vm_object_reference(shmfd->shm_object);
  940 
  941         if (writecnt)
  942                 vm_pager_update_writecount(shmfd->shm_object, 0, objsize);
  943         error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
  944             shmfd->shm_object, foff, writecnt, td);
  945         if (error != 0) {
  946                 if (writecnt)
  947                         vm_pager_release_writecount(shmfd->shm_object, 0,
  948                             objsize);
  949                 vm_object_deallocate(shmfd->shm_object);
  950         }
  951         return (error);
  952 }
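
The maxprot computation above is what rejects shared writable mappings
of read-only descriptors.  A sketch (the name "/mmap_demo" is
arbitrary):

    #include <sys/mman.h>
    #include <assert.h>
    #include <errno.h>
    #include <fcntl.h>
    #include <unistd.h>

    int
    main(void)
    {
            int fd = shm_open("/mmap_demo", O_RDWR | O_CREAT, 0600);
            assert(fd >= 0);
            assert(ftruncate(fd, 4096) == 0);
            close(fd);
            /* Reopen read-only: FWRITE is clear, maxprot lacks WRITE. */
            fd = shm_open("/mmap_demo", O_RDONLY, 0);
            assert(fd >= 0);
            void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                MAP_SHARED, fd, 0);
            assert(p == MAP_FAILED && errno == EACCES);
            p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
            assert(p != MAP_FAILED);    /* Read-only is permitted. */
            munmap(p, 4096);
            close(fd);
            shm_unlink("/mmap_demo");
            return (0);
    }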
  953 
  954 static int
  955 shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
  956     struct thread *td)
  957 {
  958         struct shmfd *shmfd;
  959         int error;
  960 
  961         error = 0;
  962         shmfd = fp->f_data;
  963         mtx_lock(&shm_timestamp_lock);
  964         /*
  965          * SUSv4 says that x bits of permission need not be affected.
  966          * Be consistent with our shm_open there.
  967          */
  968 #ifdef MAC
  969         error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
  970         if (error != 0)
  971                 goto out;
  972 #endif
  973         error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid,
  974             shmfd->shm_gid, VADMIN, active_cred, NULL);
  975         if (error != 0)
  976                 goto out;
  977         shmfd->shm_mode = mode & ACCESSPERMS;
  978 out:
  979         mtx_unlock(&shm_timestamp_lock);
  980         return (error);
  981 }
  982 
  983 static int
  984 shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
  985     struct thread *td)
  986 {
  987         struct shmfd *shmfd;
  988         int error;
  989 
  990         error = 0;
  991         shmfd = fp->f_data;
  992         mtx_lock(&shm_timestamp_lock);
  993 #ifdef MAC
  994         error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
  995         if (error != 0)
  996                 goto out;
  997 #endif
  998         if (uid == (uid_t)-1)
  999                 uid = shmfd->shm_uid;
 1000         if (gid == (gid_t)-1)
  1001                 gid = shmfd->shm_gid;
 1002         if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
 1003             (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
 1004             (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN, 0)))
 1005                 goto out;
 1006         shmfd->shm_uid = uid;
 1007         shmfd->shm_gid = gid;
 1008 out:
 1009         mtx_unlock(&shm_timestamp_lock);
 1010         return (error);
 1011 }
 1012 
 1013 /*
 1014  * Helper routines to allow the backing object of a shared memory file
 1015  * descriptor to be mapped in the kernel.
 1016  */
 1017 int
 1018 shm_map(struct file *fp, size_t size, off_t offset, void **memp)
 1019 {
 1020         struct shmfd *shmfd;
 1021         vm_offset_t kva, ofs;
 1022         vm_object_t obj;
 1023         int rv;
 1024 
 1025         if (fp->f_type != DTYPE_SHM)
 1026                 return (EINVAL);
 1027         shmfd = fp->f_data;
 1028         obj = shmfd->shm_object;
 1029         VM_OBJECT_WLOCK(obj);
 1030         /*
 1031          * XXXRW: This validation is probably insufficient, and subject to
 1032          * sign errors.  It should be fixed.
 1033          */
 1034         if (offset >= shmfd->shm_size ||
 1035             offset + size > round_page(shmfd->shm_size)) {
 1036                 VM_OBJECT_WUNLOCK(obj);
 1037                 return (EINVAL);
 1038         }
 1039 
 1040         shmfd->shm_kmappings++;
 1041         vm_object_reference_locked(obj);
 1042         VM_OBJECT_WUNLOCK(obj);
 1043 
 1044         /* Map the object into the kernel_map and wire it. */
 1045         kva = vm_map_min(kernel_map);
 1046         ofs = offset & PAGE_MASK;
 1047         offset = trunc_page(offset);
 1048         size = round_page(size + ofs);
 1049         rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
 1050             VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
 1051             VM_PROT_READ | VM_PROT_WRITE, 0);
 1052         if (rv == KERN_SUCCESS) {
 1053                 rv = vm_map_wire(kernel_map, kva, kva + size,
 1054                     VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
 1055                 if (rv == KERN_SUCCESS) {
 1056                         *memp = (void *)(kva + ofs);
 1057                         return (0);
 1058                 }
 1059                 vm_map_remove(kernel_map, kva, kva + size);
 1060         } else
 1061                 vm_object_deallocate(obj);
 1062 
 1063         /* On failure, drop our mapping reference. */
 1064         VM_OBJECT_WLOCK(obj);
 1065         shmfd->shm_kmappings--;
 1066         VM_OBJECT_WUNLOCK(obj);
 1067 
 1068         return (vm_mmap_to_errno(rv));
 1069 }
 1070 
 1071 /*
 1072  * We require the caller to unmap the entire entry.  This allows us to
 1073  * safely decrement shm_kmappings when a mapping is removed.
 1074  */
 1075 int
 1076 shm_unmap(struct file *fp, void *mem, size_t size)
 1077 {
 1078         struct shmfd *shmfd;
 1079         vm_map_entry_t entry;
 1080         vm_offset_t kva, ofs;
 1081         vm_object_t obj;
 1082         vm_pindex_t pindex;
 1083         vm_prot_t prot;
 1084         boolean_t wired;
 1085         vm_map_t map;
 1086         int rv;
 1087 
 1088         if (fp->f_type != DTYPE_SHM)
 1089                 return (EINVAL);
 1090         shmfd = fp->f_data;
 1091         kva = (vm_offset_t)mem;
 1092         ofs = kva & PAGE_MASK;
 1093         kva = trunc_page(kva);
 1094         size = round_page(size + ofs);
 1095         map = kernel_map;
 1096         rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
 1097             &obj, &pindex, &prot, &wired);
 1098         if (rv != KERN_SUCCESS)
 1099                 return (EINVAL);
 1100         if (entry->start != kva || entry->end != kva + size) {
 1101                 vm_map_lookup_done(map, entry);
 1102                 return (EINVAL);
 1103         }
 1104         vm_map_lookup_done(map, entry);
 1105         if (obj != shmfd->shm_object)
 1106                 return (EINVAL);
 1107         vm_map_remove(map, kva, kva + size);
 1108         VM_OBJECT_WLOCK(obj);
 1109         KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
 1110         shmfd->shm_kmappings--;
 1111         VM_OBJECT_WUNLOCK(obj);
 1112         return (0);
 1113 }
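
A hedged sketch of how an in-kernel consumer (hypothetical, not part of
this file) might pair these helpers.  While the mapping exists,
shm_kmappings is non-zero, so shm_dotruncate() refuses to shrink the
object with EBUSY:

    /*
     * Hypothetical consumer: map 'len' bytes of a DTYPE_SHM file at
     * 'off' into the kernel, touch the memory, and unmap.  shm_unmap()
     * must be given exactly the region that shm_map() returned.
     */
    static int
    use_shm_pages(struct file *fp, off_t off, size_t len)
    {
            void *mem;
            int error;

            error = shm_map(fp, len, off, &mem);
            if (error != 0)
                    return (error);
            memset(mem, 0, len);    /* Wired and kernel-addressable. */
            return (shm_unmap(fp, mem, len));
    }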
 1114 
 1115 static int
 1116 shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
 1117 {
 1118         const char *path, *pr_path;
 1119         size_t pr_pathlen;
 1120         bool visible;
 1121 
 1122         sx_assert(&shm_dict_lock, SA_LOCKED);
 1123         kif->kf_type = KF_TYPE_SHM;
 1124         kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
 1125         kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
 1126         if (shmfd->shm_path != NULL) {
 1127                 if (shmfd->shm_path != NULL) {
 1128                         path = shmfd->shm_path;
 1129                         pr_path = curthread->td_ucred->cr_prison->pr_path;
 1130                         if (strcmp(pr_path, "/") != 0) {
 1131                                 /* Return the jail-rooted pathname. */
 1132                                 pr_pathlen = strlen(pr_path);
 1133                                 visible = strncmp(path, pr_path, pr_pathlen)
 1134                                     == 0 && path[pr_pathlen] == '/';
 1135                                 if (list && !visible)
 1136                                         return (EPERM);
 1137                                 if (visible)
 1138                                         path += pr_pathlen;
 1139                         }
 1140                         strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
 1141                 }
 1142         }
 1143         return (0);
 1144 }
 1145 
 1146 static int
 1147 shm_fill_kinfo(struct file *fp, struct kinfo_file *kif,
 1148     struct filedesc *fdp __unused)
 1149 {
 1150         int res;
 1151 
 1152         sx_slock(&shm_dict_lock);
 1153         res = shm_fill_kinfo_locked(fp->f_data, kif, false);
 1154         sx_sunlock(&shm_dict_lock);
 1155         return (res);
 1156 }
 1157 
 1158 static int
 1159 sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS)
 1160 {
 1161         struct shm_mapping *shmm;
 1162         struct sbuf sb;
 1163         struct kinfo_file kif;
 1164         u_long i;
 1165         ssize_t curlen;
 1166         int error, error2;
 1167 
 1168         sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req);
 1169         sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
 1170         curlen = 0;
 1171         error = 0;
 1172         sx_slock(&shm_dict_lock);
 1173         for (i = 0; i < shm_hash + 1; i++) {
 1174                 LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) {
 1175                         error = shm_fill_kinfo_locked(shmm->sm_shmfd,
 1176                             &kif, true);
 1177                         if (error == EPERM) {
 1178                                 error = 0;
 1179                                 continue;
 1180                         }
 1181                         if (error != 0)
 1182                                 break;
 1183                         pack_kinfo(&kif);
 1184                         if (req->oldptr != NULL &&
 1185                             kif.kf_structsize + curlen > req->oldlen)
 1186                                 break;
 1187                         error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ?
 1188                             0 : ENOMEM;
 1189                         if (error != 0)
 1190                                 break;
 1191                         curlen += kif.kf_structsize;
 1192                 }
 1193         }
 1194         sx_sunlock(&shm_dict_lock);
 1195         error2 = sbuf_finish(&sb);
 1196         sbuf_delete(&sb);
 1197         return (error != 0 ? error : error2);
 1198 }
 1199 
 1200 SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list,
 1201     CTLFLAG_RD | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE,
 1202     NULL, 0, sysctl_posix_shm_list, "",
 1203     "POSIX SHM list");
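
The sysctl above is what posixshmcontrol(1) consumes.  A hedged
userland sketch that walks the packed, variable-size kinfo_file
records:

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <sys/user.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int
    main(void)
    {
            size_t len = 0;
            char *buf, *p;

            /* Probe for the required length, then fetch the records. */
            if (sysctlbyname("kern.ipc.posix_shm_list", NULL, &len,
                NULL, 0) != 0)
                    return (1);
            if ((buf = malloc(len)) == NULL ||
                sysctlbyname("kern.ipc.posix_shm_list", buf, &len,
                NULL, 0) != 0)
                    return (1);
            for (p = buf; p < buf + len;) {
                    struct kinfo_file *kif = (void *)p;
                    if (kif->kf_structsize <= 0)
                            break;
                    printf("%s size %jd\n", kif->kf_path,
                        (intmax_t)kif->kf_un.kf_file.kf_file_size);
                    p += kif->kf_structsize;
            }
            free(buf);
            return (0);
    }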
