FreeBSD/Linux Kernel Cross Reference
sys/kern/uipc_shm.c

/*-
 * Copyright (c) 2006, 2011 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for shared swap-backed anonymous memory objects via
 * shm_open(2) and shm_unlink(2).  While most of the implementation is
 * here, vm_mmap.c contains mapping logic changes.
 *
 * TODO:
 *
 * (1) Need to export data to a userland tool via a sysctl.  Should ipcs(1)
 *     and ipcrm(1) be expanded or should new tools to manage both POSIX
 *     kernel semaphores and POSIX shared memory be written?
 *
 * (2) Add support for this file type to fstat(1).
 *
 * (3) Resource limits?  Does this need its own resource limits or are the
 *     existing limits in mmap(2) sufficient?
 */
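
/*
 * Editor's note: illustrative userland usage of this facility (a sketch,
 * not part of the original source; the object name "/example" and the
 * 4096-byte size are arbitrary, and error handling is elided).  A
 * writable object is created, sized, mapped shared, and later unlinked:
 *
 *	int fd = shm_open("/example", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 4096);
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, 0);
 *	p[0] = 'x';
 *	munmap(p, 4096);
 *	close(fd);
 *	shm_unlink("/example");
 */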

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.1/sys/kern/uipc_shm.c 320639 2017-07-04 05:37:58Z kib $");

#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/signal.h>
#include <sys/jail.h>
#include <sys/ktrace.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/user.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

struct shm_mapping {
        char            *sm_path;
        Fnv32_t         sm_fnv;
        struct shmfd    *sm_shmfd;
        LIST_ENTRY(shm_mapping) sm_link;
};

static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
static LIST_HEAD(, shm_mapping) *shm_dictionary;
static struct sx shm_dict_lock;
static struct mtx shm_timestamp_lock;
static u_long shm_hash;
static struct unrhdr *shm_ino_unr;
static dev_t shm_dev_ino;

#define SHM_HASH(fnv)   (&shm_dictionary[(fnv) & shm_hash])
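
/*
 * Editor's note: shm_init() below sizes the table with hashinit(1024, ...),
 * which for a power-of-two request leaves shm_hash == 1023 (the bucket
 * mask), so, for example, a path whose 32-bit FNV-1 hash is 0x811c9dc5
 * lands in bucket (0x811c9dc5 & 1023).
 */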

static void     shm_init(void *arg);
static void     shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
static int      shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);

static fo_rdwr_t        shm_read;
static fo_rdwr_t        shm_write;
static fo_truncate_t    shm_truncate;
static fo_stat_t        shm_stat;
static fo_close_t       shm_close;
static fo_chmod_t       shm_chmod;
static fo_chown_t       shm_chown;
static fo_seek_t        shm_seek;
static fo_fill_kinfo_t  shm_fill_kinfo;
static fo_mmap_t        shm_mmap;

/* File descriptor operations. */
struct fileops shm_ops = {
        .fo_read = shm_read,
        .fo_write = shm_write,
        .fo_truncate = shm_truncate,
        .fo_ioctl = invfo_ioctl,
        .fo_poll = invfo_poll,
        .fo_kqfilter = invfo_kqfilter,
        .fo_stat = shm_stat,
        .fo_close = shm_close,
        .fo_chmod = shm_chmod,
        .fo_chown = shm_chown,
        .fo_sendfile = vn_sendfile,
        .fo_seek = shm_seek,
        .fo_fill_kinfo = shm_fill_kinfo,
        .fo_mmap = shm_mmap,
        .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};

FEATURE(posix_shm, "POSIX shared memory");

static int
uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
{
        vm_page_t m;
        vm_pindex_t idx;
        size_t tlen;
        int error, offset, rv;

        idx = OFF_TO_IDX(uio->uio_offset);
        offset = uio->uio_offset & PAGE_MASK;
        tlen = MIN(PAGE_SIZE - offset, len);

        VM_OBJECT_WLOCK(obj);

        /*
         * Read I/O without either a corresponding resident page or swap
         * page: use zero_region.  This is intended to avoid instantiating
         * pages on read from a sparse region.
         */
        if (uio->uio_rw == UIO_READ && vm_page_lookup(obj, idx) == NULL &&
            !vm_pager_has_page(obj, idx, NULL, NULL)) {
                VM_OBJECT_WUNLOCK(obj);
                return (uiomove(__DECONST(void *, zero_region), tlen, uio));
        }

        /*
         * Parallel reads of the page content from disk are prevented
         * by exclusive busy.
         *
         * Although the tmpfs vnode lock is held here, it is
         * nonetheless safe to sleep waiting for a free page.  The
         * pageout daemon does not need to acquire the tmpfs vnode
         * lock to page out tobj's pages because tobj is an OBJT_SWAP
         * type object.
         */
        m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
        if (m->valid != VM_PAGE_BITS_ALL) {
                vm_page_xbusy(m);
                if (vm_pager_has_page(obj, idx, NULL, NULL)) {
                        rv = vm_pager_get_pages(obj, &m, 1, NULL, NULL);
                        if (rv != VM_PAGER_OK) {
                                printf(
            "uiomove_object: vm_obj %p idx %jd valid %x pager error %d\n",
                                    obj, idx, m->valid, rv);
                                vm_page_lock(m);
                                vm_page_free(m);
                                vm_page_unlock(m);
                                VM_OBJECT_WUNLOCK(obj);
                                return (EIO);
                        }
                } else
                        vm_page_zero_invalid(m, TRUE);
                vm_page_xunbusy(m);
        }
        vm_page_lock(m);
        vm_page_hold(m);
        if (m->queue == PQ_NONE) {
                vm_page_deactivate(m);
        } else {
                /* Requeue to maintain LRU ordering. */
                vm_page_requeue(m);
        }
        vm_page_unlock(m);
        VM_OBJECT_WUNLOCK(obj);
        error = uiomove_fromphys(&m, offset, tlen, uio);
        if (uio->uio_rw == UIO_WRITE && error == 0) {
                VM_OBJECT_WLOCK(obj);
                vm_page_dirty(m);
                vm_pager_page_unswapped(m);
                VM_OBJECT_WUNLOCK(obj);
        }
        vm_page_lock(m);
        vm_page_unhold(m);
        vm_page_unlock(m);

        return (error);
}

int
uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
{
        ssize_t resid;
        size_t len;
        int error;

        error = 0;
        while ((resid = uio->uio_resid) > 0) {
                if (obj_size <= uio->uio_offset)
                        break;
                len = MIN(obj_size - uio->uio_offset, resid);
                if (len == 0)
                        break;
                error = uiomove_object_page(obj, len, uio);
                if (error != 0 || resid == uio->uio_resid)
                        break;
        }
        return (error);
}
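
/*
 * Editor's note: a worked example of the loop above, assuming 4 KB pages.
 * A 10000-byte read at offset 0 of an 8192-byte object proceeds in
 * page-sized chunks (4096 + 4096), then stops when uio_offset reaches
 * obj_size, returning a short count of 8192 bytes.  Chunks that touch
 * neither a resident page nor a swapped-out page are satisfied from
 * zero_region without instantiating any pages.
 */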

static int
shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
        struct shmfd *shmfd;
        off_t foffset;
        int error;

        shmfd = fp->f_data;
        foffset = foffset_lock(fp, 0);
        error = 0;
        switch (whence) {
        case L_INCR:
                if (foffset < 0 ||
                    (offset > 0 && foffset > OFF_MAX - offset)) {
                        error = EOVERFLOW;
                        break;
                }
                offset += foffset;
                break;
        case L_XTND:
                if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
                        error = EOVERFLOW;
                        break;
                }
                offset += shmfd->shm_size;
                break;
        case L_SET:
                break;
        default:
                error = EINVAL;
        }
        if (error == 0) {
                if (offset < 0 || offset > shmfd->shm_size)
                        error = EINVAL;
                else
                        td->td_uretoff.tdu_off = offset;
        }
        foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
        return (error);
}

static int
shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
        struct shmfd *shmfd;
        void *rl_cookie;
        int error;

        shmfd = fp->f_data;
#ifdef MAC
        error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
        if (error)
                return (error);
#endif
        foffset_lock_uio(fp, uio, flags);
        rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
            uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
        error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
        rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
        foffset_unlock_uio(fp, uio, flags);
        return (error);
}

static int
shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
        struct shmfd *shmfd;
        void *rl_cookie;
        int error;

        shmfd = fp->f_data;
#ifdef MAC
        error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
        if (error)
                return (error);
#endif
        foffset_lock_uio(fp, uio, flags);
        if ((flags & FOF_OFFSET) == 0) {
                rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
                    &shmfd->shm_mtx);
        } else {
                rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
                    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
        }

        error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
        rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
        foffset_unlock_uio(fp, uio, flags);
        return (error);
}

static int
shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
        struct shmfd *shmfd;
#ifdef MAC
        int error;
#endif

        shmfd = fp->f_data;
#ifdef MAC
        error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
        if (error)
                return (error);
#endif
        return (shm_dotruncate(shmfd, length));
}

static int
shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{
        struct shmfd *shmfd;
#ifdef MAC
        int error;
#endif

        shmfd = fp->f_data;

#ifdef MAC
        error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
        if (error)
                return (error);
#endif

        /*
         * Attempt to return sane-ish values for fstat() on a memory file
         * descriptor.
         */
        bzero(sb, sizeof(*sb));
        sb->st_blksize = PAGE_SIZE;
        sb->st_size = shmfd->shm_size;
        sb->st_blocks = howmany(sb->st_size, sb->st_blksize);
        mtx_lock(&shm_timestamp_lock);
        sb->st_atim = shmfd->shm_atime;
        sb->st_ctim = shmfd->shm_ctime;
        sb->st_mtim = shmfd->shm_mtime;
        sb->st_birthtim = shmfd->shm_birthtime;
        sb->st_mode = S_IFREG | shmfd->shm_mode;                /* XXX */
        sb->st_uid = shmfd->shm_uid;
        sb->st_gid = shmfd->shm_gid;
        mtx_unlock(&shm_timestamp_lock);
        sb->st_dev = shm_dev_ino;
        sb->st_ino = shmfd->shm_ino;

        return (0);
}

static int
shm_close(struct file *fp, struct thread *td)
{
        struct shmfd *shmfd;

        shmfd = fp->f_data;
        fp->f_data = NULL;
        shm_drop(shmfd);

        return (0);
}

int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
        vm_object_t object;
        vm_page_t m;
        vm_pindex_t idx, nobjsize;
        vm_ooffset_t delta;
        int base, rv;

        object = shmfd->shm_object;
        VM_OBJECT_WLOCK(object);
        if (length == shmfd->shm_size) {
                VM_OBJECT_WUNLOCK(object);
                return (0);
        }
        nobjsize = OFF_TO_IDX(length + PAGE_MASK);

        /* Are we shrinking?  If so, trim the end. */
        if (length < shmfd->shm_size) {
                /*
                 * Disallow any requests to shrink the size if this
                 * object is mapped into the kernel.
                 */
                if (shmfd->shm_kmappings > 0) {
                        VM_OBJECT_WUNLOCK(object);
                        return (EBUSY);
                }

                /*
                 * Zero the truncated part of the last page.
                 */
                base = length & PAGE_MASK;
                if (base != 0) {
                        idx = OFF_TO_IDX(length);
retry:
                        m = vm_page_lookup(object, idx);
                        if (m != NULL) {
                                if (vm_page_sleep_if_busy(m, "shmtrc"))
                                        goto retry;
                        } else if (vm_pager_has_page(object, idx, NULL, NULL)) {
                                m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL);
                                if (m == NULL) {
                                        VM_OBJECT_WUNLOCK(object);
                                        VM_WAIT;
                                        VM_OBJECT_WLOCK(object);
                                        goto retry;
                                }
                                rv = vm_pager_get_pages(object, &m, 1, NULL,
                                    NULL);
                                vm_page_lock(m);
                                if (rv == VM_PAGER_OK) {
                                        vm_page_deactivate(m);
                                        vm_page_unlock(m);
                                        vm_page_xunbusy(m);
                                } else {
                                        vm_page_free(m);
                                        vm_page_unlock(m);
                                        VM_OBJECT_WUNLOCK(object);
                                        return (EIO);
                                }
                        }
                        if (m != NULL) {
                                pmap_zero_page_area(m, base, PAGE_SIZE - base);
                                KASSERT(m->valid == VM_PAGE_BITS_ALL,
                                    ("shm_dotruncate: page %p is invalid", m));
                                vm_page_dirty(m);
                                vm_pager_page_unswapped(m);
                        }
                }
                delta = ptoa(object->size - nobjsize);

                /* Toss in memory pages. */
                if (nobjsize < object->size)
                        vm_object_page_remove(object, nobjsize, object->size,
                            0);

                /* Toss pages from swap. */
                if (object->type == OBJT_SWAP)
                        swap_pager_freespace(object, nobjsize, delta);

                /* Free the swap accounted for shm */
                swap_release_by_cred(delta, object->cred);
                object->charge -= delta;
        } else {
                /* Attempt to reserve the swap */
                delta = ptoa(nobjsize - object->size);
                if (!swap_reserve_by_cred(delta, object->cred)) {
                        VM_OBJECT_WUNLOCK(object);
                        return (ENOMEM);
                }
                object->charge += delta;
        }
        shmfd->shm_size = length;
        mtx_lock(&shm_timestamp_lock);
        vfs_timestamp(&shmfd->shm_ctime);
        shmfd->shm_mtime = shmfd->shm_ctime;
        mtx_unlock(&shm_timestamp_lock);
        object->size = nobjsize;
        VM_OBJECT_WUNLOCK(object);
        return (0);
}
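
/*
 * Editor's note on the accounting above, assuming 4 KB pages: growing an
 * object from 3 pages to 5 computes delta = ptoa(5 - 3) = 8192 bytes,
 * which must first be reserved against object->cred via
 * swap_reserve_by_cred(); shrinking by the same amount releases the
 * reservation and trims object->charge correspondingly.
 */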

/*
 * shmfd object management including creation and reference counting
 * routines.
 */
struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode)
{
        struct shmfd *shmfd;
        int ino;

        shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
        shmfd->shm_size = 0;
        shmfd->shm_uid = ucred->cr_uid;
        shmfd->shm_gid = ucred->cr_gid;
        shmfd->shm_mode = mode;
        shmfd->shm_object = vm_pager_allocate(OBJT_DEFAULT, NULL,
            shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
        KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
        shmfd->shm_object->pg_color = 0;
        VM_OBJECT_WLOCK(shmfd->shm_object);
        vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING);
        vm_object_set_flag(shmfd->shm_object, OBJ_COLORED | OBJ_NOSPLIT);
        VM_OBJECT_WUNLOCK(shmfd->shm_object);
        vfs_timestamp(&shmfd->shm_birthtime);
        shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
            shmfd->shm_birthtime;
        ino = alloc_unr(shm_ino_unr);
        if (ino == -1)
                shmfd->shm_ino = 0;
        else
                shmfd->shm_ino = ino;
        refcount_init(&shmfd->shm_refs, 1);
        mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
        rangelock_init(&shmfd->shm_rl);
#ifdef MAC
        mac_posixshm_init(shmfd);
        mac_posixshm_create(ucred, shmfd);
#endif

        return (shmfd);
}

struct shmfd *
shm_hold(struct shmfd *shmfd)
{

        refcount_acquire(&shmfd->shm_refs);
        return (shmfd);
}

void
shm_drop(struct shmfd *shmfd)
{

        if (refcount_release(&shmfd->shm_refs)) {
#ifdef MAC
                mac_posixshm_destroy(shmfd);
#endif
                rangelock_destroy(&shmfd->shm_rl);
                mtx_destroy(&shmfd->shm_mtx);
                vm_object_deallocate(shmfd->shm_object);
                if (shmfd->shm_ino != 0)
                        free_unr(shm_ino_unr, shmfd->shm_ino);
                free(shmfd, M_SHMFD);
        }
}

/*
 * Determine if the credentials have sufficient permissions for a
 * specified combination of FREAD and FWRITE.
 */
int
shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
{
        accmode_t accmode;
        int error;

        accmode = 0;
        if (flags & FREAD)
                accmode |= VREAD;
        if (flags & FWRITE)
                accmode |= VWRITE;
        mtx_lock(&shm_timestamp_lock);
        error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
            accmode, ucred, NULL);
        mtx_unlock(&shm_timestamp_lock);
        return (error);
}
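
/*
 * Editor's note: for instance, an shm_open(..., O_RDWR, ...) by an
 * unprivileged credential that is neither the owner nor a group member
 * of a 0640 object maps to accmode VREAD | VWRITE here, and vaccess(9)
 * fails the request (typically with EACCES) because the "other" bits
 * grant nothing.
 */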

/*
 * Dictionary management.  We maintain an in-kernel dictionary to map
 * paths to shmfd objects.  We use the FNV hash on the path to store
 * the mappings in a hash table.
 */
static void
shm_init(void *arg)
{

        mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
        sx_init(&shm_dict_lock, "shm dictionary");
        shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
        shm_ino_unr = new_unrhdr(1, INT32_MAX, NULL);
        KASSERT(shm_ino_unr != NULL, ("shm fake inodes not initialized"));
        shm_dev_ino = devfs_alloc_cdp_inode();
        KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
}
SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);

static struct shmfd *
shm_lookup(char *path, Fnv32_t fnv)
{
        struct shm_mapping *map;

        LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
                if (map->sm_fnv != fnv)
                        continue;
                if (strcmp(map->sm_path, path) == 0)
                        return (map->sm_shmfd);
        }

        return (NULL);
}

static void
shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
{
        struct shm_mapping *map;

        map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
        map->sm_path = path;
        map->sm_fnv = fnv;
        map->sm_shmfd = shm_hold(shmfd);
        shmfd->shm_path = path;
        LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
}

static int
shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
{
        struct shm_mapping *map;
        int error;

        LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
                if (map->sm_fnv != fnv)
                        continue;
                if (strcmp(map->sm_path, path) == 0) {
#ifdef MAC
                        error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
                        if (error)
                                return (error);
#endif
                        error = shm_access(map->sm_shmfd, ucred,
                            FREAD | FWRITE);
                        if (error)
                                return (error);
                        map->sm_shmfd->shm_path = NULL;
                        LIST_REMOVE(map, sm_link);
                        shm_drop(map->sm_shmfd);
                        free(map->sm_path, M_SHMFD);
                        free(map, M_SHMFD);
                        return (0);
                }
        }

        return (ENOENT);
}

int
kern_shm_open(struct thread *td, const char *userpath, int flags, mode_t mode,
    struct filecaps *fcaps)
{
        struct filedesc *fdp;
        struct shmfd *shmfd;
        struct file *fp;
        char *path;
        const char *pr_path;
        size_t pr_pathlen;
        Fnv32_t fnv;
        mode_t cmode;
        int fd, error;

#ifdef CAPABILITY_MODE
        /*
         * shm_open(2) is only allowed for anonymous objects.
         */
        if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
                return (ECAPMODE);
#endif

        if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
                return (EINVAL);

        if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
                return (EINVAL);

        fdp = td->td_proc->p_fd;
        cmode = (mode & ~fdp->fd_cmask) & ACCESSPERMS;

        error = falloc_caps(td, &fp, &fd, O_CLOEXEC, fcaps);
        if (error)
                return (error);

        /* A SHM_ANON path pointer creates an anonymous object. */
        if (userpath == SHM_ANON) {
                /* A read-only anonymous object is pointless. */
                if ((flags & O_ACCMODE) == O_RDONLY) {
                        fdclose(td, fp, fd);
                        fdrop(fp, td);
                        return (EINVAL);
                }
                shmfd = shm_alloc(td->td_ucred, cmode);
        } else {
                path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
                pr_path = td->td_ucred->cr_prison->pr_path;

                /* Construct a full pathname for jailed callers. */
                pr_pathlen = strcmp(pr_path, "/") == 0 ? 0
                    : strlcpy(path, pr_path, MAXPATHLEN);
                error = copyinstr(userpath, path + pr_pathlen,
                    MAXPATHLEN - pr_pathlen, NULL);
#ifdef KTRACE
                if (error == 0 && KTRPOINT(curthread, KTR_NAMEI))
                        ktrnamei(path);
#endif
                /* Require paths to start with a '/' character. */
                if (error == 0 && path[pr_pathlen] != '/')
                        error = EINVAL;
                if (error) {
                        fdclose(td, fp, fd);
                        fdrop(fp, td);
                        free(path, M_SHMFD);
                        return (error);
                }

                fnv = fnv_32_str(path, FNV1_32_INIT);
                sx_xlock(&shm_dict_lock);
                shmfd = shm_lookup(path, fnv);
                if (shmfd == NULL) {
                        /* Object does not yet exist, create it if requested. */
                        if (flags & O_CREAT) {
#ifdef MAC
                                error = mac_posixshm_check_create(td->td_ucred,
                                    path);
                                if (error == 0) {
#endif
                                        shmfd = shm_alloc(td->td_ucred, cmode);
                                        shm_insert(path, fnv, shmfd);
#ifdef MAC
                                }
#endif
                        } else {
                                free(path, M_SHMFD);
                                error = ENOENT;
                        }
                } else {
                        /*
                         * Object already exists, obtain a new
                         * reference if requested and permitted.
                         */
                        free(path, M_SHMFD);
                        if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
                                error = EEXIST;
                        else {
#ifdef MAC
                                error = mac_posixshm_check_open(td->td_ucred,
                                    shmfd, FFLAGS(flags & O_ACCMODE));
                                if (error == 0)
#endif
                                error = shm_access(shmfd, td->td_ucred,
                                    FFLAGS(flags & O_ACCMODE));
                        }

                        /*
                         * Truncate the file back to zero length if
                         * O_TRUNC was specified and the object was
                         * opened with read/write.
                         */
                        if (error == 0 &&
                            (flags & (O_ACCMODE | O_TRUNC)) ==
                            (O_RDWR | O_TRUNC)) {
#ifdef MAC
                                error = mac_posixshm_check_truncate(
                                        td->td_ucred, fp->f_cred, shmfd);
                                if (error == 0)
#endif
                                        shm_dotruncate(shmfd, 0);
                        }
                        if (error == 0)
                                shm_hold(shmfd);
                }
                sx_xunlock(&shm_dict_lock);

                if (error) {
                        fdclose(td, fp, fd);
                        fdrop(fp, td);
                        return (error);
                }
        }

        finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);

        td->td_retval[0] = fd;
        fdrop(fp, td);

        return (0);
}
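
/*
 * Editor's note on the jail handling above: a caller jailed with a
 * prison path of, say, "/jails/a" (a hypothetical example) that passes
 * "/foo" to shm_open(2) is looked up under the composite key
 * "/jails/a/foo", so identically named objects in different jails never
 * collide.
 */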

/* System calls. */
int
sys_shm_open(struct thread *td, struct shm_open_args *uap)
{

        return (kern_shm_open(td, uap->path, uap->flags, uap->mode, NULL));
}

int
sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
{
        char *path;
        const char *pr_path;
        size_t pr_pathlen;
        Fnv32_t fnv;
        int error;

        path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
        pr_path = td->td_ucred->cr_prison->pr_path;
        pr_pathlen = strcmp(pr_path, "/") == 0 ? 0
            : strlcpy(path, pr_path, MAXPATHLEN);
        error = copyinstr(uap->path, path + pr_pathlen, MAXPATHLEN - pr_pathlen,
            NULL);
        if (error) {
                free(path, M_TEMP);
                return (error);
        }
#ifdef KTRACE
        if (KTRPOINT(curthread, KTR_NAMEI))
                ktrnamei(path);
#endif
        fnv = fnv_32_str(path, FNV1_32_INIT);
        sx_xlock(&shm_dict_lock);
        error = shm_remove(path, fnv, td->td_ucred);
        sx_xunlock(&shm_dict_lock);
        free(path, M_TEMP);

        return (error);
}

int
shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
    vm_ooffset_t foff, struct thread *td)
{
        struct shmfd *shmfd;
        vm_prot_t maxprot;
        int error;

        shmfd = fp->f_data;
        maxprot = VM_PROT_NONE;

        /* FREAD should always be set. */
        if ((fp->f_flag & FREAD) != 0)
                maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
        if ((fp->f_flag & FWRITE) != 0)
                maxprot |= VM_PROT_WRITE;

        /* Don't permit shared writable mappings on read-only descriptors. */
        if ((flags & MAP_SHARED) != 0 &&
            (maxprot & VM_PROT_WRITE) == 0 &&
            (prot & VM_PROT_WRITE) != 0)
                return (EACCES);
        maxprot &= cap_maxprot;

        /* See comment in vn_mmap(). */
        if (
#ifdef _LP64
            objsize > OFF_MAX ||
#endif
            foff < 0 || foff > OFF_MAX - objsize)
                return (EINVAL);

#ifdef MAC
        error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
        if (error != 0)
                return (error);
#endif

        mtx_lock(&shm_timestamp_lock);
        vfs_timestamp(&shmfd->shm_atime);
        mtx_unlock(&shm_timestamp_lock);
        vm_object_reference(shmfd->shm_object);

        error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
            shmfd->shm_object, foff, FALSE, td);
        if (error != 0)
                vm_object_deallocate(shmfd->shm_object);
        return (error);
}
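
/*
 * Editor's note: for example, mmap(..., PROT_READ | PROT_WRITE,
 * MAP_SHARED, ...) on a descriptor opened O_RDONLY is rejected above
 * with EACCES, while a PROT_READ mapping of the same descriptor is
 * permitted.
 */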

static int
shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
        struct shmfd *shmfd;
        int error;

        error = 0;
        shmfd = fp->f_data;
        mtx_lock(&shm_timestamp_lock);
        /*
         * SUSv4 says that x bits of permission need not be affected.
         * Be consistent with our shm_open there.
         */
#ifdef MAC
        error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
        if (error != 0)
                goto out;
#endif
        error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid,
            shmfd->shm_gid, VADMIN, active_cred, NULL);
        if (error != 0)
                goto out;
        shmfd->shm_mode = mode & ACCESSPERMS;
out:
        mtx_unlock(&shm_timestamp_lock);
        return (error);
}

static int
shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
        struct shmfd *shmfd;
        int error;

        error = 0;
        shmfd = fp->f_data;
        mtx_lock(&shm_timestamp_lock);
#ifdef MAC
        error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
        if (error != 0)
                goto out;
#endif
        if (uid == (uid_t)-1)
                uid = shmfd->shm_uid;
        if (gid == (gid_t)-1)
                gid = shmfd->shm_gid;
        if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
            (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
            (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN, 0)))
                goto out;
        shmfd->shm_uid = uid;
        shmfd->shm_gid = gid;
out:
        mtx_unlock(&shm_timestamp_lock);
        return (error);
}

/*
 * Helper routines to allow the backing object of a shared memory file
 * descriptor to be mapped in the kernel.
 */
int
shm_map(struct file *fp, size_t size, off_t offset, void **memp)
{
        struct shmfd *shmfd;
        vm_offset_t kva, ofs;
        vm_object_t obj;
        int rv;

        if (fp->f_type != DTYPE_SHM)
                return (EINVAL);
        shmfd = fp->f_data;
        obj = shmfd->shm_object;
        VM_OBJECT_WLOCK(obj);
        /*
         * XXXRW: This validation is probably insufficient, and subject to
         * sign errors.  It should be fixed.
         */
        if (offset >= shmfd->shm_size ||
            offset + size > round_page(shmfd->shm_size)) {
                VM_OBJECT_WUNLOCK(obj);
                return (EINVAL);
        }

        shmfd->shm_kmappings++;
        vm_object_reference_locked(obj);
        VM_OBJECT_WUNLOCK(obj);

        /* Map the object into the kernel_map and wire it. */
        kva = vm_map_min(kernel_map);
        ofs = offset & PAGE_MASK;
        offset = trunc_page(offset);
        size = round_page(size + ofs);
        rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
            VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
            VM_PROT_READ | VM_PROT_WRITE, 0);
        if (rv == KERN_SUCCESS) {
                rv = vm_map_wire(kernel_map, kva, kva + size,
                    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
                if (rv == KERN_SUCCESS) {
                        *memp = (void *)(kva + ofs);
                        return (0);
                }
                vm_map_remove(kernel_map, kva, kva + size);
        } else
                vm_object_deallocate(obj);

        /* On failure, drop our mapping reference. */
        VM_OBJECT_WLOCK(obj);
        shmfd->shm_kmappings--;
        VM_OBJECT_WUNLOCK(obj);

        return (vm_mmap_to_errno(rv));
}

/*
 * We require the caller to unmap the entire entry.  This allows us to
 * safely decrement shm_kmappings when a mapping is removed.
 */
int
shm_unmap(struct file *fp, void *mem, size_t size)
{
        struct shmfd *shmfd;
        vm_map_entry_t entry;
        vm_offset_t kva, ofs;
        vm_object_t obj;
        vm_pindex_t pindex;
        vm_prot_t prot;
        boolean_t wired;
        vm_map_t map;
        int rv;

        if (fp->f_type != DTYPE_SHM)
                return (EINVAL);
        shmfd = fp->f_data;
        kva = (vm_offset_t)mem;
        ofs = kva & PAGE_MASK;
        kva = trunc_page(kva);
        size = round_page(size + ofs);
        map = kernel_map;
        rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
            &obj, &pindex, &prot, &wired);
        if (rv != KERN_SUCCESS)
                return (EINVAL);
        if (entry->start != kva || entry->end != kva + size) {
                vm_map_lookup_done(map, entry);
                return (EINVAL);
        }
        vm_map_lookup_done(map, entry);
        if (obj != shmfd->shm_object)
                return (EINVAL);
        vm_map_remove(map, kva, kva + size);
        VM_OBJECT_WLOCK(obj);
        KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
        shmfd->shm_kmappings--;
        VM_OBJECT_WUNLOCK(obj);
        return (0);
}
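
/*
 * Editor's note: a hypothetical in-kernel consumer of the two helpers
 * above (a sketch; "fp", "src", and "len" are assumed to be a DTYPE_SHM
 * file, a kernel buffer, and a length within the object's size, and
 * error handling is abbreviated):
 *
 *	void *mem;
 *	int error;
 *
 *	error = shm_map(fp, len, 0, &mem);
 *	if (error == 0) {
 *		memcpy(mem, src, len);
 *		error = shm_unmap(fp, mem, len);
 *	}
 */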

static int
shm_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
{
        const char *path, *pr_path;
        struct shmfd *shmfd;
        size_t pr_pathlen;

        kif->kf_type = KF_TYPE_SHM;
        shmfd = fp->f_data;

        mtx_lock(&shm_timestamp_lock);
        kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;    /* XXX */
        mtx_unlock(&shm_timestamp_lock);
        kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
        if (shmfd->shm_path != NULL) {
                sx_slock(&shm_dict_lock);
                if (shmfd->shm_path != NULL) {
                        path = shmfd->shm_path;
                        pr_path = curthread->td_ucred->cr_prison->pr_path;
                        if (strcmp(pr_path, "/") != 0) {
                                /* Return the jail-rooted pathname. */
                                pr_pathlen = strlen(pr_path);
                                if (strncmp(path, pr_path, pr_pathlen) == 0 &&
                                    path[pr_pathlen] == '/')
                                        path += pr_pathlen;
                        }
                        strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
                }
                sx_sunlock(&shm_dict_lock);
        }
        return (0);
}

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.