FreeBSD/Linux Kernel Cross Reference
sys/kern/uipc_shm.c

    1 /*-
    2  * Copyright (c) 2006, 2011 Robert N. M. Watson
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  */
   26 
   27 /*
   28  * Support for shared swap-backed anonymous memory objects via
   29  * shm_open(2) and shm_unlink(2).  While most of the implementation is
   30  * here, the mmap(2) mapping logic for these objects lives in vm_mmap.c.
   31  *
   32  * TODO:
   33  *
   34  * (1) Need to export data to a userland tool via a sysctl.  Should ipcs(1)
   35  *     and ipcrm(1) be expanded or should new tools to manage both POSIX
   36  *     kernel semaphores and POSIX shared memory be written?
   37  *
   38  * (2) Add support for this file type to fstat(1).
   39  *
   40  * (3) Resource limits?  Does this need its own resource limits or are the
   41  *     existing limits in mmap(2) sufficient?
   42  */
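/*
 * A minimal sketch of the userland usage this file implements.  The path
 * "/myshm" and the 4096-byte size below are illustrative assumptions, not
 * values taken from this file:
 *
 *	#include <sys/mman.h>
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd;
 *	char *p;
 *
 *	fd = shm_open("/myshm", O_RDWR | O_CREAT, 0600);
 *	if (fd == -1)
 *		err(1, "shm_open");
 *	if (ftruncate(fd, 4096) == -1)
 *		err(1, "ftruncate");
 *	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	if (p == MAP_FAILED)
 *		err(1, "mmap");
 *	p[0] = 1;
 *	if (shm_unlink("/myshm") == -1)
 *		err(1, "shm_unlink");
 *
 * shm_open(2) and shm_unlink(2) enter the kernel via sys_shm_open() and
 * sys_shm_unlink() below; ftruncate(2) on the descriptor reaches
 * shm_truncate()/shm_dotruncate(), and mmap(2) obtains the backing VM
 * object through shm_mmap().
 */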
   43 
   44 #include <sys/cdefs.h>
   45 __FBSDID("$FreeBSD: releng/10.1/sys/kern/uipc_shm.c 271399 2014-09-10 15:45:18Z jhb $");
   46 
   47 #include "opt_capsicum.h"
   48 #include "opt_ktrace.h"
   49 
   50 #include <sys/param.h>
   51 #include <sys/capability.h>
   52 #include <sys/conf.h>
   53 #include <sys/fcntl.h>
   54 #include <sys/file.h>
   55 #include <sys/filedesc.h>
   56 #include <sys/fnv_hash.h>
   57 #include <sys/kernel.h>
   58 #include <sys/uio.h>
   59 #include <sys/signal.h>
   60 #include <sys/ktrace.h>
   61 #include <sys/lock.h>
   62 #include <sys/malloc.h>
   63 #include <sys/mman.h>
   64 #include <sys/mutex.h>
   65 #include <sys/priv.h>
   66 #include <sys/proc.h>
   67 #include <sys/refcount.h>
   68 #include <sys/resourcevar.h>
   69 #include <sys/rwlock.h>
   70 #include <sys/stat.h>
   71 #include <sys/sysctl.h>
   72 #include <sys/sysproto.h>
   73 #include <sys/systm.h>
   74 #include <sys/sx.h>
   75 #include <sys/time.h>
   76 #include <sys/vnode.h>
   77 #include <sys/unistd.h>
   78 
   79 #include <security/mac/mac_framework.h>
   80 
   81 #include <vm/vm.h>
   82 #include <vm/vm_param.h>
   83 #include <vm/pmap.h>
   84 #include <vm/vm_extern.h>
   85 #include <vm/vm_map.h>
   86 #include <vm/vm_kern.h>
   87 #include <vm/vm_object.h>
   88 #include <vm/vm_page.h>
   89 #include <vm/vm_pageout.h>
   90 #include <vm/vm_pager.h>
   91 #include <vm/swap_pager.h>
   92 
   93 struct shm_mapping {
   94         char            *sm_path;
   95         Fnv32_t         sm_fnv;
   96         struct shmfd    *sm_shmfd;
   97         LIST_ENTRY(shm_mapping) sm_link;
   98 };
   99 
  100 static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
  101 static LIST_HEAD(, shm_mapping) *shm_dictionary;
  102 static struct sx shm_dict_lock;
  103 static struct mtx shm_timestamp_lock;
  104 static u_long shm_hash;
  105 static struct unrhdr *shm_ino_unr;
  106 static dev_t shm_dev_ino;
  107 
  108 #define SHM_HASH(fnv)   (&shm_dictionary[(fnv) & shm_hash])
  109 
  110 static int      shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags);
  111 static struct shmfd *shm_alloc(struct ucred *ucred, mode_t mode);
  112 static void     shm_init(void *arg);
  113 static void     shm_drop(struct shmfd *shmfd);
  114 static struct shmfd *shm_hold(struct shmfd *shmfd);
  115 static void     shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
  116 static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
  117 static int      shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
  118 static int      shm_dotruncate(struct shmfd *shmfd, off_t length);
  119 
  120 static fo_rdwr_t        shm_read;
  121 static fo_rdwr_t        shm_write;
  122 static fo_truncate_t    shm_truncate;
  123 static fo_ioctl_t       shm_ioctl;
  124 static fo_poll_t        shm_poll;
  125 static fo_kqfilter_t    shm_kqfilter;
  126 static fo_stat_t        shm_stat;
  127 static fo_close_t       shm_close;
  128 static fo_chmod_t       shm_chmod;
  129 static fo_chown_t       shm_chown;
  130 static fo_seek_t        shm_seek;
  131 
  132 /* File descriptor operations. */
  133 static struct fileops shm_ops = {
  134         .fo_read = shm_read,
  135         .fo_write = shm_write,
  136         .fo_truncate = shm_truncate,
  137         .fo_ioctl = shm_ioctl,
  138         .fo_poll = shm_poll,
  139         .fo_kqfilter = shm_kqfilter,
  140         .fo_stat = shm_stat,
  141         .fo_close = shm_close,
  142         .fo_chmod = shm_chmod,
  143         .fo_chown = shm_chown,
  144         .fo_sendfile = vn_sendfile,
  145         .fo_seek = shm_seek,
  146         .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
  147 };
  148 
  149 FEATURE(posix_shm, "POSIX shared memory");
  150 
  151 static int
  152 uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
  153 {
  154         vm_page_t m;
  155         vm_pindex_t idx;
  156         size_t tlen;
  157         int error, offset, rv;
  158 
  159         idx = OFF_TO_IDX(uio->uio_offset);
  160         offset = uio->uio_offset & PAGE_MASK;
  161         tlen = MIN(PAGE_SIZE - offset, len);
  162 
  163         VM_OBJECT_WLOCK(obj);
  164 
  165         /*
  166          * Parallel reads of the page content from disk are prevented
  167          * by exclusive busy.
  168          *
   169          * Although the tmpfs vnode lock may be held here (this helper
   170          * also serves tmpfs I/O), it is nonetheless safe to sleep
   171          * waiting for a free page.  The pageout daemon does not need to
   172          * acquire the tmpfs vnode lock to page out obj's pages because
   173          * obj is an OBJT_SWAP type object.
   174          */
  175         m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL);
  176         if (m->valid != VM_PAGE_BITS_ALL) {
  177                 if (vm_pager_has_page(obj, idx, NULL, NULL)) {
  178                         rv = vm_pager_get_pages(obj, &m, 1, 0);
  179                         m = vm_page_lookup(obj, idx);
  180                         if (m == NULL) {
  181                                 printf(
  182                     "uiomove_object: vm_obj %p idx %jd null lookup rv %d\n",
  183                                     obj, idx, rv);
  184                                 VM_OBJECT_WUNLOCK(obj);
  185                                 return (EIO);
  186                         }
  187                         if (rv != VM_PAGER_OK) {
  188                                 printf(
  189             "uiomove_object: vm_obj %p idx %jd valid %x pager error %d\n",
  190                                     obj, idx, m->valid, rv);
  191                                 vm_page_lock(m);
  192                                 vm_page_free(m);
  193                                 vm_page_unlock(m);
  194                                 VM_OBJECT_WUNLOCK(obj);
  195                                 return (EIO);
  196                         }
  197                 } else
  198                         vm_page_zero_invalid(m, TRUE);
  199         }
  200         vm_page_xunbusy(m);
  201         vm_page_lock(m);
  202         vm_page_hold(m);
  203         if (m->queue == PQ_NONE) {
  204                 vm_page_deactivate(m);
  205         } else {
  206                 /* Requeue to maintain LRU ordering. */
  207                 vm_page_requeue(m);
  208         }
  209         vm_page_unlock(m);
  210         VM_OBJECT_WUNLOCK(obj);
  211         error = uiomove_fromphys(&m, offset, tlen, uio);
  212         if (uio->uio_rw == UIO_WRITE && error == 0) {
  213                 VM_OBJECT_WLOCK(obj);
  214                 vm_page_dirty(m);
  215                 vm_pager_page_unswapped(m);
  216                 VM_OBJECT_WUNLOCK(obj);
  217         }
  218         vm_page_lock(m);
  219         vm_page_unhold(m);
  220         vm_page_unlock(m);
  221 
  222         return (error);
  223 }
  224 
  225 int
  226 uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
  227 {
  228         ssize_t resid;
  229         size_t len;
  230         int error;
  231 
  232         error = 0;
  233         while ((resid = uio->uio_resid) > 0) {
  234                 if (obj_size <= uio->uio_offset)
  235                         break;
  236                 len = MIN(obj_size - uio->uio_offset, resid);
  237                 if (len == 0)
  238                         break;
  239                 error = uiomove_object_page(obj, len, uio);
  240                 if (error != 0 || resid == uio->uio_resid)
  241                         break;
  242         }
  243         return (error);
  244 }
  245 
  246 static int
  247 shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
  248 {
  249         struct shmfd *shmfd;
  250         off_t foffset;
  251         int error;
  252 
  253         shmfd = fp->f_data;
  254         foffset = foffset_lock(fp, 0);
  255         error = 0;
  256         switch (whence) {
  257         case L_INCR:
  258                 if (foffset < 0 ||
  259                     (offset > 0 && foffset > OFF_MAX - offset)) {
  260                         error = EOVERFLOW;
  261                         break;
  262                 }
  263                 offset += foffset;
  264                 break;
  265         case L_XTND:
  266                 if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
  267                         error = EOVERFLOW;
  268                         break;
  269                 }
  270                 offset += shmfd->shm_size;
  271                 break;
  272         case L_SET:
  273                 break;
  274         default:
  275                 error = EINVAL;
  276         }
  277         if (error == 0) {
  278                 if (offset < 0 || offset > shmfd->shm_size)
  279                         error = EINVAL;
  280                 else
  281                         *(off_t *)(td->td_retval) = offset;
  282         }
  283         foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
  284         return (error);
  285 }
  286 
  287 static int
  288 shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
  289     int flags, struct thread *td)
  290 {
  291         struct shmfd *shmfd;
  292         void *rl_cookie;
  293         int error;
  294 
   295         shmfd = fp->f_data;
   296 #ifdef MAC
   297         error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
   298         if (error)
   299                 return (error);
   300 #endif
   301         foffset_lock_uio(fp, uio, flags);
   302         rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
   303             uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
  304         error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
  305         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
  306         foffset_unlock_uio(fp, uio, flags);
  307         return (error);
  308 }
  309 
  310 static int
  311 shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
  312     int flags, struct thread *td)
  313 {
  314         struct shmfd *shmfd;
  315         void *rl_cookie;
  316         int error;
  317 
  318         shmfd = fp->f_data;
  319 #ifdef MAC
  320         error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
  321         if (error)
  322                 return (error);
  323 #endif
  324         foffset_lock_uio(fp, uio, flags);
  325         if ((flags & FOF_OFFSET) == 0) {
  326                 rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
  327                     &shmfd->shm_mtx);
  328         } else {
  329                 rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
  330                     uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
  331         }
  332 
  333         error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
  334         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
  335         foffset_unlock_uio(fp, uio, flags);
  336         return (error);
  337 }
  338 
  339 static int
  340 shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
  341     struct thread *td)
  342 {
  343         struct shmfd *shmfd;
  344 #ifdef MAC
  345         int error;
  346 #endif
  347 
  348         shmfd = fp->f_data;
  349 #ifdef MAC
  350         error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
  351         if (error)
  352                 return (error);
  353 #endif
  354         return (shm_dotruncate(shmfd, length));
  355 }
  356 
  357 static int
  358 shm_ioctl(struct file *fp, u_long com, void *data,
  359     struct ucred *active_cred, struct thread *td)
  360 {
  361 
  362         return (EOPNOTSUPP);
  363 }
  364 
  365 static int
  366 shm_poll(struct file *fp, int events, struct ucred *active_cred,
  367     struct thread *td)
  368 {
  369 
  370         return (EOPNOTSUPP);
  371 }
  372 
  373 static int
  374 shm_kqfilter(struct file *fp, struct knote *kn)
  375 {
  376 
  377         return (EOPNOTSUPP);
  378 }
  379 
  380 static int
  381 shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
  382     struct thread *td)
  383 {
  384         struct shmfd *shmfd;
  385 #ifdef MAC
  386         int error;
  387 #endif
  388 
  389         shmfd = fp->f_data;
  390 
  391 #ifdef MAC
  392         error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
  393         if (error)
  394                 return (error);
  395 #endif
  396         
  397         /*
   398          * Attempt to return sensible values for fstat() on a memory file
  399          * descriptor.
  400          */
  401         bzero(sb, sizeof(*sb));
  402         sb->st_blksize = PAGE_SIZE;
  403         sb->st_size = shmfd->shm_size;
  404         sb->st_blocks = (sb->st_size + sb->st_blksize - 1) / sb->st_blksize;
  405         mtx_lock(&shm_timestamp_lock);
  406         sb->st_atim = shmfd->shm_atime;
  407         sb->st_ctim = shmfd->shm_ctime;
  408         sb->st_mtim = shmfd->shm_mtime;
  409         sb->st_birthtim = shmfd->shm_birthtime;
  410         sb->st_mode = S_IFREG | shmfd->shm_mode;                /* XXX */
  411         sb->st_uid = shmfd->shm_uid;
  412         sb->st_gid = shmfd->shm_gid;
  413         mtx_unlock(&shm_timestamp_lock);
  414         sb->st_dev = shm_dev_ino;
  415         sb->st_ino = shmfd->shm_ino;
  416 
  417         return (0);
  418 }
  419 
  420 static int
  421 shm_close(struct file *fp, struct thread *td)
  422 {
  423         struct shmfd *shmfd;
  424 
  425         shmfd = fp->f_data;
  426         fp->f_data = NULL;
  427         shm_drop(shmfd);
  428 
  429         return (0);
  430 }
  431 
  432 static int
  433 shm_dotruncate(struct shmfd *shmfd, off_t length)
  434 {
  435         vm_object_t object;
  436         vm_page_t m, ma[1];
  437         vm_pindex_t idx, nobjsize;
  438         vm_ooffset_t delta;
  439         int base, rv;
  440 
  441         object = shmfd->shm_object;
  442         VM_OBJECT_WLOCK(object);
  443         if (length == shmfd->shm_size) {
  444                 VM_OBJECT_WUNLOCK(object);
  445                 return (0);
  446         }
  447         nobjsize = OFF_TO_IDX(length + PAGE_MASK);
  448 
  449         /* Are we shrinking?  If so, trim the end. */
  450         if (length < shmfd->shm_size) {
  451                 /*
  452                  * Disallow any requests to shrink the size if this
  453                  * object is mapped into the kernel.
  454                  */
  455                 if (shmfd->shm_kmappings > 0) {
  456                         VM_OBJECT_WUNLOCK(object);
  457                         return (EBUSY);
  458                 }
  459 
  460                 /*
  461                  * Zero the truncated part of the last page.
  462                  */
  463                 base = length & PAGE_MASK;
  464                 if (base != 0) {
  465                         idx = OFF_TO_IDX(length);
  466 retry:
  467                         m = vm_page_lookup(object, idx);
  468                         if (m != NULL) {
  469                                 if (vm_page_sleep_if_busy(m, "shmtrc"))
  470                                         goto retry;
  471                         } else if (vm_pager_has_page(object, idx, NULL, NULL)) {
  472                                 m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL);
  473                                 if (m == NULL) {
  474                                         VM_OBJECT_WUNLOCK(object);
  475                                         VM_WAIT;
  476                                         VM_OBJECT_WLOCK(object);
  477                                         goto retry;
  478                                 } else if (m->valid != VM_PAGE_BITS_ALL) {
  479                                         ma[0] = m;
  480                                         rv = vm_pager_get_pages(object, ma, 1,
  481                                             0);
  482                                         m = vm_page_lookup(object, idx);
  483                                 } else
  484                                         /* A cached page was reactivated. */
  485                                         rv = VM_PAGER_OK;
  486                                 vm_page_lock(m);
  487                                 if (rv == VM_PAGER_OK) {
  488                                         vm_page_deactivate(m);
  489                                         vm_page_unlock(m);
  490                                         vm_page_xunbusy(m);
  491                                 } else {
  492                                         vm_page_free(m);
  493                                         vm_page_unlock(m);
  494                                         VM_OBJECT_WUNLOCK(object);
  495                                         return (EIO);
  496                                 }
  497                         }
  498                         if (m != NULL) {
  499                                 pmap_zero_page_area(m, base, PAGE_SIZE - base);
  500                                 KASSERT(m->valid == VM_PAGE_BITS_ALL,
  501                                     ("shm_dotruncate: page %p is invalid", m));
  502                                 vm_page_dirty(m);
  503                                 vm_pager_page_unswapped(m);
  504                         }
  505                 }
  506                 delta = ptoa(object->size - nobjsize);
  507 
  508                 /* Toss in memory pages. */
  509                 if (nobjsize < object->size)
  510                         vm_object_page_remove(object, nobjsize, object->size,
  511                             0);
  512 
  513                 /* Toss pages from swap. */
  514                 if (object->type == OBJT_SWAP)
  515                         swap_pager_freespace(object, nobjsize, delta);
  516 
   517                 /* Release the swap accounting for the truncated range. */
  518                 swap_release_by_cred(delta, object->cred);
  519                 object->charge -= delta;
  520         } else {
  521                 /* Attempt to reserve the swap */
  522                 delta = ptoa(nobjsize - object->size);
  523                 if (!swap_reserve_by_cred(delta, object->cred)) {
  524                         VM_OBJECT_WUNLOCK(object);
  525                         return (ENOMEM);
  526                 }
  527                 object->charge += delta;
  528         }
  529         shmfd->shm_size = length;
  530         mtx_lock(&shm_timestamp_lock);
  531         vfs_timestamp(&shmfd->shm_ctime);
  532         shmfd->shm_mtime = shmfd->shm_ctime;
  533         mtx_unlock(&shm_timestamp_lock);
  534         object->size = nobjsize;
  535         VM_OBJECT_WUNLOCK(object);
  536         return (0);
  537 }
  538 
  539 /*
  540  * shmfd object management including creation and reference counting
  541  * routines.
  542  */
  543 static struct shmfd *
  544 shm_alloc(struct ucred *ucred, mode_t mode)
  545 {
  546         struct shmfd *shmfd;
  547         int ino;
  548 
  549         shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
  550         shmfd->shm_size = 0;
  551         shmfd->shm_uid = ucred->cr_uid;
  552         shmfd->shm_gid = ucred->cr_gid;
  553         shmfd->shm_mode = mode;
  554         shmfd->shm_object = vm_pager_allocate(OBJT_DEFAULT, NULL,
  555             shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
  556         KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
  557         VM_OBJECT_WLOCK(shmfd->shm_object);
  558         vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING);
  559         vm_object_set_flag(shmfd->shm_object, OBJ_NOSPLIT);
  560         VM_OBJECT_WUNLOCK(shmfd->shm_object);
  561         vfs_timestamp(&shmfd->shm_birthtime);
  562         shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
  563             shmfd->shm_birthtime;
  564         ino = alloc_unr(shm_ino_unr);
  565         if (ino == -1)
  566                 shmfd->shm_ino = 0;
  567         else
  568                 shmfd->shm_ino = ino;
  569         refcount_init(&shmfd->shm_refs, 1);
  570         mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
  571         rangelock_init(&shmfd->shm_rl);
  572 #ifdef MAC
  573         mac_posixshm_init(shmfd);
  574         mac_posixshm_create(ucred, shmfd);
  575 #endif
  576 
  577         return (shmfd);
  578 }
  579 
  580 static struct shmfd *
  581 shm_hold(struct shmfd *shmfd)
  582 {
  583 
  584         refcount_acquire(&shmfd->shm_refs);
  585         return (shmfd);
  586 }
  587 
  588 static void
  589 shm_drop(struct shmfd *shmfd)
  590 {
  591 
  592         if (refcount_release(&shmfd->shm_refs)) {
  593 #ifdef MAC
  594                 mac_posixshm_destroy(shmfd);
  595 #endif
  596                 rangelock_destroy(&shmfd->shm_rl);
  597                 mtx_destroy(&shmfd->shm_mtx);
  598                 vm_object_deallocate(shmfd->shm_object);
  599                 if (shmfd->shm_ino != 0)
  600                         free_unr(shm_ino_unr, shmfd->shm_ino);
  601                 free(shmfd, M_SHMFD);
  602         }
  603 }
  604 
  605 /*
  606  * Determine if the credentials have sufficient permissions for a
  607  * specified combination of FREAD and FWRITE.
  608  */
  609 static int
  610 shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
  611 {
  612         accmode_t accmode;
  613         int error;
  614 
  615         accmode = 0;
  616         if (flags & FREAD)
  617                 accmode |= VREAD;
  618         if (flags & FWRITE)
  619                 accmode |= VWRITE;
  620         mtx_lock(&shm_timestamp_lock);
  621         error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
  622             accmode, ucred, NULL);
  623         mtx_unlock(&shm_timestamp_lock);
  624         return (error);
  625 }
  626 
  627 /*
  628  * Dictionary management.  We maintain an in-kernel dictionary to map
  629  * paths to shmfd objects.  We use the FNV hash on the path to store
  630  * the mappings in a hash table.
  631  */
  632 static void
  633 shm_init(void *arg)
  634 {
  635 
  636         mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
  637         sx_init(&shm_dict_lock, "shm dictionary");
  638         shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
  639         shm_ino_unr = new_unrhdr(1, INT32_MAX, NULL);
  640         KASSERT(shm_ino_unr != NULL, ("shm fake inodes not initialized"));
  641         shm_dev_ino = devfs_alloc_cdp_inode();
  642         KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
  643 }
  644 SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);
  645 
  646 static struct shmfd *
  647 shm_lookup(char *path, Fnv32_t fnv)
  648 {
  649         struct shm_mapping *map;
  650 
  651         LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
  652                 if (map->sm_fnv != fnv)
  653                         continue;
  654                 if (strcmp(map->sm_path, path) == 0)
  655                         return (map->sm_shmfd);
  656         }
  657 
  658         return (NULL);
  659 }
  660 
  661 static void
  662 shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
  663 {
  664         struct shm_mapping *map;
  665 
  666         map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
  667         map->sm_path = path;
  668         map->sm_fnv = fnv;
  669         map->sm_shmfd = shm_hold(shmfd);
  670         shmfd->shm_path = path;
  671         LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
  672 }
  673 
  674 static int
  675 shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
  676 {
  677         struct shm_mapping *map;
  678         int error;
  679 
  680         LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
  681                 if (map->sm_fnv != fnv)
  682                         continue;
  683                 if (strcmp(map->sm_path, path) == 0) {
  684 #ifdef MAC
  685                         error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
  686                         if (error)
  687                                 return (error);
  688 #endif
  689                         error = shm_access(map->sm_shmfd, ucred,
  690                             FREAD | FWRITE);
  691                         if (error)
  692                                 return (error);
  693                         map->sm_shmfd->shm_path = NULL;
  694                         LIST_REMOVE(map, sm_link);
  695                         shm_drop(map->sm_shmfd);
  696                         free(map->sm_path, M_SHMFD);
  697                         free(map, M_SHMFD);
  698                         return (0);
  699                 }
  700         }
  701 
  702         return (ENOENT);
  703 }
  704 
  705 /* System calls. */
  706 int
  707 sys_shm_open(struct thread *td, struct shm_open_args *uap)
  708 {
  709         struct filedesc *fdp;
  710         struct shmfd *shmfd;
  711         struct file *fp;
  712         char *path;
  713         Fnv32_t fnv;
  714         mode_t cmode;
  715         int fd, error;
  716 
  717 #ifdef CAPABILITY_MODE
  718         /*
  719          * shm_open(2) is only allowed for anonymous objects.
  720          */
  721         if (IN_CAPABILITY_MODE(td) && (uap->path != SHM_ANON))
  722                 return (ECAPMODE);
  723 #endif
  724 
  725         if ((uap->flags & O_ACCMODE) != O_RDONLY &&
  726             (uap->flags & O_ACCMODE) != O_RDWR)
  727                 return (EINVAL);
  728 
  729         if ((uap->flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
  730                 return (EINVAL);
  731 
  732         fdp = td->td_proc->p_fd;
  733         cmode = (uap->mode & ~fdp->fd_cmask) & ACCESSPERMS;
  734 
  735         error = falloc(td, &fp, &fd, O_CLOEXEC);
  736         if (error)
  737                 return (error);
  738 
  739         /* A SHM_ANON path pointer creates an anonymous object. */
  740         if (uap->path == SHM_ANON) {
  741                 /* A read-only anonymous object is pointless. */
  742                 if ((uap->flags & O_ACCMODE) == O_RDONLY) {
  743                         fdclose(fdp, fp, fd, td);
  744                         fdrop(fp, td);
  745                         return (EINVAL);
  746                 }
  747                 shmfd = shm_alloc(td->td_ucred, cmode);
  748         } else {
  749                 path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
  750                 error = copyinstr(uap->path, path, MAXPATHLEN, NULL);
  751 #ifdef KTRACE
  752                 if (error == 0 && KTRPOINT(curthread, KTR_NAMEI))
  753                         ktrnamei(path);
  754 #endif
  755                 /* Require paths to start with a '/' character. */
  756                 if (error == 0 && path[0] != '/')
  757                         error = EINVAL;
  758                 if (error) {
  759                         fdclose(fdp, fp, fd, td);
  760                         fdrop(fp, td);
  761                         free(path, M_SHMFD);
  762                         return (error);
  763                 }
  764 
  765                 fnv = fnv_32_str(path, FNV1_32_INIT);
  766                 sx_xlock(&shm_dict_lock);
  767                 shmfd = shm_lookup(path, fnv);
  768                 if (shmfd == NULL) {
  769                         /* Object does not yet exist, create it if requested. */
  770                         if (uap->flags & O_CREAT) {
  771 #ifdef MAC
  772                                 error = mac_posixshm_check_create(td->td_ucred,
  773                                     path);
  774                                 if (error == 0) {
  775 #endif
  776                                         shmfd = shm_alloc(td->td_ucred, cmode);
  777                                         shm_insert(path, fnv, shmfd);
  778 #ifdef MAC
  779                                 }
  780 #endif
  781                         } else {
  782                                 free(path, M_SHMFD);
  783                                 error = ENOENT;
  784                         }
  785                 } else {
  786                         /*
  787                          * Object already exists, obtain a new
  788                          * reference if requested and permitted.
  789                          */
  790                         free(path, M_SHMFD);
  791                         if ((uap->flags & (O_CREAT | O_EXCL)) ==
  792                             (O_CREAT | O_EXCL))
  793                                 error = EEXIST;
  794                         else {
  795 #ifdef MAC
  796                                 error = mac_posixshm_check_open(td->td_ucred,
  797                                     shmfd, FFLAGS(uap->flags & O_ACCMODE));
  798                                 if (error == 0)
  799 #endif
  800                                 error = shm_access(shmfd, td->td_ucred,
  801                                     FFLAGS(uap->flags & O_ACCMODE));
  802                         }
  803 
  804                         /*
  805                          * Truncate the file back to zero length if
  806                          * O_TRUNC was specified and the object was
  807                          * opened with read/write.
  808                          */
  809                         if (error == 0 &&
  810                             (uap->flags & (O_ACCMODE | O_TRUNC)) ==
  811                             (O_RDWR | O_TRUNC)) {
  812 #ifdef MAC
  813                                 error = mac_posixshm_check_truncate(
  814                                         td->td_ucred, fp->f_cred, shmfd);
  815                                 if (error == 0)
  816 #endif
  817                                         shm_dotruncate(shmfd, 0);
  818                         }
  819                         if (error == 0)
  820                                 shm_hold(shmfd);
  821                 }
  822                 sx_xunlock(&shm_dict_lock);
  823 
  824                 if (error) {
  825                         fdclose(fdp, fp, fd, td);
  826                         fdrop(fp, td);
  827                         return (error);
  828                 }
  829         }
  830 
  831         finit(fp, FFLAGS(uap->flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);
  832 
  833         td->td_retval[0] = fd;
  834         fdrop(fp, td);
  835 
  836         return (0);
  837 }
  838 
  839 int
  840 sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
  841 {
  842         char *path;
  843         Fnv32_t fnv;
  844         int error;
  845 
  846         path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
  847         error = copyinstr(uap->path, path, MAXPATHLEN, NULL);
  848         if (error) {
  849                 free(path, M_TEMP);
  850                 return (error);
  851         }
  852 #ifdef KTRACE
  853         if (KTRPOINT(curthread, KTR_NAMEI))
  854                 ktrnamei(path);
  855 #endif
  856         fnv = fnv_32_str(path, FNV1_32_INIT);
  857         sx_xlock(&shm_dict_lock);
  858         error = shm_remove(path, fnv, td->td_ucred);
  859         sx_xunlock(&shm_dict_lock);
  860         free(path, M_TEMP);
  861 
  862         return (error);
  863 }
  864 
  865 /*
  866  * mmap() helper to validate mmap() requests against shm object state
  867  * and give mmap() the vm_object to use for the mapping.
  868  */
  869 int
  870 shm_mmap(struct shmfd *shmfd, vm_size_t objsize, vm_ooffset_t foff,
  871     vm_object_t *obj)
  872 {
  873 
  874         /*
  875          * XXXRW: This validation is probably insufficient, and subject to
  876          * sign errors.  It should be fixed.
  877          */
  878         if (foff >= shmfd->shm_size ||
  879             foff + objsize > round_page(shmfd->shm_size))
  880                 return (EINVAL);
  881 
  882         mtx_lock(&shm_timestamp_lock);
  883         vfs_timestamp(&shmfd->shm_atime);
  884         mtx_unlock(&shm_timestamp_lock);
  885         vm_object_reference(shmfd->shm_object);
  886         *obj = shmfd->shm_object;
  887         return (0);
  888 }
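/*
 * A minimal sketch (an assumed caller, not code copied from vm_mmap.c) of
 * how the mmap(2) path can consume shm_mmap() for a DTYPE_SHM file; here
 * "objsize" and "foff" stand for the request's length and file offset:
 *
 *	struct shmfd *shmfd;
 *	vm_object_t obj;
 *	int error;
 *
 *	shmfd = fp->f_data;
 *	error = shm_mmap(shmfd, objsize, foff, &obj);
 *	if (error != 0)
 *		return (error);
 *
 * On success the object is returned with an extra reference, so a caller
 * that fails to establish the mapping afterwards is expected to drop that
 * reference with vm_object_deallocate().
 */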
  889 
  890 static int
  891 shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
  892     struct thread *td)
  893 {
  894         struct shmfd *shmfd;
  895         int error;
  896 
  897         error = 0;
  898         shmfd = fp->f_data;
  899         mtx_lock(&shm_timestamp_lock);
  900         /*
  901          * SUSv4 says that x bits of permission need not be affected.
  902          * Be consistent with our shm_open there.
  903          */
  904 #ifdef MAC
  905         error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
  906         if (error != 0)
  907                 goto out;
  908 #endif
  909         error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid,
  910             shmfd->shm_gid, VADMIN, active_cred, NULL);
  911         if (error != 0)
  912                 goto out;
  913         shmfd->shm_mode = mode & ACCESSPERMS;
  914 out:
  915         mtx_unlock(&shm_timestamp_lock);
  916         return (error);
  917 }
  918 
  919 static int
  920 shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
  921     struct thread *td)
  922 {
  923         struct shmfd *shmfd;
  924         int error;
  925 
  926         error = 0;
  927         shmfd = fp->f_data;
  928         mtx_lock(&shm_timestamp_lock);
  929 #ifdef MAC
  930         error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
  931         if (error != 0)
  932                 goto out;
  933 #endif
  934         if (uid == (uid_t)-1)
  935                 uid = shmfd->shm_uid;
  936         if (gid == (gid_t)-1)
  937                  gid = shmfd->shm_gid;
  938         if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
  939             (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
  940             (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN, 0)))
  941                 goto out;
  942         shmfd->shm_uid = uid;
  943         shmfd->shm_gid = gid;
  944 out:
  945         mtx_unlock(&shm_timestamp_lock);
  946         return (error);
  947 }
  948 
  949 /*
  950  * Helper routines to allow the backing object of a shared memory file
  951  * descriptor to be mapped in the kernel.
  952  */
  953 int
  954 shm_map(struct file *fp, size_t size, off_t offset, void **memp)
  955 {
  956         struct shmfd *shmfd;
  957         vm_offset_t kva, ofs;
  958         vm_object_t obj;
  959         int rv;
  960 
  961         if (fp->f_type != DTYPE_SHM)
  962                 return (EINVAL);
  963         shmfd = fp->f_data;
  964         obj = shmfd->shm_object;
  965         VM_OBJECT_WLOCK(obj);
  966         /*
  967          * XXXRW: This validation is probably insufficient, and subject to
  968          * sign errors.  It should be fixed.
  969          */
  970         if (offset >= shmfd->shm_size ||
  971             offset + size > round_page(shmfd->shm_size)) {
  972                 VM_OBJECT_WUNLOCK(obj);
  973                 return (EINVAL);
  974         }
  975 
  976         shmfd->shm_kmappings++;
  977         vm_object_reference_locked(obj);
  978         VM_OBJECT_WUNLOCK(obj);
  979 
  980         /* Map the object into the kernel_map and wire it. */
  981         kva = vm_map_min(kernel_map);
  982         ofs = offset & PAGE_MASK;
  983         offset = trunc_page(offset);
  984         size = round_page(size + ofs);
  985         rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
  986             VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
  987             VM_PROT_READ | VM_PROT_WRITE, 0);
  988         if (rv == KERN_SUCCESS) {
  989                 rv = vm_map_wire(kernel_map, kva, kva + size,
  990                     VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
  991                 if (rv == KERN_SUCCESS) {
  992                         *memp = (void *)(kva + ofs);
  993                         return (0);
  994                 }
  995                 vm_map_remove(kernel_map, kva, kva + size);
  996         } else
  997                 vm_object_deallocate(obj);
  998 
  999         /* On failure, drop our mapping reference. */
 1000         VM_OBJECT_WLOCK(obj);
 1001         shmfd->shm_kmappings--;
 1002         VM_OBJECT_WUNLOCK(obj);
 1003 
 1004         return (vm_mmap_to_errno(rv));
 1005 }
 1006 
 1007 /*
 1008  * We require the caller to unmap the entire entry.  This allows us to
 1009  * safely decrement shm_kmappings when a mapping is removed.
 1010  */
 1011 int
 1012 shm_unmap(struct file *fp, void *mem, size_t size)
 1013 {
 1014         struct shmfd *shmfd;
 1015         vm_map_entry_t entry;
 1016         vm_offset_t kva, ofs;
 1017         vm_object_t obj;
 1018         vm_pindex_t pindex;
 1019         vm_prot_t prot;
 1020         boolean_t wired;
 1021         vm_map_t map;
 1022         int rv;
 1023 
 1024         if (fp->f_type != DTYPE_SHM)
 1025                 return (EINVAL);
 1026         shmfd = fp->f_data;
 1027         kva = (vm_offset_t)mem;
 1028         ofs = kva & PAGE_MASK;
 1029         kva = trunc_page(kva);
 1030         size = round_page(size + ofs);
 1031         map = kernel_map;
 1032         rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
 1033             &obj, &pindex, &prot, &wired);
 1034         if (rv != KERN_SUCCESS)
 1035                 return (EINVAL);
 1036         if (entry->start != kva || entry->end != kva + size) {
 1037                 vm_map_lookup_done(map, entry);
 1038                 return (EINVAL);
 1039         }
 1040         vm_map_lookup_done(map, entry);
 1041         if (obj != shmfd->shm_object)
 1042                 return (EINVAL);
 1043         vm_map_remove(map, kva, kva + size);
 1044         VM_OBJECT_WLOCK(obj);
 1045         KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
 1046         shmfd->shm_kmappings--;
 1047         VM_OBJECT_WUNLOCK(obj);
 1048         return (0);
 1049 }
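/*
 * A minimal sketch of an in-kernel consumer of shm_map() and shm_unmap()
 * above; the caller, the buffer length "len" and the zero offset are
 * hypothetical:
 *
 *	void *mem;
 *	int error;
 *
 *	error = shm_map(fp, len, 0, &mem);
 *	if (error != 0)
 *		return (error);
 *	... read or write the wired pages through "mem" ...
 *	error = shm_unmap(fp, mem, len);
 *
 * The same "mem" and "len" must be passed back to shm_unmap() so that the
 * entire kernel_map entry created by shm_map() is removed, which is what
 * lets shm_kmappings be decremented safely.
 */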
 1050 
 1051 void
 1052 shm_path(struct shmfd *shmfd, char *path, size_t size)
 1053 {
 1054 
 1055         if (shmfd->shm_path == NULL)
 1056                 return;
 1057         sx_slock(&shm_dict_lock);
 1058         if (shmfd->shm_path != NULL)
 1059                 strlcpy(path, shmfd->shm_path, size);
 1060         sx_sunlock(&shm_dict_lock);
 1061 }
