FreeBSD/Linux Kernel Cross Reference
sys/kern/uipc_shm.c


    1 /*-
    2  * Copyright (c) 2006, 2011 Robert N. M. Watson
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  */
   26 
   27 /*
   28  * Support for shared swap-backed anonymous memory objects via
   29  * shm_open(2) and shm_unlink(2).  While most of the implementation is
   30  * here, vm_mmap.c contains mapping logic changes.
   31  *
   32  * TODO:
   33  *
   34  * (1) Need to export data to a userland tool via a sysctl.  Should ipcs(1)
   35  *     and ipcrm(1) be expanded or should new tools to manage both POSIX
   36  *     kernel semaphores and POSIX shared memory be written?
   37  *
   38  * (2) Add support for this file type to fstat(1).
   39  *
   40  * (3) Resource limits?  Does this need its own resource limits or are the
   41  *     existing limits in mmap(2) sufficient?
   42  */
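
As a point of reference, the following is a minimal userland sketch (not part of this file) of how the interfaces implemented below are typically consumed; the object name "/example" and the 4 KB size are illustrative assumptions:

#include <sys/mman.h>

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int
shm_usage_example(void)
{
	char *p;
	int fd;

	/* Create or open a named object; serviced by kern_shm_open() below. */
	fd = shm_open("/example", O_RDWR | O_CREAT, 0600);
	if (fd == -1)
		return (-1);

	/* Set the size; this reaches shm_dotruncate() via shm_truncate(). */
	if (ftruncate(fd, 4096) == -1)
		return (-1);

	/* Map and use the object; mapping requests go through shm_mmap(). */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return (-1);
	strcpy(p, "hello");

	munmap(p, 4096);
	close(fd);

	/* Drop the name; the object itself lives until the last reference. */
	shm_unlink("/example");
	return (0);
}
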
   43 
   44 #include <sys/cdefs.h>
   45 __FBSDID("$FreeBSD: releng/11.2/sys/kern/uipc_shm.c 327785 2018-01-10 20:39:26Z markj $");
   46 
   47 #include "opt_capsicum.h"
   48 #include "opt_ktrace.h"
   49 
   50 #include <sys/param.h>
   51 #include <sys/capsicum.h>
   52 #include <sys/conf.h>
   53 #include <sys/fcntl.h>
   54 #include <sys/file.h>
   55 #include <sys/filedesc.h>
   56 #include <sys/fnv_hash.h>
   57 #include <sys/kernel.h>
   58 #include <sys/uio.h>
   59 #include <sys/signal.h>
   60 #include <sys/jail.h>
   61 #include <sys/ktrace.h>
   62 #include <sys/lock.h>
   63 #include <sys/malloc.h>
   64 #include <sys/mman.h>
   65 #include <sys/mutex.h>
   66 #include <sys/priv.h>
   67 #include <sys/proc.h>
   68 #include <sys/refcount.h>
   69 #include <sys/resourcevar.h>
   70 #include <sys/rwlock.h>
   71 #include <sys/stat.h>
   72 #include <sys/syscallsubr.h>
   73 #include <sys/sysctl.h>
   74 #include <sys/sysproto.h>
   75 #include <sys/systm.h>
   76 #include <sys/sx.h>
   77 #include <sys/time.h>
   78 #include <sys/vnode.h>
   79 #include <sys/unistd.h>
   80 #include <sys/user.h>
   81 
   82 #include <security/mac/mac_framework.h>
   83 
   84 #include <vm/vm.h>
   85 #include <vm/vm_param.h>
   86 #include <vm/pmap.h>
   87 #include <vm/vm_extern.h>
   88 #include <vm/vm_map.h>
   89 #include <vm/vm_kern.h>
   90 #include <vm/vm_object.h>
   91 #include <vm/vm_page.h>
   92 #include <vm/vm_pageout.h>
   93 #include <vm/vm_pager.h>
   94 #include <vm/swap_pager.h>
   95 
   96 struct shm_mapping {
   97         char            *sm_path;
   98         Fnv32_t         sm_fnv;
   99         struct shmfd    *sm_shmfd;
  100         LIST_ENTRY(shm_mapping) sm_link;
  101 };
  102 
  103 static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
  104 static LIST_HEAD(, shm_mapping) *shm_dictionary;
  105 static struct sx shm_dict_lock;
  106 static struct mtx shm_timestamp_lock;
  107 static u_long shm_hash;
  108 static struct unrhdr *shm_ino_unr;
  109 static dev_t shm_dev_ino;
  110 
  111 #define SHM_HASH(fnv)   (&shm_dictionary[(fnv) & shm_hash])
  112 
  113 static void     shm_init(void *arg);
  114 static void     shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
  115 static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
  116 static int      shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
  117 
  118 static fo_rdwr_t        shm_read;
  119 static fo_rdwr_t        shm_write;
  120 static fo_truncate_t    shm_truncate;
  121 static fo_stat_t        shm_stat;
  122 static fo_close_t       shm_close;
  123 static fo_chmod_t       shm_chmod;
  124 static fo_chown_t       shm_chown;
  125 static fo_seek_t        shm_seek;
  126 static fo_fill_kinfo_t  shm_fill_kinfo;
  127 static fo_mmap_t        shm_mmap;
  128 
  129 /* File descriptor operations. */
  130 struct fileops shm_ops = {
  131         .fo_read = shm_read,
  132         .fo_write = shm_write,
  133         .fo_truncate = shm_truncate,
  134         .fo_ioctl = invfo_ioctl,
  135         .fo_poll = invfo_poll,
  136         .fo_kqfilter = invfo_kqfilter,
  137         .fo_stat = shm_stat,
  138         .fo_close = shm_close,
  139         .fo_chmod = shm_chmod,
  140         .fo_chown = shm_chown,
  141         .fo_sendfile = vn_sendfile,
  142         .fo_seek = shm_seek,
  143         .fo_fill_kinfo = shm_fill_kinfo,
  144         .fo_mmap = shm_mmap,
  145         .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
  146 };
  147 
  148 FEATURE(posix_shm, "POSIX shared memory");
  149 
  150 static int
  151 uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
  152 {
  153         vm_page_t m;
  154         vm_pindex_t idx;
  155         size_t tlen;
  156         int error, offset, rv;
  157 
  158         idx = OFF_TO_IDX(uio->uio_offset);
  159         offset = uio->uio_offset & PAGE_MASK;
  160         tlen = MIN(PAGE_SIZE - offset, len);
  161 
  162         VM_OBJECT_WLOCK(obj);
  163 
  164         /*
  165          * Read I/O without either a corresponding resident page or swap
  166          * page: use zero_region.  This is intended to avoid instantiating
  167          * pages on read from a sparse region.
  168          */
  169         if (uio->uio_rw == UIO_READ && vm_page_lookup(obj, idx) == NULL &&
  170             !vm_pager_has_page(obj, idx, NULL, NULL)) {
  171                 VM_OBJECT_WUNLOCK(obj);
  172                 return (uiomove(__DECONST(void *, zero_region), tlen, uio));
  173         }
  174 
  175         /*
  176          * Parallel reads of the page content from disk are prevented
  177          * by exclusive busy.
  178          *
  179          * Although the tmpfs vnode lock is held here, it is
  180          * nonetheless safe to sleep waiting for a free page.  The
  181          * pageout daemon does not need to acquire the tmpfs vnode
   182  * lock to page out the object's pages because it is an
   183  * OBJT_SWAP object.
  184          */
  185         m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
  186         if (m->valid != VM_PAGE_BITS_ALL) {
  187                 vm_page_xbusy(m);
  188                 if (vm_pager_has_page(obj, idx, NULL, NULL)) {
  189                         rv = vm_pager_get_pages(obj, &m, 1, NULL, NULL);
  190                         if (rv != VM_PAGER_OK) {
  191                                 printf(
  192             "uiomove_object: vm_obj %p idx %jd valid %x pager error %d\n",
  193                                     obj, idx, m->valid, rv);
  194                                 vm_page_lock(m);
  195                                 vm_page_free(m);
  196                                 vm_page_unlock(m);
  197                                 VM_OBJECT_WUNLOCK(obj);
  198                                 return (EIO);
  199                         }
  200                 } else
  201                         vm_page_zero_invalid(m, TRUE);
  202                 vm_page_xunbusy(m);
  203         }
  204         vm_page_lock(m);
  205         vm_page_hold(m);
  206         if (m->queue != PQ_ACTIVE)
  207                 vm_page_activate(m);
  208         else
  209                 vm_page_reference(m);
  210         vm_page_unlock(m);
  211         VM_OBJECT_WUNLOCK(obj);
  212         error = uiomove_fromphys(&m, offset, tlen, uio);
  213         if (uio->uio_rw == UIO_WRITE && error == 0) {
  214                 VM_OBJECT_WLOCK(obj);
  215                 vm_page_dirty(m);
  216                 vm_pager_page_unswapped(m);
  217                 VM_OBJECT_WUNLOCK(obj);
  218         }
  219         vm_page_lock(m);
  220         vm_page_unhold(m);
  221         vm_page_unlock(m);
  222 
  223         return (error);
  224 }
  225 
  226 int
  227 uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
  228 {
  229         ssize_t resid;
  230         size_t len;
  231         int error;
  232 
  233         error = 0;
  234         while ((resid = uio->uio_resid) > 0) {
  235                 if (obj_size <= uio->uio_offset)
  236                         break;
  237                 len = MIN(obj_size - uio->uio_offset, resid);
  238                 if (len == 0)
  239                         break;
  240                 error = uiomove_object_page(obj, len, uio);
  241                 if (error != 0 || resid == uio->uio_resid)
  242                         break;
  243         }
  244         return (error);
  245 }
  246 
  247 static int
  248 shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
  249 {
  250         struct shmfd *shmfd;
  251         off_t foffset;
  252         int error;
  253 
  254         shmfd = fp->f_data;
  255         foffset = foffset_lock(fp, 0);
  256         error = 0;
  257         switch (whence) {
  258         case L_INCR:
  259                 if (foffset < 0 ||
  260                     (offset > 0 && foffset > OFF_MAX - offset)) {
  261                         error = EOVERFLOW;
  262                         break;
  263                 }
  264                 offset += foffset;
  265                 break;
  266         case L_XTND:
  267                 if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
  268                         error = EOVERFLOW;
  269                         break;
  270                 }
  271                 offset += shmfd->shm_size;
  272                 break;
  273         case L_SET:
  274                 break;
  275         default:
  276                 error = EINVAL;
  277         }
  278         if (error == 0) {
  279                 if (offset < 0 || offset > shmfd->shm_size)
  280                         error = EINVAL;
  281                 else
  282                         td->td_uretoff.tdu_off = offset;
  283         }
  284         foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
  285         return (error);
  286 }
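
A consequence of the checks above, shown as a hedged userland sketch (fd is assumed to be a descriptor returned by shm_open(2)): SEEK_END resolves against the current object size, and offsets beyond the end are rejected outright rather than permitted as they would be on a regular file.

#include <sys/types.h>

#include <err.h>
#include <unistd.h>

static void
probe_shm_size(int fd)
{
	off_t end;

	/* SEEK_END (L_XTND above) resolves against shmfd->shm_size. */
	end = lseek(fd, 0, SEEK_END);

	/* Seeking past the end of the object fails with EINVAL. */
	if (lseek(fd, end + 1, SEEK_SET) == -1)
		warn("seek beyond object size rejected");
}
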
  287 
  288 static int
  289 shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
  290     int flags, struct thread *td)
  291 {
  292         struct shmfd *shmfd;
  293         void *rl_cookie;
  294         int error;
  295 
  296         shmfd = fp->f_data;
  297 #ifdef MAC
  298         error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
  299         if (error)
  300                 return (error);
  301 #endif
  302         foffset_lock_uio(fp, uio, flags);
  303         rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
  304             uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
  305         error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
  306         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
  307         foffset_unlock_uio(fp, uio, flags);
  308         return (error);
  309 }
  310 
  311 static int
  312 shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
  313     int flags, struct thread *td)
  314 {
  315         struct shmfd *shmfd;
  316         void *rl_cookie;
  317         int error;
  318 
  319         shmfd = fp->f_data;
  320 #ifdef MAC
  321         error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
  322         if (error)
  323                 return (error);
  324 #endif
  325         foffset_lock_uio(fp, uio, flags);
  326         if ((flags & FOF_OFFSET) == 0) {
  327                 rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
  328                     &shmfd->shm_mtx);
  329         } else {
  330                 rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
  331                     uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
  332         }
  333 
  334         error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
  335         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
  336         foffset_unlock_uio(fp, uio, flags);
  337         return (error);
  338 }
  339 
  340 static int
  341 shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
  342     struct thread *td)
  343 {
  344         struct shmfd *shmfd;
  345 #ifdef MAC
  346         int error;
  347 #endif
  348 
  349         shmfd = fp->f_data;
  350 #ifdef MAC
  351         error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
  352         if (error)
  353                 return (error);
  354 #endif
  355         return (shm_dotruncate(shmfd, length));
  356 }
  357 
  358 static int
  359 shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
  360     struct thread *td)
  361 {
  362         struct shmfd *shmfd;
  363 #ifdef MAC
  364         int error;
  365 #endif
  366 
  367         shmfd = fp->f_data;
  368 
  369 #ifdef MAC
  370         error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
  371         if (error)
  372                 return (error);
  373 #endif
  374         
  375         /*
   376          * Attempt to return reasonable values for fstat() on a memory file
  377          * descriptor.
  378          */
  379         bzero(sb, sizeof(*sb));
  380         sb->st_blksize = PAGE_SIZE;
  381         sb->st_size = shmfd->shm_size;
  382         sb->st_blocks = howmany(sb->st_size, sb->st_blksize);
  383         mtx_lock(&shm_timestamp_lock);
  384         sb->st_atim = shmfd->shm_atime;
  385         sb->st_ctim = shmfd->shm_ctime;
  386         sb->st_mtim = shmfd->shm_mtime;
  387         sb->st_birthtim = shmfd->shm_birthtime;
  388         sb->st_mode = S_IFREG | shmfd->shm_mode;                /* XXX */
  389         sb->st_uid = shmfd->shm_uid;
  390         sb->st_gid = shmfd->shm_gid;
  391         mtx_unlock(&shm_timestamp_lock);
  392         sb->st_dev = shm_dev_ino;
  393         sb->st_ino = shmfd->shm_ino;
  394 
  395         return (0);
  396 }
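
In userland these values surface through fstat(2); a small sketch (fd again assumed to come from shm_open(2)) of the fields filled in above:

#include <sys/stat.h>

#include <stdint.h>
#include <stdio.h>

static void
print_shm_stat(int fd)
{
	struct stat sb;

	if (fstat(fd, &sb) != 0)
		return;

	/* st_size tracks shm_size; st_ino and st_dev are the synthetic values above. */
	printf("size=%jd mode=%o ino=%ju\n", (intmax_t)sb.st_size,
	    (unsigned int)(sb.st_mode & 0777), (uintmax_t)sb.st_ino);
}
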
  397 
  398 static int
  399 shm_close(struct file *fp, struct thread *td)
  400 {
  401         struct shmfd *shmfd;
  402 
  403         shmfd = fp->f_data;
  404         fp->f_data = NULL;
  405         shm_drop(shmfd);
  406 
  407         return (0);
  408 }
  409 
  410 int
  411 shm_dotruncate(struct shmfd *shmfd, off_t length)
  412 {
  413         vm_object_t object;
  414         vm_page_t m;
  415         vm_pindex_t idx, nobjsize;
  416         vm_ooffset_t delta;
  417         int base, rv;
  418 
  419         KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
  420         object = shmfd->shm_object;
  421         VM_OBJECT_WLOCK(object);
  422         if (length == shmfd->shm_size) {
  423                 VM_OBJECT_WUNLOCK(object);
  424                 return (0);
  425         }
  426         nobjsize = OFF_TO_IDX(length + PAGE_MASK);
  427 
  428         /* Are we shrinking?  If so, trim the end. */
  429         if (length < shmfd->shm_size) {
  430                 /*
  431                  * Disallow any requests to shrink the size if this
  432                  * object is mapped into the kernel.
  433                  */
  434                 if (shmfd->shm_kmappings > 0) {
  435                         VM_OBJECT_WUNLOCK(object);
  436                         return (EBUSY);
  437                 }
  438 
  439                 /*
  440                  * Zero the truncated part of the last page.
  441                  */
  442                 base = length & PAGE_MASK;
  443                 if (base != 0) {
  444                         idx = OFF_TO_IDX(length);
  445 retry:
  446                         m = vm_page_lookup(object, idx);
  447                         if (m != NULL) {
  448                                 if (vm_page_sleep_if_busy(m, "shmtrc"))
  449                                         goto retry;
  450                         } else if (vm_pager_has_page(object, idx, NULL, NULL)) {
  451                                 m = vm_page_alloc(object, idx,
  452                                     VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
  453                                 if (m == NULL)
  454                                         goto retry;
  455                                 rv = vm_pager_get_pages(object, &m, 1, NULL,
  456                                     NULL);
  457                                 vm_page_lock(m);
  458                                 if (rv == VM_PAGER_OK) {
  459                                         vm_page_deactivate(m);
  460                                         vm_page_unlock(m);
  461                                         vm_page_xunbusy(m);
  462                                 } else {
  463                                         vm_page_free(m);
  464                                         vm_page_unlock(m);
  465                                         VM_OBJECT_WUNLOCK(object);
  466                                         return (EIO);
  467                                 }
  468                         }
  469                         if (m != NULL) {
  470                                 pmap_zero_page_area(m, base, PAGE_SIZE - base);
  471                                 KASSERT(m->valid == VM_PAGE_BITS_ALL,
  472                                     ("shm_dotruncate: page %p is invalid", m));
  473                                 vm_page_dirty(m);
  474                                 vm_pager_page_unswapped(m);
  475                         }
  476                 }
  477                 delta = IDX_TO_OFF(object->size - nobjsize);
  478 
  479                 /* Toss in memory pages. */
  480                 if (nobjsize < object->size)
  481                         vm_object_page_remove(object, nobjsize, object->size,
  482                             0);
  483 
  484                 /* Toss pages from swap. */
  485                 if (object->type == OBJT_SWAP)
  486                         swap_pager_freespace(object, nobjsize, delta);
  487 
   488                 /* Release the swap accounted for the object. */
  489                 swap_release_by_cred(delta, object->cred);
  490                 object->charge -= delta;
  491         } else {
  492                 /* Try to reserve additional swap space. */
  493                 delta = IDX_TO_OFF(nobjsize - object->size);
  494                 if (!swap_reserve_by_cred(delta, object->cred)) {
  495                         VM_OBJECT_WUNLOCK(object);
  496                         return (ENOMEM);
  497                 }
  498                 object->charge += delta;
  499         }
  500         shmfd->shm_size = length;
  501         mtx_lock(&shm_timestamp_lock);
  502         vfs_timestamp(&shmfd->shm_ctime);
  503         shmfd->shm_mtime = shmfd->shm_ctime;
  504         mtx_unlock(&shm_timestamp_lock);
  505         object->size = nobjsize;
  506         VM_OBJECT_WUNLOCK(object);
  507         return (0);
  508 }
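
From userland this routine is reached through ftruncate(2) on the descriptor; a hedged sketch of the resulting behavior (the sizes are arbitrary):

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int
resize_shm(int fd)
{
	/* Growing reserves swap for the new range; ENOMEM if that fails. */
	if (ftruncate(fd, 1024 * 1024) == -1) {
		if (errno == ENOMEM)
			fprintf(stderr, "swap reservation failed\n");
		return (-1);
	}

	/*
	 * Shrinking zeroes the tail of the last remaining page and frees
	 * pages beyond the new end; it fails with EBUSY while the object
	 * is mapped into the kernel (shm_kmappings > 0).
	 */
	if (ftruncate(fd, 4096) == -1)
		return (-1);
	return (0);
}
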
  509 
  510 /*
  511  * shmfd object management including creation and reference counting
  512  * routines.
  513  */
  514 struct shmfd *
  515 shm_alloc(struct ucred *ucred, mode_t mode)
  516 {
  517         struct shmfd *shmfd;
  518         int ino;
  519 
  520         shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
  521         shmfd->shm_size = 0;
  522         shmfd->shm_uid = ucred->cr_uid;
  523         shmfd->shm_gid = ucred->cr_gid;
  524         shmfd->shm_mode = mode;
  525         shmfd->shm_object = vm_pager_allocate(OBJT_DEFAULT, NULL,
  526             shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
  527         KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
  528         shmfd->shm_object->pg_color = 0;
  529         VM_OBJECT_WLOCK(shmfd->shm_object);
  530         vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING);
  531         vm_object_set_flag(shmfd->shm_object, OBJ_COLORED | OBJ_NOSPLIT);
  532         VM_OBJECT_WUNLOCK(shmfd->shm_object);
  533         vfs_timestamp(&shmfd->shm_birthtime);
  534         shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
  535             shmfd->shm_birthtime;
  536         ino = alloc_unr(shm_ino_unr);
  537         if (ino == -1)
  538                 shmfd->shm_ino = 0;
  539         else
  540                 shmfd->shm_ino = ino;
  541         refcount_init(&shmfd->shm_refs, 1);
  542         mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
  543         rangelock_init(&shmfd->shm_rl);
  544 #ifdef MAC
  545         mac_posixshm_init(shmfd);
  546         mac_posixshm_create(ucred, shmfd);
  547 #endif
  548 
  549         return (shmfd);
  550 }
  551 
  552 struct shmfd *
  553 shm_hold(struct shmfd *shmfd)
  554 {
  555 
  556         refcount_acquire(&shmfd->shm_refs);
  557         return (shmfd);
  558 }
  559 
  560 void
  561 shm_drop(struct shmfd *shmfd)
  562 {
  563 
  564         if (refcount_release(&shmfd->shm_refs)) {
  565 #ifdef MAC
  566                 mac_posixshm_destroy(shmfd);
  567 #endif
  568                 rangelock_destroy(&shmfd->shm_rl);
  569                 mtx_destroy(&shmfd->shm_mtx);
  570                 vm_object_deallocate(shmfd->shm_object);
  571                 if (shmfd->shm_ino != 0)
  572                         free_unr(shm_ino_unr, shmfd->shm_ino);
  573                 free(shmfd, M_SHMFD);
  574         }
  575 }
  576 
  577 /*
  578  * Determine if the credentials have sufficient permissions for a
  579  * specified combination of FREAD and FWRITE.
  580  */
  581 int
  582 shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
  583 {
  584         accmode_t accmode;
  585         int error;
  586 
  587         accmode = 0;
  588         if (flags & FREAD)
  589                 accmode |= VREAD;
  590         if (flags & FWRITE)
  591                 accmode |= VWRITE;
  592         mtx_lock(&shm_timestamp_lock);
  593         error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
  594             accmode, ucred, NULL);
  595         mtx_unlock(&shm_timestamp_lock);
  596         return (error);
  597 }
  598 
  599 /*
  600  * Dictionary management.  We maintain an in-kernel dictionary to map
  601  * paths to shmfd objects.  We use the FNV hash on the path to store
  602  * the mappings in a hash table.
  603  */
  604 static void
  605 shm_init(void *arg)
  606 {
  607 
  608         mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
  609         sx_init(&shm_dict_lock, "shm dictionary");
  610         shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
  611         shm_ino_unr = new_unrhdr(1, INT32_MAX, NULL);
  612         KASSERT(shm_ino_unr != NULL, ("shm fake inodes not initialized"));
  613         shm_dev_ino = devfs_alloc_cdp_inode();
  614         KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
  615 }
  616 SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);
  617 
  618 static struct shmfd *
  619 shm_lookup(char *path, Fnv32_t fnv)
  620 {
  621         struct shm_mapping *map;
  622 
  623         LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
  624                 if (map->sm_fnv != fnv)
  625                         continue;
  626                 if (strcmp(map->sm_path, path) == 0)
  627                         return (map->sm_shmfd);
  628         }
  629 
  630         return (NULL);
  631 }
  632 
  633 static void
  634 shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
  635 {
  636         struct shm_mapping *map;
  637 
  638         map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
  639         map->sm_path = path;
  640         map->sm_fnv = fnv;
  641         map->sm_shmfd = shm_hold(shmfd);
  642         shmfd->shm_path = path;
  643         LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
  644 }
  645 
  646 static int
  647 shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
  648 {
  649         struct shm_mapping *map;
  650         int error;
  651 
  652         LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
  653                 if (map->sm_fnv != fnv)
  654                         continue;
  655                 if (strcmp(map->sm_path, path) == 0) {
  656 #ifdef MAC
  657                         error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
  658                         if (error)
  659                                 return (error);
  660 #endif
  661                         error = shm_access(map->sm_shmfd, ucred,
  662                             FREAD | FWRITE);
  663                         if (error)
  664                                 return (error);
  665                         map->sm_shmfd->shm_path = NULL;
  666                         LIST_REMOVE(map, sm_link);
  667                         shm_drop(map->sm_shmfd);
  668                         free(map->sm_path, M_SHMFD);
  669                         free(map, M_SHMFD);
  670                         return (0);
  671                 }
  672         }
  673 
  674         return (ENOENT);
  675 }
  676 
  677 int
  678 kern_shm_open(struct thread *td, const char *userpath, int flags, mode_t mode,
  679     struct filecaps *fcaps)
  680 {
  681         struct filedesc *fdp;
  682         struct shmfd *shmfd;
  683         struct file *fp;
  684         char *path;
  685         const char *pr_path;
  686         size_t pr_pathlen;
  687         Fnv32_t fnv;
  688         mode_t cmode;
  689         int fd, error;
  690 
  691 #ifdef CAPABILITY_MODE
  692         /*
   693          * In capability mode, shm_open(2) is only allowed for anonymous objects.
  694          */
  695         if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
  696                 return (ECAPMODE);
  697 #endif
  698 
  699         if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
  700                 return (EINVAL);
  701 
  702         if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
  703                 return (EINVAL);
  704 
  705         fdp = td->td_proc->p_fd;
  706         cmode = (mode & ~fdp->fd_cmask) & ACCESSPERMS;
  707 
  708         error = falloc_caps(td, &fp, &fd, O_CLOEXEC, fcaps);
  709         if (error)
  710                 return (error);
  711 
  712         /* A SHM_ANON path pointer creates an anonymous object. */
  713         if (userpath == SHM_ANON) {
  714                 /* A read-only anonymous object is pointless. */
  715                 if ((flags & O_ACCMODE) == O_RDONLY) {
  716                         fdclose(td, fp, fd);
  717                         fdrop(fp, td);
  718                         return (EINVAL);
  719                 }
  720                 shmfd = shm_alloc(td->td_ucred, cmode);
  721         } else {
  722                 path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
  723                 pr_path = td->td_ucred->cr_prison->pr_path;
  724 
  725                 /* Construct a full pathname for jailed callers. */
  726                 pr_pathlen = strcmp(pr_path, "/") == 0 ? 0
  727                     : strlcpy(path, pr_path, MAXPATHLEN);
  728                 error = copyinstr(userpath, path + pr_pathlen,
  729                     MAXPATHLEN - pr_pathlen, NULL);
  730 #ifdef KTRACE
  731                 if (error == 0 && KTRPOINT(curthread, KTR_NAMEI))
  732                         ktrnamei(path);
  733 #endif
  734                 /* Require paths to start with a '/' character. */
  735                 if (error == 0 && path[pr_pathlen] != '/')
  736                         error = EINVAL;
  737                 if (error) {
  738                         fdclose(td, fp, fd);
  739                         fdrop(fp, td);
  740                         free(path, M_SHMFD);
  741                         return (error);
  742                 }
  743 
  744                 fnv = fnv_32_str(path, FNV1_32_INIT);
  745                 sx_xlock(&shm_dict_lock);
  746                 shmfd = shm_lookup(path, fnv);
  747                 if (shmfd == NULL) {
  748                         /* Object does not yet exist, create it if requested. */
  749                         if (flags & O_CREAT) {
  750 #ifdef MAC
  751                                 error = mac_posixshm_check_create(td->td_ucred,
  752                                     path);
  753                                 if (error == 0) {
  754 #endif
  755                                         shmfd = shm_alloc(td->td_ucred, cmode);
  756                                         shm_insert(path, fnv, shmfd);
  757 #ifdef MAC
  758                                 }
  759 #endif
  760                         } else {
  761                                 free(path, M_SHMFD);
  762                                 error = ENOENT;
  763                         }
  764                 } else {
  765                         /*
  766                          * Object already exists, obtain a new
  767                          * reference if requested and permitted.
  768                          */
  769                         free(path, M_SHMFD);
  770                         if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
  771                                 error = EEXIST;
  772                         else {
  773 #ifdef MAC
  774                                 error = mac_posixshm_check_open(td->td_ucred,
  775                                     shmfd, FFLAGS(flags & O_ACCMODE));
  776                                 if (error == 0)
  777 #endif
  778                                 error = shm_access(shmfd, td->td_ucred,
  779                                     FFLAGS(flags & O_ACCMODE));
  780                         }
  781 
  782                         /*
  783                          * Truncate the file back to zero length if
  784                          * O_TRUNC was specified and the object was
  785                          * opened with read/write.
  786                          */
  787                         if (error == 0 &&
  788                             (flags & (O_ACCMODE | O_TRUNC)) ==
  789                             (O_RDWR | O_TRUNC)) {
  790 #ifdef MAC
  791                                 error = mac_posixshm_check_truncate(
  792                                         td->td_ucred, fp->f_cred, shmfd);
  793                                 if (error == 0)
  794 #endif
  795                                         shm_dotruncate(shmfd, 0);
  796                         }
  797                         if (error == 0)
  798                                 shm_hold(shmfd);
  799                 }
  800                 sx_xunlock(&shm_dict_lock);
  801 
  802                 if (error) {
  803                         fdclose(td, fp, fd);
  804                         fdrop(fp, td);
  805                         return (error);
  806                 }
  807         }
  808 
  809         finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);
  810 
  811         td->td_retval[0] = fd;
  812         fdrop(fp, td);
  813 
  814         return (0);
  815 }
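
The SHM_ANON and capability-mode handling above can be exercised from userland with a short Capsicum sketch (hedged; error handling kept minimal):

#include <sys/capsicum.h>
#include <sys/mman.h>

#include <fcntl.h>

static int
anon_shm_in_capmode(void)
{
	int fd;

	/* After cap_enter(2), named objects are rejected with ECAPMODE. */
	if (cap_enter() == -1)
		return (-1);

	/* Anonymous objects remain available; O_RDONLY would return EINVAL. */
	fd = shm_open(SHM_ANON, O_RDWR, 0600);
	return (fd);
}
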
  816 
  817 /* System calls. */
  818 int
  819 sys_shm_open(struct thread *td, struct shm_open_args *uap)
  820 {
  821 
  822         return (kern_shm_open(td, uap->path, uap->flags, uap->mode, NULL));
  823 }
  824 
  825 int
  826 sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
  827 {
  828         char *path;
  829         const char *pr_path;
  830         size_t pr_pathlen;
  831         Fnv32_t fnv;
  832         int error;
  833 
  834         path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
  835         pr_path = td->td_ucred->cr_prison->pr_path;
  836         pr_pathlen = strcmp(pr_path, "/") == 0 ? 0
  837             : strlcpy(path, pr_path, MAXPATHLEN);
  838         error = copyinstr(uap->path, path + pr_pathlen, MAXPATHLEN - pr_pathlen,
  839             NULL);
  840         if (error) {
  841                 free(path, M_TEMP);
  842                 return (error);
  843         }
  844 #ifdef KTRACE
  845         if (KTRPOINT(curthread, KTR_NAMEI))
  846                 ktrnamei(path);
  847 #endif
  848         fnv = fnv_32_str(path, FNV1_32_INIT);
  849         sx_xlock(&shm_dict_lock);
  850         error = shm_remove(path, fnv, td->td_ucred);
  851         sx_xunlock(&shm_dict_lock);
  852         free(path, M_TEMP);
  853 
  854         return (error);
  855 }
  856 
  857 int
  858 shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
  859     vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
  860     vm_ooffset_t foff, struct thread *td)
  861 {
  862         struct shmfd *shmfd;
  863         vm_prot_t maxprot;
  864         int error;
  865 
  866         shmfd = fp->f_data;
  867         maxprot = VM_PROT_NONE;
  868 
  869         /* FREAD should always be set. */
  870         if ((fp->f_flag & FREAD) != 0)
  871                 maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
  872         if ((fp->f_flag & FWRITE) != 0)
  873                 maxprot |= VM_PROT_WRITE;
  874 
  875         /* Don't permit shared writable mappings on read-only descriptors. */
  876         if ((flags & MAP_SHARED) != 0 &&
  877             (maxprot & VM_PROT_WRITE) == 0 &&
  878             (prot & VM_PROT_WRITE) != 0)
  879                 return (EACCES);
  880         maxprot &= cap_maxprot;
  881 
  882         /* See comment in vn_mmap(). */
  883         if (
  884 #ifdef _LP64
  885             objsize > OFF_MAX ||
  886 #endif
  887             foff < 0 || foff > OFF_MAX - objsize)
  888                 return (EINVAL);
  889 
  890 #ifdef MAC
  891         error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
  892         if (error != 0)
  893                 return (error);
  894 #endif
  895         
  896         mtx_lock(&shm_timestamp_lock);
  897         vfs_timestamp(&shmfd->shm_atime);
  898         mtx_unlock(&shm_timestamp_lock);
  899         vm_object_reference(shmfd->shm_object);
  900 
  901         error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
  902             shmfd->shm_object, foff, FALSE, td);
  903         if (error != 0)
  904                 vm_object_deallocate(shmfd->shm_object);
  905         return (error);
  906 }
  907 
  908 static int
  909 shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
  910     struct thread *td)
  911 {
  912         struct shmfd *shmfd;
  913         int error;
  914 
  915         error = 0;
  916         shmfd = fp->f_data;
  917         mtx_lock(&shm_timestamp_lock);
  918         /*
   919          * SUSv4 says that the execute permission bits need not be
   920          * affected.  Be consistent with our handling in shm_open().
  921          */
  922 #ifdef MAC
  923         error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
  924         if (error != 0)
  925                 goto out;
  926 #endif
  927         error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid,
  928             shmfd->shm_gid, VADMIN, active_cred, NULL);
  929         if (error != 0)
  930                 goto out;
  931         shmfd->shm_mode = mode & ACCESSPERMS;
  932 out:
  933         mtx_unlock(&shm_timestamp_lock);
  934         return (error);
  935 }
  936 
  937 static int
  938 shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
  939     struct thread *td)
  940 {
  941         struct shmfd *shmfd;
  942         int error;
  943 
  944         error = 0;
  945         shmfd = fp->f_data;
  946         mtx_lock(&shm_timestamp_lock);
  947 #ifdef MAC
  948         error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
  949         if (error != 0)
  950                 goto out;
  951 #endif
  952         if (uid == (uid_t)-1)
  953                 uid = shmfd->shm_uid;
  954         if (gid == (gid_t)-1)
  955                  gid = shmfd->shm_gid;
  956         if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
  957             (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
  958             (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN, 0)))
  959                 goto out;
  960         shmfd->shm_uid = uid;
  961         shmfd->shm_gid = gid;
  962 out:
  963         mtx_unlock(&shm_timestamp_lock);
  964         return (error);
  965 }
  966 
  967 /*
  968  * Helper routines to allow the backing object of a shared memory file
  969  * descriptor to be mapped in the kernel.
  970  */
  971 int
  972 shm_map(struct file *fp, size_t size, off_t offset, void **memp)
  973 {
  974         struct shmfd *shmfd;
  975         vm_offset_t kva, ofs;
  976         vm_object_t obj;
  977         int rv;
  978 
  979         if (fp->f_type != DTYPE_SHM)
  980                 return (EINVAL);
  981         shmfd = fp->f_data;
  982         obj = shmfd->shm_object;
  983         VM_OBJECT_WLOCK(obj);
  984         /*
  985          * XXXRW: This validation is probably insufficient, and subject to
  986          * sign errors.  It should be fixed.
  987          */
  988         if (offset >= shmfd->shm_size ||
  989             offset + size > round_page(shmfd->shm_size)) {
  990                 VM_OBJECT_WUNLOCK(obj);
  991                 return (EINVAL);
  992         }
  993 
  994         shmfd->shm_kmappings++;
  995         vm_object_reference_locked(obj);
  996         VM_OBJECT_WUNLOCK(obj);
  997 
  998         /* Map the object into the kernel_map and wire it. */
  999         kva = vm_map_min(kernel_map);
 1000         ofs = offset & PAGE_MASK;
 1001         offset = trunc_page(offset);
 1002         size = round_page(size + ofs);
 1003         rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
 1004             VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
 1005             VM_PROT_READ | VM_PROT_WRITE, 0);
 1006         if (rv == KERN_SUCCESS) {
 1007                 rv = vm_map_wire(kernel_map, kva, kva + size,
 1008                     VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
 1009                 if (rv == KERN_SUCCESS) {
 1010                         *memp = (void *)(kva + ofs);
 1011                         return (0);
 1012                 }
 1013                 vm_map_remove(kernel_map, kva, kva + size);
 1014         } else
 1015                 vm_object_deallocate(obj);
 1016 
 1017         /* On failure, drop our mapping reference. */
 1018         VM_OBJECT_WLOCK(obj);
 1019         shmfd->shm_kmappings--;
 1020         VM_OBJECT_WUNLOCK(obj);
 1021 
 1022         return (vm_mmap_to_errno(rv));
 1023 }
 1024 
 1025 /*
 1026  * We require the caller to unmap the entire entry.  This allows us to
 1027  * safely decrement shm_kmappings when a mapping is removed.
 1028  */
 1029 int
 1030 shm_unmap(struct file *fp, void *mem, size_t size)
 1031 {
 1032         struct shmfd *shmfd;
 1033         vm_map_entry_t entry;
 1034         vm_offset_t kva, ofs;
 1035         vm_object_t obj;
 1036         vm_pindex_t pindex;
 1037         vm_prot_t prot;
 1038         boolean_t wired;
 1039         vm_map_t map;
 1040         int rv;
 1041 
 1042         if (fp->f_type != DTYPE_SHM)
 1043                 return (EINVAL);
 1044         shmfd = fp->f_data;
 1045         kva = (vm_offset_t)mem;
 1046         ofs = kva & PAGE_MASK;
 1047         kva = trunc_page(kva);
 1048         size = round_page(size + ofs);
 1049         map = kernel_map;
 1050         rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
 1051             &obj, &pindex, &prot, &wired);
 1052         if (rv != KERN_SUCCESS)
 1053                 return (EINVAL);
 1054         if (entry->start != kva || entry->end != kva + size) {
 1055                 vm_map_lookup_done(map, entry);
 1056                 return (EINVAL);
 1057         }
 1058         vm_map_lookup_done(map, entry);
 1059         if (obj != shmfd->shm_object)
 1060                 return (EINVAL);
 1061         vm_map_remove(map, kva, kva + size);
 1062         VM_OBJECT_WLOCK(obj);
 1063         KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
 1064         shmfd->shm_kmappings--;
 1065         VM_OBJECT_WUNLOCK(obj);
 1066         return (0);
 1067 }
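
A minimal sketch of a hypothetical in-kernel consumer of these helpers, assuming fp is already known to reference a DTYPE_SHM file and that the object is at least one page long:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/file.h>
#include <sys/mman.h>

static int
touch_shm_page(struct file *fp)
{
	void *mem;
	int error;

	/* Wire the first page of the backing object into the kernel map. */
	error = shm_map(fp, PAGE_SIZE, 0, &mem);
	if (error != 0)
		return (error);

	/* The mapping is wired, so it can be touched without faulting. */
	memset(mem, 0, PAGE_SIZE);

	/* The entire mapping must be removed in a single call (see above). */
	return (shm_unmap(fp, mem, PAGE_SIZE));
}
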
 1068 
 1069 static int
 1070 shm_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
 1071 {
 1072         const char *path, *pr_path;
 1073         struct shmfd *shmfd;
 1074         size_t pr_pathlen;
 1075 
 1076         kif->kf_type = KF_TYPE_SHM;
 1077         shmfd = fp->f_data;
 1078 
 1079         mtx_lock(&shm_timestamp_lock);
 1080         kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;    /* XXX */
 1081         mtx_unlock(&shm_timestamp_lock);
 1082         kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
 1083         if (shmfd->shm_path != NULL) {
 1084                 sx_slock(&shm_dict_lock);
 1085                 if (shmfd->shm_path != NULL) {
 1086                         path = shmfd->shm_path;
 1087                         pr_path = curthread->td_ucred->cr_prison->pr_path;
 1088                         if (strcmp(pr_path, "/") != 0) {
 1089                                 /* Return the jail-rooted pathname. */
 1090                                 pr_pathlen = strlen(pr_path);
 1091                                 if (strncmp(path, pr_path, pr_pathlen) == 0 &&
 1092                                     path[pr_pathlen] == '/')
 1093                                         path += pr_pathlen;
 1094                         }
 1095                         strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
 1096                 }
 1097                 sx_sunlock(&shm_dict_lock);
 1098         }
 1099         return (0);
 1100 }
