FreeBSD/Linux Kernel Cross Reference
sys/kern/sysv_shm.c


    1 /*      $NetBSD: sysv_shm.c,v 1.113.2.1 2009/01/14 17:54:21 snj Exp $   */
    2 
    3 /*-
    4  * Copyright (c) 1999, 2007 The NetBSD Foundation, Inc.
    5  * All rights reserved.
    6  *
    7  * This code is derived from software contributed to The NetBSD Foundation
    8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
    9  * NASA Ames Research Center, and by Mindaugas Rasiukevicius.
   10  *
   11  * Redistribution and use in source and binary forms, with or without
   12  * modification, are permitted provided that the following conditions
   13  * are met:
   14  * 1. Redistributions of source code must retain the above copyright
   15  *    notice, this list of conditions and the following disclaimer.
   16  * 2. Redistributions in binary form must reproduce the above copyright
   17  *    notice, this list of conditions and the following disclaimer in the
   18  *    documentation and/or other materials provided with the distribution.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   30  * POSSIBILITY OF SUCH DAMAGE.
   31  */
   32 
   33 /*
   34  * Copyright (c) 1994 Adam Glass and Charles M. Hannum.  All rights reserved.
   35  *
   36  * Redistribution and use in source and binary forms, with or without
   37  * modification, are permitted provided that the following conditions
   38  * are met:
   39  * 1. Redistributions of source code must retain the above copyright
   40  *    notice, this list of conditions and the following disclaimer.
   41  * 2. Redistributions in binary form must reproduce the above copyright
   42  *    notice, this list of conditions and the following disclaimer in the
   43  *    documentation and/or other materials provided with the distribution.
   44  * 3. All advertising materials mentioning features or use of this software
   45  *    must display the following acknowledgement:
   46  *      This product includes software developed by Adam Glass and Charles M.
   47  *      Hannum.
   48  * 4. The names of the authors may not be used to endorse or promote products
   49  *    derived from this software without specific prior written permission.
   50  *
   51  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
   52  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   53  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   54  * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   55  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   56  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   57  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   58  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   59  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   60  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   61  */
   62 
   63 #include <sys/cdefs.h>
   64 __KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.113.2.1 2009/01/14 17:54:21 snj Exp $");
   65 
   66 #define SYSVSHM
   67 
   68 #include <sys/param.h>
   69 #include <sys/kernel.h>
   70 #include <sys/kmem.h>
   71 #include <sys/shm.h>
   72 #include <sys/mutex.h>
   73 #include <sys/mman.h>
   74 #include <sys/stat.h>
   75 #include <sys/sysctl.h>
   76 #include <sys/mount.h>          /* XXX for <sys/syscallargs.h> */
   77 #include <sys/syscallargs.h>
   78 #include <sys/queue.h>
   79 #include <sys/pool.h>
   80 #include <sys/kauth.h>
   81 
   82 #include <uvm/uvm_extern.h>
   83 #include <uvm/uvm_object.h>
   84 
   85 int shm_nused;
   86 struct  shmid_ds *shmsegs;
   87 
   88 struct shmmap_entry {
   89         SLIST_ENTRY(shmmap_entry) next;
   90         vaddr_t va;
   91         int shmid;
   92 };
   93 
   94 static kmutex_t         shm_lock;
   95 static kcondvar_t *     shm_cv;
   96 static struct pool      shmmap_entry_pool;
   97 static int              shm_last_free, shm_use_phys;
   98 static size_t           shm_committed;
   99 
  100 static kcondvar_t       shm_realloc_cv;
  101 static bool             shm_realloc_state;
  102 static u_int            shm_realloc_disable;
  103 
  104 struct shmmap_state {
  105         unsigned int nitems;
  106         unsigned int nrefs;
  107         SLIST_HEAD(, shmmap_entry) entries;
  108 };
  109 
  110 #ifdef SHMDEBUG
  111 #define SHMPRINTF(a) printf a
  112 #else
  113 #define SHMPRINTF(a)
  114 #endif
  115 
  116 static int shmrealloc(int);
  117 
  118 /*
  119  * Find a shared memory segment by its identifier.
  120  *  => must be called with shm_lock held;
  121  */
  122 static struct shmid_ds *
  123 shm_find_segment_by_shmid(int shmid)
  124 {
  125         int segnum;
  126         struct shmid_ds *shmseg;
  127 
  128         KASSERT(mutex_owned(&shm_lock));
  129 
  130         segnum = IPCID_TO_IX(shmid);
  131         if (segnum < 0 || segnum >= shminfo.shmmni)
  132                 return NULL;
  133         shmseg = &shmsegs[segnum];
  134         if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0)
  135                 return NULL;
  136         if ((shmseg->shm_perm.mode &
  137             (SHMSEG_REMOVED|SHMSEG_RMLINGER)) == SHMSEG_REMOVED)
  138                 return NULL;
  139         if (shmseg->shm_perm._seq != IPCID_TO_SEQ(shmid))
  140                 return NULL;
  141 
  142         return shmseg;
  143 }
  144 
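The checks above work because a shmid is not a bare array index: it also carries the slot's sequence number, which sys_shmget() bumps each time a slot is reused (note the 0x7fff mask later in this file). A rough, illustrative sketch of the split performed by the IPCID_TO_IX()/IPCID_TO_SEQ() macros; the exact definitions live in <sys/ipc.h>, and the helper names here are hypothetical:

/* Illustrative approximation of the <sys/ipc.h> id encoding (not part of this file). */
static int
ipcid_to_ix(int shmid)
{
        return shmid & 0xffff;          /* low bits: index into shmsegs[] */
}

static int
ipcid_to_seq(int shmid)
{
        return (shmid >> 16) & 0xffff;  /* high bits: slot reuse sequence */
}

A stale shmid for a reused slot thus fails the sequence comparison and the lookup returns NULL.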
  145 /*
  146  * Free memory segment.
  147  *  => must be called with shm_lock held;
  148  */
  149 static void
  150 shm_free_segment(int segnum)
  151 {
  152         struct shmid_ds *shmseg;
  153         size_t size;
  154         bool wanted;
  155 
  156         KASSERT(mutex_owned(&shm_lock));
  157 
  158         shmseg = &shmsegs[segnum];
  159         SHMPRINTF(("shm freeing key 0x%lx seq 0x%x\n",
  160             shmseg->shm_perm._key, shmseg->shm_perm._seq));
  161 
  162         size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
  163         wanted = (shmseg->shm_perm.mode & SHMSEG_WANTED);
  164 
  165         shmseg->_shm_internal = NULL;
  166         shm_committed -= btoc(size);
  167         shm_nused--;
  168         shmseg->shm_perm.mode = SHMSEG_FREE;
  169         shm_last_free = segnum;
  170         if (wanted == true)
  171                 cv_broadcast(&shm_cv[segnum]);
  172 }
  173 
  174 /*
  175  * Delete entry from the shm map.
  176  *  => must be called with shm_lock held;
  177  */
  178 static struct uvm_object *
  179 shm_delete_mapping(struct shmmap_state *shmmap_s,
  180     struct shmmap_entry *shmmap_se)
  181 {
  182         struct uvm_object *uobj = NULL;
  183         struct shmid_ds *shmseg;
  184         int segnum;
  185 
  186         KASSERT(mutex_owned(&shm_lock));
  187 
  188         segnum = IPCID_TO_IX(shmmap_se->shmid);
  189         shmseg = &shmsegs[segnum];
  190         SLIST_REMOVE(&shmmap_s->entries, shmmap_se, shmmap_entry, next);
  191         shmmap_s->nitems--;
  192         shmseg->shm_dtime = time_second;
  193         if ((--shmseg->shm_nattch <= 0) &&
  194             (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
  195                 uobj = shmseg->_shm_internal;
  196                 shm_free_segment(segnum);
  197         }
  198 
  199         return uobj;
  200 }
  201 
  202 /*
  203  * Get a non-shared shm map for that vmspace.  Note that memory
  204  * allocation might be performed with the lock held.
  205  */
  206 static struct shmmap_state *
  207 shmmap_getprivate(struct proc *p)
  208 {
  209         struct shmmap_state *oshmmap_s, *shmmap_s;
  210         struct shmmap_entry *oshmmap_se, *shmmap_se;
  211 
  212         KASSERT(mutex_owned(&shm_lock));
  213 
  214         /* 1. A shm map with refcnt = 1 is used only by ourselves - return it */
  215         oshmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
  216         if (oshmmap_s && oshmmap_s->nrefs == 1)
  217                 return oshmmap_s;
  218 
  219         /* 2. No shm map present - create a fresh one */
  220         shmmap_s = kmem_zalloc(sizeof(struct shmmap_state), KM_SLEEP);
  221         shmmap_s->nrefs = 1;
  222         SLIST_INIT(&shmmap_s->entries);
  223         p->p_vmspace->vm_shm = (void *)shmmap_s;
  224 
  225         if (oshmmap_s == NULL)
  226                 return shmmap_s;
  227 
  228         SHMPRINTF(("shmmap_getprivate: vm %p split (%d entries), was used by %d\n",
  229             p->p_vmspace, oshmmap_s->nitems, oshmmap_s->nrefs));
  230 
  231         /* 3. A shared shm map, copy to a fresh one and adjust refcounts */
  232         SLIST_FOREACH(oshmmap_se, &oshmmap_s->entries, next) {
  233                 shmmap_se = pool_get(&shmmap_entry_pool, PR_WAITOK);
  234                 shmmap_se->va = oshmmap_se->va;
  235                 shmmap_se->shmid = oshmmap_se->shmid;
  236                 SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
  237         }
  238         shmmap_s->nitems = oshmmap_s->nitems;
  239         oshmmap_s->nrefs--;
  240 
  241         return shmmap_s;
  242 }
  243 
  244 /*
  245  * Lock/unlock the memory.
  246  *  => must be called with shm_lock held;
  247  *  => called from one place only, hence inline;
  248  */
  249 static inline int
  250 shm_memlock(struct lwp *l, struct shmid_ds *shmseg, int shmid, int cmd)
  251 {
  252         struct proc *p = l->l_proc;
  253         struct shmmap_entry *shmmap_se;
  254         struct shmmap_state *shmmap_s;
  255         size_t size;
  256         int error;
  257 
  258         KASSERT(mutex_owned(&shm_lock));
  259         shmmap_s = shmmap_getprivate(p);
  260 
  261         /* Find our shared memory address by shmid */
  262         SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next) {
  263                 if (shmmap_se->shmid != shmid)
  264                         continue;
  265 
  266                 size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
  267 
  268                 if (cmd == SHM_LOCK &&
  269                     (shmseg->shm_perm.mode & SHMSEG_WIRED) == 0) {
  270                         /* Wire the object and map, then tag it */
  271                         error = uobj_wirepages(shmseg->_shm_internal, 0, size);
  272                         if (error)
  273                                 return EIO;
  274                         error = uvm_map_pageable(&p->p_vmspace->vm_map,
  275                             shmmap_se->va, shmmap_se->va + size, false, 0);
  276                         if (error) {
  277                                 uobj_unwirepages(shmseg->_shm_internal, 0, size);
  278                                 if (error == EFAULT)
  279                                         error = ENOMEM;
  280                                 return error;
  281                         }
  282                         shmseg->shm_perm.mode |= SHMSEG_WIRED;
  283 
  284                 } else if (cmd == SHM_UNLOCK &&
  285                     (shmseg->shm_perm.mode & SHMSEG_WIRED) != 0) {
  286                         /* Unwire the object and map, then untag it */
  287                         uobj_unwirepages(shmseg->_shm_internal, 0, size);
  288                         error = uvm_map_pageable(&p->p_vmspace->vm_map,
  289                             shmmap_se->va, shmmap_se->va + size, true, 0);
  290                         if (error)
  291                                 return EIO;
  292                         shmseg->shm_perm.mode &= ~SHMSEG_WIRED;
  293                 }
  294         }
  295 
  296         return 0;
  297 }
  298 
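From userland this path is reached through shmctl(2) with SHM_LOCK or SHM_UNLOCK, handled in shmctl1() below. A minimal, illustrative sketch (not part of this file) of wiring an attached segment into physical memory; it assumes superuser privileges, as enforced by the kauth check in shmctl1():

#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <err.h>

/* Hypothetical example: lock an attached segment into physical memory. */
static void
lock_segment(int shmid)
{
        void *addr;

        /* The segment must be attached: shm_memlock() only walks our own entries. */
        addr = shmat(shmid, NULL, 0);
        if (addr == (void *)-1)
                err(1, "shmat");

        if (shmctl(shmid, SHM_LOCK, NULL) == -1)        /* wires the pages */
                err(1, "shmctl(SHM_LOCK)");
        /* ... use the memory ... */
        if (shmctl(shmid, SHM_UNLOCK, NULL) == -1)      /* unwires them */
                err(1, "shmctl(SHM_UNLOCK)");

        (void)shmdt(addr);
}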
  299 /*
  300  * Unmap shared memory.
  301  */
  302 int
  303 sys_shmdt(struct lwp *l, const struct sys_shmdt_args *uap, register_t *retval)
  304 {
  305         /* {
  306                 syscallarg(const void *) shmaddr;
  307         } */
  308         struct proc *p = l->l_proc;
  309         struct shmmap_state *shmmap_s1, *shmmap_s;
  310         struct shmmap_entry *shmmap_se;
  311         struct uvm_object *uobj;
  312         struct shmid_ds *shmseg;
  313         size_t size;
  314 
  315         mutex_enter(&shm_lock);
  316         /* If a reallocation is in progress, wait for it to complete */
  317         while (__predict_false(shm_realloc_state))
  318                 cv_wait(&shm_realloc_cv, &shm_lock);
  319 
  320         shmmap_s1 = (struct shmmap_state *)p->p_vmspace->vm_shm;
  321         if (shmmap_s1 == NULL) {
  322                 mutex_exit(&shm_lock);
  323                 return EINVAL;
  324         }
  325 
  326         /* Find the map entry */
  327         SLIST_FOREACH(shmmap_se, &shmmap_s1->entries, next)
  328                 if (shmmap_se->va == (vaddr_t)SCARG(uap, shmaddr))
  329                         break;
  330         if (shmmap_se == NULL) {
  331                 mutex_exit(&shm_lock);
  332                 return EINVAL;
  333         }
  334 
  335         shmmap_s = shmmap_getprivate(p);
  336         if (shmmap_s != shmmap_s1) {
  337                 /* The map has been copied - look up the entry in the new map */
  338                 SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
  339                         if (shmmap_se->va == (vaddr_t)SCARG(uap, shmaddr))
  340                                 break;
  341                 if (shmmap_se == NULL) {
  342                         mutex_exit(&shm_lock);
  343                         return EINVAL;
  344                 }
  345         }
  346 
  347         SHMPRINTF(("shmdt: vm %p: remove %d @%lx\n",
  348             p->p_vmspace, shmmap_se->shmid, shmmap_se->va));
  349 
  350         /* Delete the entry from the shm map */
  351         uobj = shm_delete_mapping(shmmap_s, shmmap_se);
  352         shmseg = &shmsegs[IPCID_TO_IX(shmmap_se->shmid)];
  353         size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
  354         mutex_exit(&shm_lock);
  355 
  356         uvm_deallocate(&p->p_vmspace->vm_map, shmmap_se->va, size);
  357         if (uobj != NULL)
  358                 uao_detach(uobj);
  359         pool_put(&shmmap_entry_pool, shmmap_se);
  360 
  361         return 0;
  362 }
  363 
  364 /*
  365  * Map shared memory.
  366  */
  367 int
  368 sys_shmat(struct lwp *l, const struct sys_shmat_args *uap, register_t *retval)
  369 {
  370         /* {
  371                 syscallarg(int) shmid;
  372                 syscallarg(const void *) shmaddr;
  373                 syscallarg(int) shmflg;
  374         } */
  375         int error, flags = 0;
  376         struct proc *p = l->l_proc;
  377         kauth_cred_t cred = l->l_cred;
  378         struct shmid_ds *shmseg;
  379         struct shmmap_state *shmmap_s;
  380         struct shmmap_entry *shmmap_se;
  381         struct uvm_object *uobj;
  382         struct vmspace *vm;
  383         vaddr_t attach_va;
  384         vm_prot_t prot;
  385         vsize_t size;
  386 
  387         /* Allocate a new map entry and set it */
  388         shmmap_se = pool_get(&shmmap_entry_pool, PR_WAITOK);
  389         shmmap_se->shmid = SCARG(uap, shmid);
  390 
  391         mutex_enter(&shm_lock);
  392         /* If a reallocation is in progress, wait for it to complete */
  393         while (__predict_false(shm_realloc_state))
  394                 cv_wait(&shm_realloc_cv, &shm_lock);
  395 
  396         shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
  397         if (shmseg == NULL) {
  398                 error = EINVAL;
  399                 goto err;
  400         }
  401         error = ipcperm(cred, &shmseg->shm_perm,
  402             (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
  403         if (error)
  404                 goto err;
  405 
  406         vm = p->p_vmspace;
  407         shmmap_s = (struct shmmap_state *)vm->vm_shm;
  408         if (shmmap_s && shmmap_s->nitems >= shminfo.shmseg) {
  409                 error = EMFILE;
  410                 goto err;
  411         }
  412 
  413         size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
  414         prot = VM_PROT_READ;
  415         if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
  416                 prot |= VM_PROT_WRITE;
  417         if (SCARG(uap, shmaddr)) {
  418                 flags |= UVM_FLAG_FIXED;
  419                 if (SCARG(uap, shmflg) & SHM_RND)
  420                         attach_va =
  421                             (vaddr_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
  422                 else if (((vaddr_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
  423                         attach_va = (vaddr_t)SCARG(uap, shmaddr);
  424                 else {
  425                         error = EINVAL;
  426                         goto err;
  427                 }
  428         } else {
  429                 /* This is just a hint to uvm_map() about where to put it. */
  430                 attach_va = p->p_emul->e_vm_default_addr(p,
  431                     (vaddr_t)vm->vm_daddr, size);
  432         }
  433 
  434         /*
  435          * Create a map entry, add it to the list and increase the counters.
  436          * The lock will be dropped before the mapping, so disable reallocation.
  437          */
  438         shmmap_s = shmmap_getprivate(p);
  439         SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
  440         shmmap_s->nitems++;
  441         shmseg->shm_lpid = p->p_pid;
  442         shmseg->shm_nattch++;
  443         shm_realloc_disable++;
  444         mutex_exit(&shm_lock);
  445 
  446         /*
  447          * Add a reference to the memory object, map it to the
  448          * address space, and lock the memory, if needed.
  449          */
  450         uobj = shmseg->_shm_internal;
  451         uao_reference(uobj);
  452         error = uvm_map(&vm->vm_map, &attach_va, size, uobj, 0, 0,
  453             UVM_MAPFLAG(prot, prot, UVM_INH_SHARE, UVM_ADV_RANDOM, flags));
  454         if (error)
  455                 goto err_detach;
  456         if (shm_use_phys || (shmseg->shm_perm.mode & SHMSEG_WIRED)) {
  457                 error = uvm_map_pageable(&vm->vm_map, attach_va,
  458                     attach_va + size, false, 0);
  459                 if (error) {
  460                         if (error == EFAULT)
  461                                 error = ENOMEM;
  462                         uvm_deallocate(&vm->vm_map, attach_va, size);
  463                         goto err_detach;
  464                 }
  465         }
  466 
  467         /* Set the new address, and update the time */
  468         mutex_enter(&shm_lock);
  469         shmmap_se->va = attach_va;
  470         shmseg->shm_atime = time_second;
  471         shm_realloc_disable--;
  472         retval[0] = attach_va;
  473         SHMPRINTF(("shmat: vm %p: add %d @%lx\n",
  474             p->p_vmspace, shmmap_se->shmid, attach_va));
  475 err:
  476         cv_broadcast(&shm_realloc_cv);
  477         mutex_exit(&shm_lock);
  478         if (error && shmmap_se)
  479                 pool_put(&shmmap_entry_pool, shmmap_se);
  480         return error;
  481 
  482 err_detach:
  483         uao_detach(uobj);
  484         mutex_enter(&shm_lock);
  485         uobj = shm_delete_mapping(shmmap_s, shmmap_se);
  486         shm_realloc_disable--;
  487         cv_broadcast(&shm_realloc_cv);
  488         mutex_exit(&shm_lock);
  489         if (uobj != NULL)
  490                 uao_detach(uobj);
  491         pool_put(&shmmap_entry_pool, shmmap_se);
  492         return error;
  493 }
  494 
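For reference, a minimal userland sketch (not part of this file) exercising the attach/detach pair implemented by sys_shmat() and sys_shmdt() above; the segment size and contents are purely illustrative:

#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <err.h>
#include <string.h>

/* Hypothetical example: create a private segment, attach, use, detach. */
static void
scratch_segment(void)
{
        int shmid;
        char *p;

        /* One page-sized segment, private to this process tree. */
        shmid = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
        if (shmid == -1)
                err(1, "shmget");

        p = shmat(shmid, NULL, 0);              /* kernel picks the address */
        if (p == (void *)-1)
                err(1, "shmat");

        strlcpy(p, "hello", 4096);              /* ordinary memory access */

        if (shmdt(p) == -1)                     /* detach by attach address */
                err(1, "shmdt");
        /* Mark for removal; freed once the last attachment goes away. */
        if (shmctl(shmid, IPC_RMID, NULL) == -1)
                err(1, "shmctl(IPC_RMID)");
}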
  495 /*
  496  * Shared memory control operations.
  497  */
  498 int
  499 sys___shmctl13(struct lwp *l, const struct sys___shmctl13_args *uap, register_t *retval)
  500 {
  501         /* {
  502                 syscallarg(int) shmid;
  503                 syscallarg(int) cmd;
  504                 syscallarg(struct shmid_ds *) buf;
  505         } */
  506         struct shmid_ds shmbuf;
  507         int cmd, error;
  508 
  509         cmd = SCARG(uap, cmd);
  510         if (cmd == IPC_SET) {
  511                 error = copyin(SCARG(uap, buf), &shmbuf, sizeof(shmbuf));
  512                 if (error)
  513                         return error;
  514         }
  515 
  516         error = shmctl1(l, SCARG(uap, shmid), cmd,
  517             (cmd == IPC_SET || cmd == IPC_STAT) ? &shmbuf : NULL);
  518 
  519         if (error == 0 && cmd == IPC_STAT)
  520                 error = copyout(&shmbuf, SCARG(uap, buf), sizeof(shmbuf));
  521 
  522         return error;
  523 }
  524 
  525 int
  526 shmctl1(struct lwp *l, int shmid, int cmd, struct shmid_ds *shmbuf)
  527 {
  528         struct uvm_object *uobj = NULL;
  529         kauth_cred_t cred = l->l_cred;
  530         struct shmid_ds *shmseg;
  531         int error = 0;
  532 
  533         mutex_enter(&shm_lock);
  534         /* If a reallocation is in progress, wait for it to complete */
  535         while (__predict_false(shm_realloc_state))
  536                 cv_wait(&shm_realloc_cv, &shm_lock);
  537 
  538         shmseg = shm_find_segment_by_shmid(shmid);
  539         if (shmseg == NULL) {
  540                 mutex_exit(&shm_lock);
  541                 return EINVAL;
  542         }
  543 
  544         switch (cmd) {
  545         case IPC_STAT:
  546                 if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
  547                         break;
  548                 memcpy(shmbuf, shmseg, sizeof(struct shmid_ds));
  549                 break;
  550         case IPC_SET:
  551                 if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
  552                         break;
  553                 shmseg->shm_perm.uid = shmbuf->shm_perm.uid;
  554                 shmseg->shm_perm.gid = shmbuf->shm_perm.gid;
  555                 shmseg->shm_perm.mode =
  556                     (shmseg->shm_perm.mode & ~ACCESSPERMS) |
  557                     (shmbuf->shm_perm.mode & ACCESSPERMS);
  558                 shmseg->shm_ctime = time_second;
  559                 break;
  560         case IPC_RMID:
  561                 if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
  562                         break;
  563                 shmseg->shm_perm._key = IPC_PRIVATE;
  564                 shmseg->shm_perm.mode |= SHMSEG_REMOVED;
  565                 if (shmseg->shm_nattch <= 0) {
  566                         uobj = shmseg->_shm_internal;
  567                         shm_free_segment(IPCID_TO_IX(shmid));
  568                 }
  569                 break;
  570         case SHM_LOCK:
  571         case SHM_UNLOCK:
  572                 if ((error = kauth_authorize_generic(cred,
  573                     KAUTH_GENERIC_ISSUSER, NULL)) != 0)
  574                         break;
  575                 error = shm_memlock(l, shmseg, shmid, cmd);
  576                 break;
  577         default:
  578                 error = EINVAL;
  579         }
  580 
  581         mutex_exit(&shm_lock);
  582         if (uobj != NULL)
  583                 uao_detach(uobj);
  584         return error;
  585 }
  586 
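A hedged userland sketch (illustrative, not part of this file) of the control operations handled above: read the descriptor with IPC_STAT, tighten the mode with IPC_SET, and finally mark the segment for removal:

#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <err.h>
#include <stdio.h>

/* Hypothetical example: inspect and adjust an existing segment. */
static void
inspect_segment(int shmid)
{
        struct shmid_ds ds;

        if (shmctl(shmid, IPC_STAT, &ds) == -1)
                err(1, "shmctl(IPC_STAT)");
        printf("size %zu, attaches %u\n",
            (size_t)ds.shm_segsz, (unsigned)ds.shm_nattch);

        /* Only uid, gid and the access bits of mode are honoured by IPC_SET. */
        ds.shm_perm.mode = (ds.shm_perm.mode & ~0777) | 0600;
        if (shmctl(shmid, IPC_SET, &ds) == -1)
                err(1, "shmctl(IPC_SET)");

        if (shmctl(shmid, IPC_RMID, NULL) == -1)
                err(1, "shmctl(IPC_RMID)");
}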
  587 /*
  588  * Try to take an already existing segment.
  589  *  => must be called with shm_lock held;
  590  *  => called from one place only, hence inline;
  591  */
  592 static inline int
  593 shmget_existing(struct lwp *l, const struct sys_shmget_args *uap, int mode,
  594     register_t *retval)
  595 {
  596         struct shmid_ds *shmseg;
  597         kauth_cred_t cred = l->l_cred;
  598         int segnum, error;
  599 again:
  600         KASSERT(mutex_owned(&shm_lock));
  601 
  602         /* Find segment by key */
  603         for (segnum = 0; segnum < shminfo.shmmni; segnum++)
  604                 if ((shmsegs[segnum].shm_perm.mode & SHMSEG_ALLOCATED) &&
  605                     shmsegs[segnum].shm_perm._key == SCARG(uap, key))
  606                         break;
  607         if (segnum == shminfo.shmmni) {
  608                 /* Not found */
  609                 return -1;
  610         }
  611 
  612         shmseg = &shmsegs[segnum];
  613         if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
  614                 /*
  615                  * This segment is in the process of being allocated.  Wait
  616                  * until it's done, and look the key up again (in case the
  617                  * allocation failed or it was freed).
  618                  */
  619                 shmseg->shm_perm.mode |= SHMSEG_WANTED;
  620                 error = cv_wait_sig(&shm_cv[segnum], &shm_lock);
  621                 if (error)
  622                         return error;
  623                 goto again;
  624         }
  625 
  626         /*
  627          * First check the flags, to generate a useful error when a
  628          * segment already exists.
  629          */
  630         if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
  631             (IPC_CREAT | IPC_EXCL))
  632                 return EEXIST;
  633 
  634         /* Check the permission and segment size. */
  635         error = ipcperm(cred, &shmseg->shm_perm, mode);
  636         if (error)
  637                 return error;
  638         if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
  639                 return EINVAL;
  640 
  641         *retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
  642         return 0;
  643 }
  644 
  645 int
  646 sys_shmget(struct lwp *l, const struct sys_shmget_args *uap, register_t *retval)
  647 {
  648         /* {
  649                 syscallarg(key_t) key;
  650                 syscallarg(size_t) size;
  651                 syscallarg(int) shmflg;
  652         } */
  653         struct shmid_ds *shmseg;
  654         kauth_cred_t cred = l->l_cred;
  655         key_t key = SCARG(uap, key);
  656         size_t size;
  657         int error, mode, segnum;
  658         bool lockmem;
  659 
  660         mode = SCARG(uap, shmflg) & ACCESSPERMS;
  661         if (SCARG(uap, shmflg) & _SHM_RMLINGER)
  662                 mode |= SHMSEG_RMLINGER;
  663 
  664         SHMPRINTF(("shmget: key 0x%lx size 0x%x shmflg 0x%x mode 0x%x\n",
  665             SCARG(uap, key), SCARG(uap, size), SCARG(uap, shmflg), mode));
  666 
  667         mutex_enter(&shm_lock);
  668         /* If a reallocation is in progress, wait for it to complete */
  669         while (__predict_false(shm_realloc_state))
  670                 cv_wait(&shm_realloc_cv, &shm_lock);
  671 
  672         if (key != IPC_PRIVATE) {
  673                 error = shmget_existing(l, uap, mode, retval);
  674                 if (error != -1) {
  675                         mutex_exit(&shm_lock);
  676                         return error;
  677                 }
  678                 if ((SCARG(uap, shmflg) & IPC_CREAT) == 0) {
  679                         mutex_exit(&shm_lock);
  680                         return ENOENT;
  681                 }
  682         }
  683         error = 0;
  684 
  685         /*
  686          * Check the limits.
  687          */
  688         size = SCARG(uap, size);
  689         if (size < shminfo.shmmin || size > shminfo.shmmax) {
  690                 mutex_exit(&shm_lock);
  691                 return EINVAL;
  692         }
  693         if (shm_nused >= shminfo.shmmni) {
  694                 mutex_exit(&shm_lock);
  695                 return ENOSPC;
  696         }
  697         size = (size + PGOFSET) & ~PGOFSET;
  698         if (shm_committed + btoc(size) > shminfo.shmall) {
  699                 mutex_exit(&shm_lock);
  700                 return ENOMEM;
  701         }
  702 
  703         /* Find the first available segment */
  704         if (shm_last_free < 0) {
  705                 for (segnum = 0; segnum < shminfo.shmmni; segnum++)
  706                         if (shmsegs[segnum].shm_perm.mode & SHMSEG_FREE)
  707                                 break;
  708                 KASSERT(segnum < shminfo.shmmni);
  709         } else {
  710                 segnum = shm_last_free;
  711                 shm_last_free = -1;
  712         }
  713 
  714         /*
  715          * Initialize the segment.
  716          * We will drop the lock while allocating the memory, so mark the
  717          * segment as allocated but removed, so that no other thread can
  718          * take it.  Also, disable reallocation while the lock is dropped.
  719          */
  720         shmseg = &shmsegs[segnum];
  721         shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
  722         shm_committed += btoc(size);
  723         shm_nused++;
  724         lockmem = shm_use_phys;
  725         shm_realloc_disable++;
  726         mutex_exit(&shm_lock);
  727 
  728         /* Allocate the memory object and lock it if needed */
  729         shmseg->_shm_internal = uao_create(size, 0);
  730         if (lockmem) {
  731                 /* Wire the pages and tag it */
  732                 error = uobj_wirepages(shmseg->_shm_internal, 0, size);
  733                 if (error) {
  734                         uao_detach(shmseg->_shm_internal);
  735                         mutex_enter(&shm_lock);
  736                         shm_free_segment(segnum);
  737                         shm_realloc_disable--;
  738                         mutex_exit(&shm_lock);
  739                         return error;
  740                 }
  741         }
  742 
  743         /*
  744          * Note that while the segment is marked this way, there is no need
  745          * to hold the lock while setting its fields (except shm_perm.mode).
  746          */
  747         shmseg->shm_perm._key = SCARG(uap, key);
  748         shmseg->shm_perm._seq = (shmseg->shm_perm._seq + 1) & 0x7fff;
  749         *retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
  750 
  751         shmseg->shm_perm.cuid = shmseg->shm_perm.uid = kauth_cred_geteuid(cred);
  752         shmseg->shm_perm.cgid = shmseg->shm_perm.gid = kauth_cred_getegid(cred);
  753         shmseg->shm_segsz = SCARG(uap, size);
  754         shmseg->shm_cpid = l->l_proc->p_pid;
  755         shmseg->shm_lpid = shmseg->shm_nattch = 0;
  756         shmseg->shm_atime = shmseg->shm_dtime = 0;
  757         shmseg->shm_ctime = time_second;
  758 
  759         /*
  760          * The segment is initialized.
  761          * Take the lock, mark it as allocated, and notify waiters (if any).
  762          * Also, re-enable reallocation.
  763          */
  764         mutex_enter(&shm_lock);
  765         shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
  766             (mode & (ACCESSPERMS | SHMSEG_RMLINGER)) |
  767             SHMSEG_ALLOCATED | (lockmem ? SHMSEG_WIRED : 0);
  768         if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
  769                 shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
  770                 cv_broadcast(&shm_cv[segnum]);
  771         }
  772         shm_realloc_disable--;
  773         cv_broadcast(&shm_realloc_cv);
  774         mutex_exit(&shm_lock);
  775 
  776         return error;
  777 }
  778 
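A minimal userland sketch (not part of this file) of the create-or-attach logic that shmget_existing() and sys_shmget() implement; the path and project id passed to ftok() are placeholders:

#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <errno.h>
#include <err.h>

/* Hypothetical example: create a keyed segment, or attach to an existing one. */
static int
get_or_create(size_t size)
{
        key_t key;
        int shmid;

        key = ftok("/tmp/example", 1);          /* placeholder key source */
        if (key == (key_t)-1)
                err(1, "ftok");

        /* Try to create exclusively; EEXIST means someone beat us to it. */
        shmid = shmget(key, size, IPC_CREAT | IPC_EXCL | 0600);
        if (shmid == -1) {
                if (errno != EEXIST)
                        err(1, "shmget(IPC_CREAT|IPC_EXCL)");
                /* Existing segment: size acts as a minimum-size check. */
                shmid = shmget(key, size, 0600);
                if (shmid == -1)
                        err(1, "shmget");
        }
        return shmid;
}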
  779 void
  780 shmfork(struct vmspace *vm1, struct vmspace *vm2)
  781 {
  782         struct shmmap_state *shmmap_s;
  783         struct shmmap_entry *shmmap_se;
  784 
  785         SHMPRINTF(("shmfork %p->%p\n", vm1, vm2));
  786         mutex_enter(&shm_lock);
  787         vm2->vm_shm = vm1->vm_shm;
  788         if (vm1->vm_shm) {
  789                 shmmap_s = (struct shmmap_state *)vm1->vm_shm;
  790                 SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
  791                         shmsegs[IPCID_TO_IX(shmmap_se->shmid)].shm_nattch++;
  792                 shmmap_s->nrefs++;
  793         }
  794         mutex_exit(&shm_lock);
  795 }
  796 
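The effect of shmfork() as seen from userland: attachments made before fork(2) remain mapped in the child, and each one counts as an additional attach on the segment. A small illustrative sketch (not part of this file):

#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/wait.h>
#include <unistd.h>
#include <err.h>

/* Hypothetical example: a segment attached before fork() is shared with the child. */
static void
share_with_child(void)
{
        int shmid, *counter;
        pid_t pid;

        shmid = shmget(IPC_PRIVATE, sizeof(int), IPC_CREAT | 0600);
        if (shmid == -1)
                err(1, "shmget");
        counter = shmat(shmid, NULL, 0);
        if (counter == (void *)-1)
                err(1, "shmat");
        *counter = 0;

        pid = fork();
        if (pid == -1)
                err(1, "fork");
        if (pid == 0) {                 /* child inherits the attachment */
                (*counter)++;
                _exit(0);
        }
        (void)waitpid(pid, NULL, 0);
        /* Parent sees the child's update through the same pages. */
        (void)shmdt(counter);
        (void)shmctl(shmid, IPC_RMID, NULL);
}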
  797 void
  798 shmexit(struct vmspace *vm)
  799 {
  800         struct shmmap_state *shmmap_s;
  801         struct shmmap_entry *shmmap_se;
  802         struct uvm_object **uobj;
  803         size_t *size;
  804         u_int i, n;
  805 
  806         SLIST_HEAD(, shmmap_entry) tmp_entries;
  807 
  808         mutex_enter(&shm_lock);
  809         shmmap_s = (struct shmmap_state *)vm->vm_shm;
  810         if (shmmap_s == NULL) {
  811                 mutex_exit(&shm_lock);
  812                 return;
  813         }
  814 
  815         vm->vm_shm = NULL;
  816 
  817         if (--shmmap_s->nrefs > 0) {
  818                 SHMPRINTF(("shmexit: vm %p drop ref (%d entries), refs = %d\n",
  819                     vm, shmmap_s->nitems, shmmap_s->nrefs));
  820                 SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
  821                         shmsegs[IPCID_TO_IX(shmmap_se->shmid)].shm_nattch--;
  822                 mutex_exit(&shm_lock);
  823                 return;
  824         }
  825 
  826         KASSERT(shmmap_s->nrefs == 0);
  827         n = shmmap_s->nitems;
  828         SHMPRINTF(("shmexit: vm %p cleanup (%d entries)\n", vm, n));
  829         mutex_exit(&shm_lock);
  830         if (n == 0) {
  831                 kmem_free(shmmap_s, sizeof(struct shmmap_state));
  832                 return;
  833         }
  834 
  835         /* Allocate the arrays */
  836         SLIST_INIT(&tmp_entries);
  837         uobj = kmem_zalloc(n * sizeof(void *), KM_SLEEP);
  838         size = kmem_zalloc(n * sizeof(size_t), KM_SLEEP);
  839 
  840         /* Delete the entries from the shm map */
  841         i = 0;
  842         mutex_enter(&shm_lock);
  843         while (!SLIST_EMPTY(&shmmap_s->entries)) {
  844                 struct shmid_ds *shmseg;
  845 
  846                 shmmap_se = SLIST_FIRST(&shmmap_s->entries);
  847                 shmseg = &shmsegs[IPCID_TO_IX(shmmap_se->shmid)];
  848                 size[i] = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
  849                 uobj[i] = shm_delete_mapping(shmmap_s, shmmap_se);
  850                 SLIST_INSERT_HEAD(&tmp_entries, shmmap_se, next);
  851                 i++;
  852         }
  853         mutex_exit(&shm_lock);
  854 
  855         /* Unmap all segments, free the entries */
  856         i = 0;
  857         while (!SLIST_EMPTY(&tmp_entries)) {
  858                 KASSERT(i < n);
  859                 shmmap_se = SLIST_FIRST(&tmp_entries);
  860                 SLIST_REMOVE(&tmp_entries, shmmap_se, shmmap_entry, next);
  861                 uvm_deallocate(&vm->vm_map, shmmap_se->va, size[i]);
  862                 if (uobj[i] != NULL)
  863                         uao_detach(uobj[i]);
  864                 pool_put(&shmmap_entry_pool, shmmap_se);
  865                 i++;
  866         }
  867 
  868         kmem_free(uobj, n * sizeof(void *));
  869         kmem_free(size, n * sizeof(size_t));
  870         kmem_free(shmmap_s, sizeof(struct shmmap_state));
  871 }
  872 
  873 static int
  874 shmrealloc(int newshmni)
  875 {
  876         vaddr_t v;
  877         struct shmid_ds *oldshmsegs, *newshmsegs;
  878         kcondvar_t *newshm_cv, *oldshm_cv;
  879         size_t sz;
  880         int i, lsegid, oldshmni;
  881 
  882         if (newshmni < 1)
  883                 return EINVAL;
  884 
  885         /* Allocate new memory area */
  886         sz = ALIGN(newshmni * sizeof(struct shmid_ds)) +
  887             ALIGN(newshmni * sizeof(kcondvar_t));
  888         v = uvm_km_alloc(kernel_map, round_page(sz), 0,
  889             UVM_KMF_WIRED|UVM_KMF_ZERO);
  890         if (v == 0)
  891                 return ENOMEM;
  892 
  893         mutex_enter(&shm_lock);
  894         while (shm_realloc_state || shm_realloc_disable)
  895                 cv_wait(&shm_realloc_cv, &shm_lock);
  896 
  897         /*
  898          * Get the index of the last used segment.  Fail if we are trying
  899          * to reallocate to fewer segments than are currently in use.
  900          */
  901         lsegid = 0;
  902         for (i = 0; i < shminfo.shmmni; i++)
  903                 if ((shmsegs[i].shm_perm.mode & SHMSEG_FREE) == 0)
  904                         lsegid = i;
  905         if (lsegid >= newshmni) {
  906                 mutex_exit(&shm_lock);
  907                 uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
  908                 return EBUSY;
  909         }
  910         shm_realloc_state = true;
  911 
  912         newshmsegs = (void *)v;
  913         newshm_cv = (void *)((uintptr_t)newshmsegs +
  914             ALIGN(newshmni * sizeof(struct shmid_ds)));
  915 
  916         /* Copy all segment descriptors to the new area */
  917         for (i = 0; i < shm_nused; i++)
  918                 (void)memcpy(&newshmsegs[i], &shmsegs[i],
  919                     sizeof(newshmsegs[0]));
  920 
  921         /* Mark all new segments as free, if there are any */
  922         for (; i < newshmni; i++) {
  923                 cv_init(&newshm_cv[i], "shmwait");
  924                 newshmsegs[i].shm_perm.mode = SHMSEG_FREE;
  925                 newshmsegs[i].shm_perm._seq = 0;
  926         }
  927 
  928         oldshmsegs = shmsegs;
  929         oldshmni = shminfo.shmmni;
  930         shminfo.shmmni = newshmni;
  931         shmsegs = newshmsegs;
  932         shm_cv = newshm_cv;
  933 
  934         /* Reallocation completed - notify all waiters, if any */
  935         shm_realloc_state = false;
  936         cv_broadcast(&shm_realloc_cv);
  937         mutex_exit(&shm_lock);
  938 
  939         /* Release now unused resources. */
  940         oldshm_cv = (void *)((uintptr_t)oldshmsegs +
  941             ALIGN(oldshmni * sizeof(struct shmid_ds)));
  942         for (i = 0; i < oldshmni; i++)
  943                 cv_destroy(&oldshm_cv[i]);
  944 
  945         sz = ALIGN(oldshmni * sizeof(struct shmid_ds)) +
  946             ALIGN(oldshmni * sizeof(kcondvar_t));
  947         uvm_km_free(kernel_map, (vaddr_t)oldshmsegs, sz, UVM_KMF_WIRED);
  948 
  949         return 0;
  950 }
  951 
  952 void
  953 shminit(void)
  954 {
  955         vaddr_t v;
  956         size_t sz;
  957         int i;
  958 
  959         mutex_init(&shm_lock, MUTEX_DEFAULT, IPL_NONE);
  960         pool_init(&shmmap_entry_pool, sizeof(struct shmmap_entry), 0, 0, 0,
  961             "shmmp", &pool_allocator_nointr, IPL_NONE);
  962         cv_init(&shm_realloc_cv, "shmrealc");
  963 
  964         /* Allocate the wired memory for our structures */
  965         sz = ALIGN(shminfo.shmmni * sizeof(struct shmid_ds)) +
  966             ALIGN(shminfo.shmmni * sizeof(kcondvar_t));
  967         v = uvm_km_alloc(kernel_map, round_page(sz), 0,
  968             UVM_KMF_WIRED|UVM_KMF_ZERO);
  969         if (v == 0)
  970                 panic("sysv_shm: cannot allocate memory");
  971         shmsegs = (void *)v;
  972         shm_cv = (void *)((uintptr_t)shmsegs +
  973             ALIGN(shminfo.shmmni * sizeof(struct shmid_ds)));
  974 
  975         shminfo.shmmax *= PAGE_SIZE;
  976 
  977         for (i = 0; i < shminfo.shmmni; i++) {
  978                 cv_init(&shm_cv[i], "shmwait");
  979                 shmsegs[i].shm_perm.mode = SHMSEG_FREE;
  980                 shmsegs[i].shm_perm._seq = 0;
  981         }
  982         shm_last_free = 0;
  983         shm_nused = 0;
  984         shm_committed = 0;
  985         shm_realloc_disable = 0;
  986         shm_realloc_state = false;
  987 }
  988 
  989 static int
  990 sysctl_ipc_shmmni(SYSCTLFN_ARGS)
  991 {
  992         int newsize, error;
  993         struct sysctlnode node;
  994         node = *rnode;
  995         node.sysctl_data = &newsize;
  996 
  997         newsize = shminfo.shmmni;
  998         error = sysctl_lookup(SYSCTLFN_CALL(&node));
  999         if (error || newp == NULL)
 1000                 return error;
 1001 
 1002         sysctl_unlock();
 1003         error = shmrealloc(newsize);
 1004         sysctl_relock();
 1005         return error;
 1006 }
 1007 
 1008 static int
 1009 sysctl_ipc_shmmaxpgs(SYSCTLFN_ARGS)
 1010 {
 1011         uint32_t newsize;
 1012         int error;
 1013         struct sysctlnode node;
 1014         node = *rnode;
 1015         node.sysctl_data = &newsize;
 1016 
 1017         newsize = shminfo.shmall;
 1018         error = sysctl_lookup(SYSCTLFN_CALL(&node));
 1019         if (error || newp == NULL)
 1020                 return error;
 1021 
 1022         if (newsize < 1)
 1023                 return EINVAL;
 1024 
 1025         shminfo.shmall = newsize;
 1026         shminfo.shmmax = (uint64_t)shminfo.shmall * PAGE_SIZE;
 1027 
 1028         return 0;
 1029 }
 1030 
 1031 static int
 1032 sysctl_ipc_shmmax(SYSCTLFN_ARGS)
 1033 {
 1034         uint64_t newsize;
 1035         int error;
 1036         struct sysctlnode node;
 1037         node = *rnode;
 1038         node.sysctl_data = &newsize;
 1039 
 1040         newsize = shminfo.shmmax;
 1041         error = sysctl_lookup(SYSCTLFN_CALL(&node));
 1042         if (error || newp == NULL)
 1043                 return error;
 1044 
 1045         if (newsize < PAGE_SIZE)
 1046                 return EINVAL;
 1047 
 1048         shminfo.shmmax = round_page(newsize);
 1049         shminfo.shmall = shminfo.shmmax >> PAGE_SHIFT;
 1050 
 1051         return 0;
 1052 }
 1053 
 1054 SYSCTL_SETUP(sysctl_ipc_shm_setup, "sysctl kern.ipc subtree setup")
 1055 {
 1056 
 1057         sysctl_createv(clog, 0, NULL, NULL,
 1058                 CTLFLAG_PERMANENT,
 1059                 CTLTYPE_NODE, "kern", NULL,
 1060                 NULL, 0, NULL, 0,
 1061                 CTL_KERN, CTL_EOL);
 1062         sysctl_createv(clog, 0, NULL, NULL,
 1063                 CTLFLAG_PERMANENT,
 1064                 CTLTYPE_NODE, "ipc",
 1065                 SYSCTL_DESCR("SysV IPC options"),
 1066                 NULL, 0, NULL, 0,
 1067                 CTL_KERN, KERN_SYSVIPC, CTL_EOL);
 1068         sysctl_createv(clog, 0, NULL, NULL,
 1069                 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
 1070                 CTLTYPE_QUAD, "shmmax",
 1071                 SYSCTL_DESCR("Max shared memory segment size in bytes"),
 1072                 sysctl_ipc_shmmax, 0, &shminfo.shmmax, 0,
 1073                 CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMMAX, CTL_EOL);
 1074         sysctl_createv(clog, 0, NULL, NULL,
 1075                 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
 1076                 CTLTYPE_INT, "shmmni",
 1077                 SYSCTL_DESCR("Max number of shared memory identifiers"),
 1078                 sysctl_ipc_shmmni, 0, &shminfo.shmmni, 0,
 1079                 CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMMNI, CTL_EOL);
 1080         sysctl_createv(clog, 0, NULL, NULL,
 1081                 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
 1082                 CTLTYPE_INT, "shmseg",
 1083                 SYSCTL_DESCR("Max shared memory segments per process"),
 1084                 NULL, 0, &shminfo.shmseg, 0,
 1085                 CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMSEG, CTL_EOL);
 1086         sysctl_createv(clog, 0, NULL, NULL,
 1087                 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
 1088                 CTLTYPE_INT, "shmmaxpgs",
 1089                 SYSCTL_DESCR("Max amount of shared memory in pages"),
 1090                 sysctl_ipc_shmmaxpgs, 0, &shminfo.shmall, 0,
 1091                 CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMMAXPGS, CTL_EOL);
 1092         sysctl_createv(clog, 0, NULL, NULL,
 1093                 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
 1094                 CTLTYPE_INT, "shm_use_phys",
 1095                 SYSCTL_DESCR("Enable/disable locking of shared memory in "
 1096                     "physical memory"), NULL, 0, &shm_use_phys, 0,
 1097                 CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMUSEPHYS, CTL_EOL);
 1098 }
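The limits registered above can be inspected and tuned at run time, e.g. with sysctl kern.ipc.shmmni. A hedged userland sketch (not part of this file) using sysctlbyname(3); the new value is purely illustrative, and raising it requires sufficient privilege:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdio.h>

/* Hypothetical example: read and (if privileged) change kern.ipc.shmmni. */
static void
tune_shmmni(void)
{
        int shmmni, newshmmni = 256;    /* illustrative value */
        size_t len = sizeof(shmmni);

        if (sysctlbyname("kern.ipc.shmmni", &shmmni, &len, NULL, 0) == -1)
                err(1, "sysctlbyname(kern.ipc.shmmni)");
        printf("shmmni = %d\n", shmmni);

        /*
         * Setting the value goes through sysctl_ipc_shmmni() and shmrealloc()
         * above; it may fail with EBUSY when shrinking below the highest
         * segment currently in use.
         */
        if (sysctlbyname("kern.ipc.shmmni", NULL, NULL,
            &newshmmni, sizeof(newshmmni)) == -1)
                warn("sysctlbyname(set kern.ipc.shmmni)");
}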
