The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/kern/sysv_shm.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*      $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $      */
    2 /*
    3  * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  * 3. All advertising materials mentioning features or use of this software
   14  *    must display the following acknowledgement:
   15  *      This product includes software developed by Adam Glass and Charles
   16  *      Hannum.
   17  * 4. The names of the authors may not be used to endorse or promote products
   18  *    derived from this software without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
   21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   23  * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   30  */
   31 
   32 #include <sys/cdefs.h>
   33 __FBSDID("$FreeBSD: releng/5.2/sys/kern/sysv_shm.c 125489 2004-02-05 18:01:26Z nectar $");
   34 
   35 #include "opt_compat.h"
   36 #include "opt_sysvipc.h"
   37 
   38 #include <sys/param.h>
   39 #include <sys/systm.h>
   40 #include <sys/kernel.h>
   41 #include <sys/lock.h>
   42 #include <sys/sysctl.h>
   43 #include <sys/shm.h>
   44 #include <sys/proc.h>
   45 #include <sys/malloc.h>
   46 #include <sys/mman.h>
   47 #include <sys/mutex.h>
   48 #include <sys/stat.h>
   49 #include <sys/syscall.h>
   50 #include <sys/syscallsubr.h>
   51 #include <sys/sysent.h>
   52 #include <sys/sysproto.h>
   53 #include <sys/jail.h>
   54 
   55 #include <vm/vm.h>
   56 #include <vm/vm_param.h>
   57 #include <vm/pmap.h>
   58 #include <vm/vm_object.h>
   59 #include <vm/vm_map.h>
   60 #include <vm/vm_page.h>
   61 #include <vm/vm_pager.h>
   62 
/* Malloc type used for all SysV shared memory allocations. */
static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

struct oshmctl_args;
static int oshmctl(struct thread *td, struct oshmctl_args *uap);

static int shmget_allocate_segment(struct thread *td,
    struct shmget_args *uap, int mode);
static int shmget_existing(struct thread *td, struct shmget_args *uap,
    int mode, int segnum);

/* Dispatch table for the old multiplexed shmsys(2) entry point. */
/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
        (sy_call_t *)shmat, (sy_call_t *)oshmctl,
        (sy_call_t *)shmdt, (sy_call_t *)shmget,
        (sy_call_t *)shmctl
};

/* Segment state bits, kept in shm_perm.mode above the access bits. */
#define SHMSEG_FREE             0x0200  /* slot unused */
#define SHMSEG_REMOVED          0x0400  /* IPC_RMID'd; lingers until last detach */
#define SHMSEG_ALLOCATED        0x0800  /* slot holds a live segment */
#define SHMSEG_WANTED           0x1000  /* a sleeper is waiting on this slot */

/*
 * Global bookkeeping (all manipulated under Giant):
 *  shm_last_free - index hint of the most recently freed slot, or -1
 *  shm_nused     - number of allocated segments
 *  shm_committed - pages committed across all segments (btoc units)
 *  shmalloced    - current length of the shmsegs[] array
 */
static int shm_last_free, shm_nused, shm_committed, shmalloced;
static struct shmid_ds  *shmsegs;

/* Per-segment backing store: one reference-counted VM object. */
struct shm_handle {
        /* vm_offset_t kva; */
        vm_object_t shm_object;
};

/* One attach record per process slot; shmid == -1 marks a free record. */
struct shmmap_state {
        vm_offset_t va;
        int shmid;
};

static void shm_deallocate_segment(struct shmid_ds *);
static int shm_find_segment_by_key(key_t);
static struct shmid_ds *shm_find_segment_by_shmid(int);
static struct shmid_ds *shm_find_segment_by_shmidx(int);
static int shm_delete_mapping(struct vmspace *vm, struct shmmap_state *);
static void shmrealloc(void);
static void shminit(void);
static int sysvshm_modload(struct module *, int, void *);
static int shmunload(void);
static void shmexit_myhook(struct vmspace *vm);
static void shmfork_myhook(struct proc *p1, struct proc *p2);
static int sysctl_shmsegs(SYSCTL_HANDLER_ARGS);
  110 
/*
 * Tuneable values.  Each may be overridden at compile time; several are
 * also runtime-settable via the kern.ipc sysctls declared below.
 */
#ifndef SHMMAXPGS
#define SHMMAXPGS       8192    /* Note: sysv shared memory is swap backed. */
#endif
#ifndef SHMMAX
#define SHMMAX  (SHMMAXPGS*PAGE_SIZE)   /* max segment size, bytes */
#endif
#ifndef SHMMIN
#define SHMMIN  1                       /* min segment size, bytes */
#endif
#ifndef SHMMNI
#define SHMMNI  192                     /* max segments system-wide */
#endif
#ifndef SHMSEG
#define SHMSEG  128                     /* max attaches per process */
#endif
#ifndef SHMALL
#define SHMALL  (SHMMAXPGS)             /* max pages system-wide */
#endif

/* Live limits, initialized from the compile-time defaults above. */
struct  shminfo shminfo = {
        SHMMAX,
        SHMMIN,
        SHMMNI,
        SHMSEG,
        SHMALL
};

static int shm_use_phys;        /* use OBJT_PHYS (unswappable) backing */
static int shm_allow_removed;   /* allow attach/lookup of RMID'd segments */

SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RDTUN, &shminfo.shmmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RDTUN, &shminfo.shmseg, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW,
    &shm_use_phys, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
    &shm_allow_removed, 0, "");
SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLFLAG_RD,
    NULL, 0, sysctl_shmsegs, "", "");
  156 
  157 static int
  158 shm_find_segment_by_key(key)
  159         key_t key;
  160 {
  161         int i;
  162 
  163         for (i = 0; i < shmalloced; i++)
  164                 if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
  165                     shmsegs[i].shm_perm.key == key)
  166                         return (i);
  167         return (-1);
  168 }
  169 
  170 static struct shmid_ds *
  171 shm_find_segment_by_shmid(int shmid)
  172 {
  173         int segnum;
  174         struct shmid_ds *shmseg;
  175 
  176         segnum = IPCID_TO_IX(shmid);
  177         if (segnum < 0 || segnum >= shmalloced)
  178                 return (NULL);
  179         shmseg = &shmsegs[segnum];
  180         if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
  181             (!shm_allow_removed &&
  182              (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0) ||
  183             shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
  184                 return (NULL);
  185         return (shmseg);
  186 }
  187 
  188 static struct shmid_ds *
  189 shm_find_segment_by_shmidx(int segnum)
  190 {
  191         struct shmid_ds *shmseg;
  192 
  193         if (segnum < 0 || segnum >= shmalloced)
  194                 return (NULL);
  195         shmseg = &shmsegs[segnum];
  196         if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
  197             (!shm_allow_removed &&
  198              (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0))
  199                 return (NULL);
  200         return (shmseg);
  201 }
  202 
/*
 * Tear down a segment: drop its VM object reference, free the handle,
 * and return the slot and its page accounting to the free pool.
 * Caller must hold Giant and must only call this once shm_nattch has
 * reached zero (see shm_delete_mapping() and IPC_RMID handling).
 */
static void
shm_deallocate_segment(shmseg)
        struct shmid_ds *shmseg;
{
        struct shm_handle *shm_handle;
        size_t size;

        GIANT_REQUIRED;

        shm_handle = shmseg->shm_internal;
        vm_object_deallocate(shm_handle->shm_object);
        free(shm_handle, M_SHM);
        shmseg->shm_internal = NULL;
        size = round_page(shmseg->shm_segsz);
        /* Give back the committed pages and mark the slot free. */
        shm_committed -= btoc(size);
        shm_nused--;
        shmseg->shm_perm.mode = SHMSEG_FREE;
}
  221 
/*
 * Detach one mapping from a vmspace: unmap the region, invalidate the
 * per-process attach record, and update the segment's detach time and
 * attach count.  If this was the last attach of a segment already
 * marked SHMSEG_REMOVED, the segment itself is deallocated.
 * Returns 0 on success or EINVAL if the VM unmap fails.
 */
static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
        struct shmid_ds *shmseg;
        int segnum, result;
        size_t size;

        GIANT_REQUIRED;

        segnum = IPCID_TO_IX(shmmap_s->shmid);
        shmseg = &shmsegs[segnum];
        size = round_page(shmseg->shm_segsz);
        result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
        if (result != KERN_SUCCESS)
                return (EINVAL);
        /* Mark the attach record free before touching segment state. */
        shmmap_s->shmid = -1;
        shmseg->shm_dtime = time_second;
        if ((--shmseg->shm_nattch <= 0) &&
            (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
                shm_deallocate_segment(shmseg);
                shm_last_free = segnum;
        }
        return (0);
}
  246 
#ifndef _SYS_SYSPROTO_H_
struct shmdt_args {
        const void *shmaddr;
};
#endif

/*
 * shmdt(2): detach the shared memory segment attached at 'shmaddr'.
 * Scans the process's attach table for a record whose va matches;
 * returns EINVAL if the process has no attaches or no record matches,
 * ENOSYS if SysV IPC is disallowed in this jail.
 *
 * MPSAFE
 */
int
shmdt(td, uap)
        struct thread *td;
        struct shmdt_args *uap;
{
        struct proc *p = td->td_proc;
        struct shmmap_state *shmmap_s;
        int i;
        int error = 0;

        if (!jail_sysvipc_allowed && jailed(td->td_ucred))
                return (ENOSYS);
        mtx_lock(&Giant);
        shmmap_s = p->p_vmspace->vm_shm;
        if (shmmap_s == NULL) {
                error = EINVAL;
                goto done2;
        }
        /* Find the attach record matching the given address exactly. */
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
                if (shmmap_s->shmid != -1 &&
                    shmmap_s->va == (vm_offset_t)uap->shmaddr) {
                        break;
                }
        }
        if (i == shminfo.shmseg) {
                error = EINVAL;
                goto done2;
        }
        error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done2:
        mtx_unlock(&Giant);
        return (error);
}
  289 
#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
        int shmid;
        const void *shmaddr;
        int shmflg;
};
#endif

/*
 * Guts of shmat(2): map the segment identified by 'shmid' into the
 * calling process at 'shmaddr' (or at a kernel-chosen address when
 * shmaddr == NULL), honoring SHM_RDONLY and SHM_RND in 'shmflg'.
 * On success the attach address is returned in td->td_retval[0].
 *
 * MPSAFE
 */
int
kern_shmat(td, shmid, shmaddr, shmflg)
        struct thread *td;
        int shmid;
        const void *shmaddr;
        int shmflg;
{
        struct proc *p = td->td_proc;
        int i, flags;
        struct shmid_ds *shmseg;
        struct shmmap_state *shmmap_s = NULL;
        struct shm_handle *shm_handle;
        vm_offset_t attach_va;
        vm_prot_t prot;
        vm_size_t size;
        int rv;
        int error = 0;

        if (!jail_sysvipc_allowed && jailed(td->td_ucred))
                return (ENOSYS);
        mtx_lock(&Giant);
        /* Lazily allocate this process's attach table on first use. */
        shmmap_s = p->p_vmspace->vm_shm;
        if (shmmap_s == NULL) {
                size = shminfo.shmseg * sizeof(struct shmmap_state);
                shmmap_s = malloc(size, M_SHM, M_WAITOK);
                for (i = 0; i < shminfo.shmseg; i++)
                        shmmap_s[i].shmid = -1;
                p->p_vmspace->vm_shm = shmmap_s;
        }
        shmseg = shm_find_segment_by_shmid(shmid);
        if (shmseg == NULL) {
                error = EINVAL;
                goto done2;
        }
        error = ipcperm(td, &shmseg->shm_perm,
            (shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
        if (error)
                goto done2;
        /* Find a free attach record; EMFILE when the table is full. */
        for (i = 0; i < shminfo.shmseg; i++) {
                if (shmmap_s->shmid == -1)
                        break;
                shmmap_s++;
        }
        if (i >= shminfo.shmseg) {
                error = EMFILE;
                goto done2;
        }
        size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
        prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
        prot = VM_PROT_READ;
#endif
        if ((shmflg & SHM_RDONLY) == 0)
                prot |= VM_PROT_WRITE;
        flags = MAP_ANON | MAP_SHARED;
        if (shmaddr) {
                /* Caller-specified address: SHM_RND rounds down to an
                 * SHMLBA boundary; otherwise it must already be aligned. */
                flags |= MAP_FIXED;
                if (shmflg & SHM_RND) {
                        attach_va = (vm_offset_t)shmaddr & ~(SHMLBA-1);
                } else if (((vm_offset_t)shmaddr & (SHMLBA-1)) == 0) {
                        attach_va = (vm_offset_t)shmaddr;
                } else {
                        error = EINVAL;
                        goto done2;
                }
        } else {
                /*
                 * This is just a hint to vm_map_find() about where to
                 * put it.
                 */
                attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr
                    + maxtsiz + maxdsiz);
        }

        /* Take a reference for this mapping; dropped on failure below
         * or eventually by vm_map_remove() at detach time. */
        shm_handle = shmseg->shm_internal;
        vm_object_reference(shm_handle->shm_object);
        rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
                0, &attach_va, size, (flags & MAP_FIXED)?0:1, prot, prot, 0);
        if (rv != KERN_SUCCESS) {
                vm_object_deallocate(shm_handle->shm_object);
                error = ENOMEM;
                goto done2;
        }
        /* Children of this process share the mapping across fork(). */
        vm_map_inherit(&p->p_vmspace->vm_map,
                attach_va, attach_va + size, VM_INHERIT_SHARE);

        shmmap_s->va = attach_va;
        shmmap_s->shmid = shmid;
        shmseg->shm_lpid = p->p_pid;
        shmseg->shm_atime = time_second;
        shmseg->shm_nattch++;
        td->td_retval[0] = attach_va;
done2:
        mtx_unlock(&Giant);
        return (error);
}
  398 
  399 int
  400 shmat(td, uap)
  401         struct thread *td;
  402         struct shmat_args *uap;
  403 {
  404         return kern_shmat(td, uap->shmid, uap->shmaddr, uap->shmflg);
  405 }
  406 
/* Old (4.3BSD-era) shmid_ds layout, kept for COMPAT_43 binaries. */
struct oshmid_ds {
        struct  ipc_perm shm_perm;      /* operation perms */
        int     shm_segsz;              /* size of segment (bytes) */
        u_short shm_cpid;               /* pid, creator */
        u_short shm_lpid;               /* pid, last operation */
        short   shm_nattch;             /* no. of current attaches */
        time_t  shm_atime;              /* last attach time */
        time_t  shm_dtime;              /* last detach time */
        time_t  shm_ctime;              /* last change time */
        void    *shm_handle;            /* internal handle for shm segment */
};

struct oshmctl_args {
        int shmid;
        int cmd;
        struct oshmid_ds *ubuf;
};

/*
 * Old-style shmctl(2) for COMPAT_43: IPC_STAT is translated into the
 * old oshmid_ds layout; every other command falls through to the
 * native shmctl().  Without COMPAT_43 this always returns EINVAL.
 *
 * MPSAFE
 */
static int
oshmctl(td, uap)
        struct thread *td;
        struct oshmctl_args *uap;
{
#ifdef COMPAT_43
        int error = 0;
        struct shmid_ds *shmseg;
        struct oshmid_ds outbuf;

        if (!jail_sysvipc_allowed && jailed(td->td_ucred))
                return (ENOSYS);
        mtx_lock(&Giant);
        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL) {
                error = EINVAL;
                goto done2;
        }
        switch (uap->cmd) {
        case IPC_STAT:
                error = ipcperm(td, &shmseg->shm_perm, IPC_R);
                if (error)
                        goto done2;
                /* Field-by-field translation to the old structure. */
                outbuf.shm_perm = shmseg->shm_perm;
                outbuf.shm_segsz = shmseg->shm_segsz;
                outbuf.shm_cpid = shmseg->shm_cpid;
                outbuf.shm_lpid = shmseg->shm_lpid;
                outbuf.shm_nattch = shmseg->shm_nattch;
                outbuf.shm_atime = shmseg->shm_atime;
                outbuf.shm_dtime = shmseg->shm_dtime;
                outbuf.shm_ctime = shmseg->shm_ctime;
                outbuf.shm_handle = shmseg->shm_internal;
                error = copyout(&outbuf, uap->ubuf, sizeof(outbuf));
                if (error)
                        goto done2;
                break;
        default:
                /* XXX casting to (sy_call_t *) is bogus, as usual. */
                error = ((sy_call_t *)shmctl)(td, uap);
                break;
        }
done2:
        mtx_unlock(&Giant);
        return (error);
#else
        return (EINVAL);
#endif
}
  476 
#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
        int shmid;
        int cmd;
        struct shmid_ds *buf;
};
#endif

/*
 * Guts of shmctl(2), operating on kernel-space buffers.  'buf' points
 * to a caller-supplied kernel buffer: an input for IPC_SET, an output
 * for IPC_INFO/SHM_INFO/SHM_STAT/IPC_STAT.  When 'bufsz' is non-NULL
 * the number of bytes written to 'buf' is stored through it.
 * For SHM_STAT 'shmid' is a raw array index rather than an IPC id.
 *
 * MPSAFE
 */
int
kern_shmctl(td, shmid, cmd, buf, bufsz)
        struct thread *td;
        int shmid;
        int cmd;
        void *buf;
        size_t *bufsz;
{
        int error = 0;
        struct shmid_ds *shmseg;

        if (!jail_sysvipc_allowed && jailed(td->td_ucred))
                return (ENOSYS);

        mtx_lock(&Giant);
        /* Info commands need no segment lookup; handle them first. */
        switch (cmd) {
        case IPC_INFO:
                memcpy(buf, &shminfo, sizeof(shminfo));
                if (bufsz)
                        *bufsz = sizeof(shminfo);
                td->td_retval[0] = shmalloced;
                goto done2;
        case SHM_INFO: {
                struct shm_info shm_info;
                shm_info.used_ids = shm_nused;
                shm_info.shm_rss = 0;   /*XXX where to get from ? */
                shm_info.shm_tot = 0;   /*XXX where to get from ? */
                shm_info.shm_swp = 0;   /*XXX where to get from ? */
                shm_info.swap_attempts = 0;     /*XXX where to get from ? */
                shm_info.swap_successes = 0;    /*XXX where to get from ? */
                memcpy(buf, &shm_info, sizeof(shm_info));
                if (bufsz)
                        *bufsz = sizeof(shm_info);
                td->td_retval[0] = shmalloced;
                goto done2;
        }
        }
        if (cmd == SHM_STAT)
                shmseg = shm_find_segment_by_shmidx(shmid);
        else
                shmseg = shm_find_segment_by_shmid(shmid);
        if (shmseg == NULL) {
                error = EINVAL;
                goto done2;
        }
        switch (cmd) {
        case SHM_STAT:
        case IPC_STAT:
                error = ipcperm(td, &shmseg->shm_perm, IPC_R);
                if (error)
                        goto done2;
                memcpy(buf, shmseg, sizeof(struct shmid_ds));
                if (bufsz)
                        *bufsz = sizeof(struct shmid_ds);
                /* SHM_STAT additionally returns the segment's IPC id. */
                if (cmd == SHM_STAT)
                        td->td_retval[0] = IXSEQ_TO_IPCID(shmid, shmseg->shm_perm);
                break;
        case IPC_SET: {
                struct shmid_ds *shmid;

                shmid = (struct shmid_ds *)buf;
                error = ipcperm(td, &shmseg->shm_perm, IPC_M);
                if (error)
                        goto done2;
                /* Only owner, group, and the access-permission bits may
                 * be changed; the state bits above ACCESSPERMS are kept. */
                shmseg->shm_perm.uid = shmid->shm_perm.uid;
                shmseg->shm_perm.gid = shmid->shm_perm.gid;
                shmseg->shm_perm.mode =
                    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
                    (shmid->shm_perm.mode & ACCESSPERMS);
                shmseg->shm_ctime = time_second;
                break;
        }
        case IPC_RMID:
                error = ipcperm(td, &shmseg->shm_perm, IPC_M);
                if (error)
                        goto done2;
                /* Detach the key now; the segment itself is destroyed
                 * once the last attach goes away. */
                shmseg->shm_perm.key = IPC_PRIVATE;
                shmseg->shm_perm.mode |= SHMSEG_REMOVED;
                if (shmseg->shm_nattch <= 0) {
                        shm_deallocate_segment(shmseg);
                        shm_last_free = IPCID_TO_IX(shmid);
                }
                break;
#if 0
        case SHM_LOCK:
        case SHM_UNLOCK:
#endif
        default:
                error = EINVAL;
                break;
        }
done2:
        mtx_unlock(&Giant);
        return (error);
}
  583 
/*
 * shmctl(2) system call entry point: copies the user buffer in for
 * IPC_SET, invokes kern_shmctl() on a kernel-space shmid_ds, and
 * copies results back out for the commands that produce output.
 */
int
shmctl(td, uap)
        struct thread *td;
        struct shmctl_args *uap;
{
        int error = 0;
        struct shmid_ds buf;
        size_t bufsz;
        
        /* IPC_SET needs to copyin the buffer before calling kern_shmctl */
        if (uap->cmd == IPC_SET) {
                if ((error = copyin(uap->buf, &buf, sizeof(struct shmid_ds))))
                        goto done;
        }
        
        /* kern_shmctl() sets bufsz for every command copied out below. */
        error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz);
        if (error)
                goto done;
        
        /* Cases in which we need to copyout */
        switch (uap->cmd) {
        case IPC_INFO:
        case SHM_INFO:
        case SHM_STAT:
        case IPC_STAT:
                error = copyout(&buf, uap->buf, bufsz);
                break;
        }

done:
        if (error) {
                /* Invalidate the return value */
                td->td_retval[0] = -1;
        }
        return (error);
}
  620 
  621 
#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
        key_t key;
        size_t size;
        int shmflg;
};
#endif

/*
 * shmget(2) helper: the requested key matched segment 'segnum'.
 * Validates flags, permissions, and size against the existing segment
 * and returns its IPC id in td->td_retval[0].  Returns EAGAIN to tell
 * the caller to redo the key lookup after sleeping on a segment that
 * was still being set up.
 */
static int
shmget_existing(td, uap, mode, segnum)
        struct thread *td;
        struct shmget_args *uap;
        int mode;
        int segnum;
{
        struct shmid_ds *shmseg;
        int error;

        shmseg = &shmsegs[segnum];
        if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
                /*
                 * This segment is in the process of being allocated.  Wait
                 * until it's done, and look the key up again (in case the
                 * allocation failed or it was freed).
                 */
                shmseg->shm_perm.mode |= SHMSEG_WANTED;
                error = tsleep(shmseg, PLOCK | PCATCH, "shmget", 0);
                if (error)
                        return (error);
                return (EAGAIN);
        }
        if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
                return (EEXIST);
        error = ipcperm(td, &shmseg->shm_perm, mode);
        if (error)
                return (error);
        /* The existing segment must be large enough for the request. */
        if (uap->size && uap->size > shmseg->shm_segsz)
                return (EINVAL);
        td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
        return (0);
}
  663 
/*
 * shmget(2) helper: create a brand new segment of uap->size bytes with
 * access bits 'mode'.  Picks a free slot (growing shmsegs[] if needed),
 * allocates swap- or phys-backed VM, and returns the new IPC id in
 * td->td_retval[0].  Caller holds Giant.
 */
static int
shmget_allocate_segment(td, uap, mode)
        struct thread *td;
        struct shmget_args *uap;
        int mode;
{
        int i, segnum, shmid, size;
        struct ucred *cred = td->td_ucred;
        struct shmid_ds *shmseg;
        struct shm_handle *shm_handle;

        GIANT_REQUIRED;

        if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
                return (EINVAL);
        if (shm_nused >= shminfo.shmmni) /* Any shmids left? */
                return (ENOSPC);
        size = round_page(uap->size);
        if (shm_committed + btoc(size) > shminfo.shmall)
                return (ENOMEM);
        /* Prefer the cached free slot; otherwise scan (after possibly
         * growing the array) for the first SHMSEG_FREE entry. */
        if (shm_last_free < 0) {
                shmrealloc();   /* Maybe expand the shmsegs[] array. */
                for (i = 0; i < shmalloced; i++)
                        if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
                                break;
                if (i == shmalloced)
                        return (ENOSPC);
                segnum = i;
        } else  {
                segnum = shm_last_free;
                shm_last_free = -1;
        }
        shmseg = &shmsegs[segnum];
        /*
         * In case we sleep in malloc(), mark the segment present but deleted
         * so that no one else tries to create the same key.
         */
        shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
        shmseg->shm_perm.key = uap->key;
        shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
        shm_handle = (struct shm_handle *)
            malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
        shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
        
        /*
         * We make sure that we have allocated a pager before we need
         * to.
         */
        if (shm_use_phys) {
                shm_handle->shm_object =
                    vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
        } else {
                shm_handle->shm_object =
                    vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
        }
        VM_OBJECT_LOCK(shm_handle->shm_object);
        vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
        vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);
        VM_OBJECT_UNLOCK(shm_handle->shm_object);

        shmseg->shm_internal = shm_handle;
        shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
        shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
        /* Clear SHMSEG_REMOVED: the segment is now fully constructed. */
        shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
            (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
        shmseg->shm_segsz = uap->size;
        shmseg->shm_cpid = td->td_proc->p_pid;
        shmseg->shm_lpid = shmseg->shm_nattch = 0;
        shmseg->shm_atime = shmseg->shm_dtime = 0;
        shmseg->shm_ctime = time_second;
        shm_committed += btoc(size);
        shm_nused++;
        if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
                /*
                 * Somebody else wanted this key while we were asleep.  Wake
                 * them up now.
                 */
                shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
                wakeup(shmseg);
        }
        td->td_retval[0] = shmid;
        return (0);
}
  747 
/*
 * shmget(2): look up (or, with IPC_CREAT, create) the segment for
 * 'key'.  IPC_PRIVATE always creates a fresh segment.  The EAGAIN
 * retry loop handles racing with a segment that was mid-construction
 * when we first looked it up.
 *
 * MPSAFE
 */
int
shmget(td, uap)
        struct thread *td;
        struct shmget_args *uap;
{
        int segnum, mode;
        int error;

        if (!jail_sysvipc_allowed && jailed(td->td_ucred))
                return (ENOSYS);
        mtx_lock(&Giant);
        mode = uap->shmflg & ACCESSPERMS;
        if (uap->key != IPC_PRIVATE) {
        again:
                segnum = shm_find_segment_by_key(uap->key);
                if (segnum >= 0) {
                        error = shmget_existing(td, uap, mode, segnum);
                        if (error == EAGAIN)
                                goto again;
                        goto done2;
                }
                if ((uap->shmflg & IPC_CREAT) == 0) {
                        error = ENOENT;
                        goto done2;
                }
        }
        error = shmget_allocate_segment(td, uap, mode);
done2:
        mtx_unlock(&Giant);
        return (error);
}
  782 
/*
 * shmsys(2): legacy multiplexed entry point.  'which' selects the real
 * call from shmcalls[] (shmat, oshmctl, shmdt, shmget, shmctl) and the
 * remaining words are forwarded as that call's argument structure.
 *
 * MPSAFE
 */
int
shmsys(td, uap)
        struct thread *td;
        /* XXX actually varargs. */
        struct shmsys_args /* {
                int     which;
                int     a2;
                int     a3;
                int     a4;
        } */ *uap;
{
        int error;

        if (!jail_sysvipc_allowed && jailed(td->td_ucred))
                return (ENOSYS);
        /* Bounds-check the selector before indexing the table. */
        if (uap->which < 0 ||
            uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
                return (EINVAL);
        mtx_lock(&Giant);
        error = (*shmcalls[uap->which])(td, &uap->a2);
        mtx_unlock(&Giant);
        return (error);
}
  809 
/*
 * Fork hook: give the child a copy of the parent's attach table and
 * bump the attach count of every segment the parent has mapped (the
 * VM mappings themselves are inherited via VM_INHERIT_SHARE).
 * NOTE(review): assumes p1->p_vmspace->vm_shm is non-NULL, i.e. that
 * the hook is only invoked for parents with SysV shm attached --
 * confirm against the caller in kern_fork.
 */
static void
shmfork_myhook(p1, p2)
        struct proc *p1, *p2;
{
        struct shmmap_state *shmmap_s;
        size_t size;
        int i;

        size = shminfo.shmseg * sizeof(struct shmmap_state);
        shmmap_s = malloc(size, M_SHM, M_WAITOK);
        bcopy(p1->p_vmspace->vm_shm, shmmap_s, size);
        p2->p_vmspace->vm_shm = shmmap_s;
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
                if (shmmap_s->shmid != -1)
                        shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}
  826 
  827 static void
  828 shmexit_myhook(struct vmspace *vm)
  829 {
  830         struct shmmap_state *base, *shm;
  831         int i;
  832 
  833         GIANT_REQUIRED;
  834 
  835         if ((base = vm->vm_shm) != NULL) {
  836                 vm->vm_shm = NULL;
  837                 for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
  838                         if (shm->shmid != -1)
  839                                 shm_delete_mapping(vm, shm);
  840                 }
  841                 free(base, M_SHM);
  842         }
  843 }
  844 
  845 static void
  846 shmrealloc(void)
  847 {
  848         int i;
  849         struct shmid_ds *newsegs;
  850 
  851         if (shmalloced >= shminfo.shmmni)
  852                 return;
  853 
  854         newsegs = malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
  855         if (newsegs == NULL)
  856                 return;
  857         for (i = 0; i < shmalloced; i++)
  858                 bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
  859         for (; i < shminfo.shmmni; i++) {
  860                 shmsegs[i].shm_perm.mode = SHMSEG_FREE;
  861                 shmsegs[i].shm_perm.seq = 0;
  862         }
  863         free(shmsegs, M_SHM);
  864         shmsegs = newsegs;
  865         shmalloced = shminfo.shmmni;
  866 }
  867 
  868 static void
  869 shminit()
  870 {
  871         int i;
  872 
  873         TUNABLE_INT_FETCH("kern.ipc.shmmaxpgs", &shminfo.shmall);
  874         for (i = PAGE_SIZE; i > 0; i--) {
  875                 shminfo.shmmax = shminfo.shmall * PAGE_SIZE;
  876                 if (shminfo.shmmax >= shminfo.shmall)
  877                         break;
  878         }
  879         TUNABLE_INT_FETCH("kern.ipc.shmmin", &shminfo.shmmin);
  880         TUNABLE_INT_FETCH("kern.ipc.shmmni", &shminfo.shmmni);
  881         TUNABLE_INT_FETCH("kern.ipc.shmseg", &shminfo.shmseg);
  882         TUNABLE_INT_FETCH("kern.ipc.shm_use_phys", &shm_use_phys);
  883 
  884         shmalloced = shminfo.shmmni;
  885         shmsegs = malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
  886         if (shmsegs == NULL)
  887                 panic("cannot allocate initial memory for sysvshm");
  888         for (i = 0; i < shmalloced; i++) {
  889                 shmsegs[i].shm_perm.mode = SHMSEG_FREE;
  890                 shmsegs[i].shm_perm.seq = 0;
  891         }
  892         shm_last_free = 0;
  893         shm_nused = 0;
  894         shm_committed = 0;
  895         shmexit_hook = &shmexit_myhook;
  896         shmfork_hook = &shmfork_myhook;
  897 }
  898 
  899 static int
  900 shmunload()
  901 {
  902 
  903         if (shm_nused > 0)
  904                 return (EBUSY);
  905 
  906         free(shmsegs, M_SHM);
  907         shmexit_hook = NULL;
  908         shmfork_hook = NULL;
  909         return (0);
  910 }
  911 
/*
 * Handler for the kern.ipc.shmsegs sysctl: exports the entire
 * shmsegs[] array (shmalloced entries) to readers such as ipcs(1).
 */
static int
sysctl_shmsegs(SYSCTL_HANDLER_ARGS)
{

        return (SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0])));
}
  918 
  919 static int
  920 sysvshm_modload(struct module *module, int cmd, void *arg)
  921 {
  922         int error = 0;
  923 
  924         switch (cmd) {
  925         case MOD_LOAD:
  926                 shminit();
  927                 break;
  928         case MOD_UNLOAD:
  929                 error = shmunload();
  930                 break;
  931         case MOD_SHUTDOWN:
  932                 break;
  933         default:
  934                 error = EINVAL;
  935                 break;
  936         }
  937         return (error);
  938 }
  939 
/* Module glue: register sysvshm and the five system calls it provides. */
static moduledata_t sysvshm_mod = {
        "sysvshm",
        &sysvshm_modload,
        NULL
};

SYSCALL_MODULE_HELPER(shmsys);
SYSCALL_MODULE_HELPER(shmat);
SYSCALL_MODULE_HELPER(shmctl);
SYSCALL_MODULE_HELPER(shmdt);
SYSCALL_MODULE_HELPER(shmget);

/* Initialize after VM is up, at the SysV shm SYSINIT slot. */
DECLARE_MODULE(sysvshm, sysvshm_mod,
        SI_SUB_SYSV_SHM, SI_ORDER_FIRST);
MODULE_VERSION(sysvshm, 1);

Cache object: 3e9d3576ebb56aa10681e65986e3da67


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.