FreeBSD/Linux Kernel Cross Reference
sys/kern/sysv_shm.c


    1 /*      $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $      */
    2 /*-
    3  * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  * 3. All advertising materials mentioning features or use of this software
   14  *    must display the following acknowledgement:
   15  *      This product includes software developed by Adam Glass and Charles
   16  *      Hannum.
   17  * 4. The names of the authors may not be used to endorse or promote products
   18  *    derived from this software without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
   21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   23  * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   30  */
   31 /*-
   32  * Copyright (c) 2003-2005 McAfee, Inc.
   33  * All rights reserved.
   34  *
   35  * This software was developed for the FreeBSD Project in part by McAfee
   36  * Research, the Security Research Division of McAfee, Inc under DARPA/SPAWAR
   37  * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS research
   38  * program.
   39  *
   40  * Redistribution and use in source and binary forms, with or without
   41  * modification, are permitted provided that the following conditions
   42  * are met:
   43  * 1. Redistributions of source code must retain the above copyright
   44  *    notice, this list of conditions and the following disclaimer.
   45  * 2. Redistributions in binary form must reproduce the above copyright
   46  *    notice, this list of conditions and the following disclaimer in the
   47  *    documentation and/or other materials provided with the distribution.
   48  *
   49  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   50  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   51  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   52  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   53  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   54  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   55  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   56  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   57  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   58  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   59  * SUCH DAMAGE.
   60  */
   61 
   62 #include <sys/cdefs.h>
   63 __FBSDID("$FreeBSD: releng/6.4/sys/kern/sysv_shm.c 174233 2007-12-03 21:47:33Z jhb $");
   64 
   65 #include "opt_compat.h"
   66 #include "opt_sysvipc.h"
   67 #include "opt_mac.h"
   68 
   69 #include <sys/param.h>
   70 #include <sys/systm.h>
   71 #include <sys/kernel.h>
   72 #include <sys/lock.h>
   73 #include <sys/sysctl.h>
   74 #include <sys/shm.h>
   75 #include <sys/proc.h>
   76 #include <sys/malloc.h>
   77 #include <sys/mman.h>
   78 #include <sys/module.h>
   79 #include <sys/mutex.h>
   80 #include <sys/resourcevar.h>
   81 #include <sys/stat.h>
   82 #include <sys/syscall.h>
   83 #include <sys/syscallsubr.h>
   84 #include <sys/sysent.h>
   85 #include <sys/sysproto.h>
   86 #include <sys/jail.h>
   87 #include <sys/mac.h>
   88 
   89 #include <vm/vm.h>
   90 #include <vm/vm_param.h>
   91 #include <vm/pmap.h>
   92 #include <vm/vm_object.h>
   93 #include <vm/vm_map.h>
   94 #include <vm/vm_page.h>
   95 #include <vm/vm_pager.h>
   96 
   97 #ifdef MAC_DEBUG
   98 #define MPRINTF(a)      printf a
   99 #else
  100 #define MPRINTF(a)      
  101 #endif
  102 
  103 static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");
  104 
  105 #if defined(__i386__) && (defined(COMPAT_FREEBSD4) || defined(COMPAT_43))
  106 struct oshmctl_args;
  107 static int oshmctl(struct thread *td, struct oshmctl_args *uap);
  108 #endif
  109 
  110 static int shmget_allocate_segment(struct thread *td,
  111     struct shmget_args *uap, int mode);
  112 static int shmget_existing(struct thread *td, struct shmget_args *uap,
  113     int mode, int segnum);
  114 
  115 #if defined(__i386__) && (defined(COMPAT_FREEBSD4) || defined(COMPAT_43))
  116 /* XXX casting to (sy_call_t *) is bogus, as usual. */
  117 static sy_call_t *shmcalls[] = {
  118         (sy_call_t *)shmat, (sy_call_t *)oshmctl,
  119         (sy_call_t *)shmdt, (sy_call_t *)shmget,
  120         (sy_call_t *)shmctl
  121 };
  122 #endif
  123 
  124 #define SHMSEG_FREE             0x0200
  125 #define SHMSEG_REMOVED          0x0400
  126 #define SHMSEG_ALLOCATED        0x0800
  127 #define SHMSEG_WANTED           0x1000
  128 
  129 static int shm_last_free, shm_nused, shm_committed, shmalloced;
  130 static struct shmid_kernel      *shmsegs;
  131 
  132 struct shmmap_state {
  133         vm_offset_t va;
  134         int shmid;
  135 };
  136 
  137 static void shm_deallocate_segment(struct shmid_kernel *);
  138 static int shm_find_segment_by_key(key_t);
  139 static struct shmid_kernel *shm_find_segment_by_shmid(int);
  140 static struct shmid_kernel *shm_find_segment_by_shmidx(int);
  141 static int shm_delete_mapping(struct vmspace *vm, struct shmmap_state *);
  142 static void shmrealloc(void);
  143 static void shminit(void);
  144 static int sysvshm_modload(struct module *, int, void *);
  145 static int shmunload(void);
  146 static void shmexit_myhook(struct vmspace *vm);
  147 static void shmfork_myhook(struct proc *p1, struct proc *p2);
  148 static int sysctl_shmsegs(SYSCTL_HANDLER_ARGS);
  149 
  150 /*
  151  * Tuneable values.
  152  */
  153 #ifndef SHMMAXPGS
  154 #define SHMMAXPGS       8192    /* Note: sysv shared memory is swap backed. */
  155 #endif
  156 #ifndef SHMMAX
  157 #define SHMMAX  (SHMMAXPGS*PAGE_SIZE)
  158 #endif
  159 #ifndef SHMMIN
  160 #define SHMMIN  1
  161 #endif
  162 #ifndef SHMMNI
  163 #define SHMMNI  192
  164 #endif
  165 #ifndef SHMSEG
  166 #define SHMSEG  128
  167 #endif
  168 #ifndef SHMALL
  169 #define SHMALL  (SHMMAXPGS)
  170 #endif
  171 
  172 struct  shminfo shminfo = {
  173         SHMMAX,
  174         SHMMIN,
  175         SHMMNI,
  176         SHMSEG,
  177         SHMALL
  178 };
  179 
  180 static int shm_use_phys;
  181 static int shm_allow_removed;
  182 
  183 SYSCTL_DECL(_kern_ipc);
  184 SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0,
  185     "Maximum shared memory segment size");
  186 SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0,
  187     "Minimum shared memory segment size");
  188 SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RDTUN, &shminfo.shmmni, 0,
  189     "Number of shared memory identifiers");
  190 SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RDTUN, &shminfo.shmseg, 0,
  191     "Number of segments per process");
  192 SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0,
  193     "Maximum number of pages available for shared memory");
  194 SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW,
  195     &shm_use_phys, 0, "Enable/Disable locking of shared memory pages in core");
  196 SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
  197     &shm_allow_removed, 0,
  198     "Enable/Disable attachment to attached segments marked for removal");
  199 SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLFLAG_RD,
  200     NULL, 0, sysctl_shmsegs, "",
  201     "Current number of shared memory segments allocated");
  202 
  203 static int
  204 shm_find_segment_by_key(key)
  205         key_t key;
  206 {
  207         int i;
  208 
  209         for (i = 0; i < shmalloced; i++)
  210                 if ((shmsegs[i].u.shm_perm.mode & SHMSEG_ALLOCATED) &&
  211                     shmsegs[i].u.shm_perm.key == key)
  212                         return (i);
  213         return (-1);
  214 }
  215 
  216 static struct shmid_kernel *
  217 shm_find_segment_by_shmid(int shmid)
  218 {
  219         int segnum;
  220         struct shmid_kernel *shmseg;
  221 
  222         segnum = IPCID_TO_IX(shmid);
  223         if (segnum < 0 || segnum >= shmalloced)
  224                 return (NULL);
  225         shmseg = &shmsegs[segnum];
  226         if ((shmseg->u.shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
  227             (!shm_allow_removed &&
  228              (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) != 0) ||
  229             shmseg->u.shm_perm.seq != IPCID_TO_SEQ(shmid))
  230                 return (NULL);
  231         return (shmseg);
  232 }
  233 
  234 static struct shmid_kernel *
  235 shm_find_segment_by_shmidx(int segnum)
  236 {
  237         struct shmid_kernel *shmseg;
  238 
  239         if (segnum < 0 || segnum >= shmalloced)
  240                 return (NULL);
  241         shmseg = &shmsegs[segnum];
  242         if ((shmseg->u.shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
  243             (!shm_allow_removed &&
  244              (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) != 0))
  245                 return (NULL);
  246         return (shmseg);
  247 }
  248 
  249 static void
  250 shm_deallocate_segment(shmseg)
  251         struct shmid_kernel *shmseg;
  252 {
  253         size_t size;
  254 
  255         GIANT_REQUIRED;
  256 
  257         vm_object_deallocate(shmseg->u.shm_internal);
  258         shmseg->u.shm_internal = NULL;
  259         size = round_page(shmseg->u.shm_segsz);
  260         shm_committed -= btoc(size);
  261         shm_nused--;
  262         shmseg->u.shm_perm.mode = SHMSEG_FREE;
  263 #ifdef MAC
  264         mac_cleanup_sysv_shm(shmseg);
  265 #endif
  266 }
  267 
  268 static int
  269 shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
  270 {
  271         struct shmid_kernel *shmseg;
  272         int segnum, result;
  273         size_t size;
  274 
  275         GIANT_REQUIRED;
  276 
  277         segnum = IPCID_TO_IX(shmmap_s->shmid);
  278         shmseg = &shmsegs[segnum];
  279         size = round_page(shmseg->u.shm_segsz);
  280         result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
  281         if (result != KERN_SUCCESS)
  282                 return (EINVAL);
  283         shmmap_s->shmid = -1;
  284         shmseg->u.shm_dtime = time_second;
  285         if ((--shmseg->u.shm_nattch <= 0) &&
  286             (shmseg->u.shm_perm.mode & SHMSEG_REMOVED)) {
  287                 shm_deallocate_segment(shmseg);
  288                 shm_last_free = segnum;
  289         }
  290         return (0);
  291 }
  292 
  293 #ifndef _SYS_SYSPROTO_H_
  294 struct shmdt_args {
  295         const void *shmaddr;
  296 };
  297 #endif
  298 
  299 /*
  300  * MPSAFE
  301  */
  302 int
  303 shmdt(td, uap)
  304         struct thread *td;
  305         struct shmdt_args *uap;
  306 {
  307         struct proc *p = td->td_proc;
  308         struct shmmap_state *shmmap_s;
  309 #ifdef MAC
  310         struct shmid_kernel *shmsegptr;
  311 #endif
  312         int i;
  313         int error = 0;
  314 
  315         if (!jail_sysvipc_allowed && jailed(td->td_ucred))
  316                 return (ENOSYS);
  317         mtx_lock(&Giant);
  318         shmmap_s = p->p_vmspace->vm_shm;
  319         if (shmmap_s == NULL) {
  320                 error = EINVAL;
  321                 goto done2;
  322         }
  323         for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
  324                 if (shmmap_s->shmid != -1 &&
  325                     shmmap_s->va == (vm_offset_t)uap->shmaddr) {
  326                         break;
  327                 }
  328         }
  329         if (i == shminfo.shmseg) {
  330                 error = EINVAL;
  331                 goto done2;
  332         }
  333 #ifdef MAC
  334         shmsegptr = &shmsegs[IPCID_TO_IX(shmmap_s->shmid)];
  335         error = mac_check_sysv_shmdt(td->td_ucred, shmsegptr);
  336         if (error != 0) {
  337                 MPRINTF(("mac_check_sysv_shmdt returned %d\n", error));
  338                 goto done2;
  339         }
  340 #endif
  341         error = shm_delete_mapping(p->p_vmspace, shmmap_s);
  342 done2:
  343         mtx_unlock(&Giant);
  344         return (error);
  345 }
  346 
  347 #ifndef _SYS_SYSPROTO_H_
  348 struct shmat_args {
  349         int shmid;
  350         const void *shmaddr;
  351         int shmflg;
  352 };
  353 #endif
  354 
  355 /*
  356  * MPSAFE
  357  */
  358 int
  359 kern_shmat(td, shmid, shmaddr, shmflg)
  360         struct thread *td;
  361         int shmid;
  362         const void *shmaddr;
  363         int shmflg;
  364 {
  365         struct proc *p = td->td_proc;
  366         int i, flags;
  367         struct shmid_kernel *shmseg;
  368         struct shmmap_state *shmmap_s = NULL;
  369         vm_offset_t attach_va;
  370         vm_prot_t prot;
  371         vm_size_t size;
  372         int rv;
  373         int error = 0;
  374 
  375         if (!jail_sysvipc_allowed && jailed(td->td_ucred))
  376                 return (ENOSYS);
  377         mtx_lock(&Giant);
  378         shmmap_s = p->p_vmspace->vm_shm;
  379         if (shmmap_s == NULL) {
  380                 size = shminfo.shmseg * sizeof(struct shmmap_state);
  381                 shmmap_s = malloc(size, M_SHM, M_WAITOK);
  382                 for (i = 0; i < shminfo.shmseg; i++)
  383                         shmmap_s[i].shmid = -1;
  384                 p->p_vmspace->vm_shm = shmmap_s;
  385         }
  386         shmseg = shm_find_segment_by_shmid(shmid);
  387         if (shmseg == NULL) {
  388                 error = EINVAL;
  389                 goto done2;
  390         }
  391         error = ipcperm(td, &shmseg->u.shm_perm,
  392             (shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
  393         if (error)
  394                 goto done2;
  395 #ifdef MAC
  396         error = mac_check_sysv_shmat(td->td_ucred, shmseg, shmflg);
  397         if (error != 0) {
  398                 MPRINTF(("mac_check_sysv_shmat returned %d\n", error));
  399                 goto done2;
  400         }
  401 #endif
  402         for (i = 0; i < shminfo.shmseg; i++) {
  403                 if (shmmap_s->shmid == -1)
  404                         break;
  405                 shmmap_s++;
  406         }
  407         if (i >= shminfo.shmseg) {
  408                 error = EMFILE;
  409                 goto done2;
  410         }
  411         size = round_page(shmseg->u.shm_segsz);
  412 #ifdef VM_PROT_READ_IS_EXEC
  413         prot = VM_PROT_READ | VM_PROT_EXECUTE;
  414 #else
  415         prot = VM_PROT_READ;
  416 #endif
  417         if ((shmflg & SHM_RDONLY) == 0)
  418                 prot |= VM_PROT_WRITE;
  419         flags = MAP_ANON | MAP_SHARED;
  420         if (shmaddr) {
  421                 flags |= MAP_FIXED;
  422                 if (shmflg & SHM_RND) {
  423                         attach_va = (vm_offset_t)shmaddr & ~(SHMLBA-1);
  424                 } else if (((vm_offset_t)shmaddr & (SHMLBA-1)) == 0) {
  425                         attach_va = (vm_offset_t)shmaddr;
  426                 } else {
  427                         error = EINVAL;
  428                         goto done2;
  429                 }
  430         } else {
  431                 /*
  432                  * This is just a hint to vm_map_find() about where to
  433                  * put it.
  434                  */
  435                 PROC_LOCK(p);
  436                 attach_va = round_page((vm_offset_t)p->p_vmspace->vm_daddr +
  437                     lim_max(p, RLIMIT_DATA));
  438                 PROC_UNLOCK(p);
  439         }
  440 
  441         vm_object_reference(shmseg->u.shm_internal);
  442         rv = vm_map_find(&p->p_vmspace->vm_map, shmseg->u.shm_internal,
  443                 0, &attach_va, size, (flags & MAP_FIXED)?0:1, prot, prot, 0);
  444         if (rv != KERN_SUCCESS) {
  445                 vm_object_deallocate(shmseg->u.shm_internal);
  446                 error = ENOMEM;
  447                 goto done2;
  448         }
  449         vm_map_inherit(&p->p_vmspace->vm_map,
  450                 attach_va, attach_va + size, VM_INHERIT_SHARE);
  451 
  452         shmmap_s->va = attach_va;
  453         shmmap_s->shmid = shmid;
  454         shmseg->u.shm_lpid = p->p_pid;
  455         shmseg->u.shm_atime = time_second;
  456         shmseg->u.shm_nattch++;
  457         td->td_retval[0] = attach_va;
  458 done2:
  459         mtx_unlock(&Giant);
  460         return (error);
  461 }
  462 
  463 int
  464 shmat(td, uap)
  465         struct thread *td;
  466         struct shmat_args *uap;
  467 {
  468         return kern_shmat(td, uap->shmid, uap->shmaddr, uap->shmflg);
  469 }
  470 
  471 #if defined(__i386__) && (defined(COMPAT_FREEBSD4) || defined(COMPAT_43))
  472 struct oshmid_ds {
  473         struct  ipc_perm shm_perm;      /* operation perms */
  474         int     shm_segsz;              /* size of segment (bytes) */
  475         u_short shm_cpid;               /* pid, creator */
  476         u_short shm_lpid;               /* pid, last operation */
  477         short   shm_nattch;             /* no. of current attaches */
  478         time_t  shm_atime;              /* last attach time */
  479         time_t  shm_dtime;              /* last detach time */
  480         time_t  shm_ctime;              /* last change time */
  481         void    *shm_handle;            /* internal handle for shm segment */
  482 };
  483 
  484 struct oshmctl_args {
  485         int shmid;
  486         int cmd;
  487         struct oshmid_ds *ubuf;
  488 };
  489 
  490 /*
  491  * MPSAFE
  492  */
  493 static int
  494 oshmctl(td, uap)
  495         struct thread *td;
  496         struct oshmctl_args *uap;
  497 {
  498 #ifdef COMPAT_43
  499         int error = 0;
  500         struct shmid_kernel *shmseg;
  501         struct oshmid_ds outbuf;
  502 
  503         if (!jail_sysvipc_allowed && jailed(td->td_ucred))
  504                 return (ENOSYS);
  505         mtx_lock(&Giant);
  506         shmseg = shm_find_segment_by_shmid(uap->shmid);
  507         if (shmseg == NULL) {
  508                 error = EINVAL;
  509                 goto done2;
  510         }
  511         switch (uap->cmd) {
  512         case IPC_STAT:
  513                 error = ipcperm(td, &shmseg->u.shm_perm, IPC_R);
  514                 if (error)
  515                         goto done2;
  516 #ifdef MAC
  517                 error = mac_check_sysv_shmctl(td->td_ucred, shmseg, uap->cmd);
  518                 if (error != 0) {
  519                         MPRINTF(("mac_check_sysv_shmctl returned %d\n",
  520                             error));
  521                         goto done2;
  522                 }
  523 #endif
  524                 outbuf.shm_perm = shmseg->u.shm_perm;
  525                 outbuf.shm_segsz = shmseg->u.shm_segsz;
  526                 outbuf.shm_cpid = shmseg->u.shm_cpid;
  527                 outbuf.shm_lpid = shmseg->u.shm_lpid;
  528                 outbuf.shm_nattch = shmseg->u.shm_nattch;
  529                 outbuf.shm_atime = shmseg->u.shm_atime;
  530                 outbuf.shm_dtime = shmseg->u.shm_dtime;
  531                 outbuf.shm_ctime = shmseg->u.shm_ctime;
  532                 outbuf.shm_handle = shmseg->u.shm_internal;
  533                 error = copyout(&outbuf, uap->ubuf, sizeof(outbuf));
  534                 if (error)
  535                         goto done2;
  536                 break;
  537         default:
  538                 error = shmctl(td, (struct shmctl_args *)uap);
  539                 break;
  540         }
  541 done2:
  542         mtx_unlock(&Giant);
  543         return (error);
  544 #else
  545         return (EINVAL);
  546 #endif
  547 }
  548 #endif
  549 
  550 #ifndef _SYS_SYSPROTO_H_
  551 struct shmctl_args {
  552         int shmid;
  553         int cmd;
  554         struct shmid_ds *buf;
  555 };
  556 #endif
  557 
  558 /*
  559  * MPSAFE
  560  */
  561 int
  562 kern_shmctl(td, shmid, cmd, buf, bufsz)
  563         struct thread *td;
  564         int shmid;
  565         int cmd;
  566         void *buf;
  567         size_t *bufsz;
  568 {
  569         int error = 0;
  570         struct shmid_kernel *shmseg;
  571 
  572         if (!jail_sysvipc_allowed && jailed(td->td_ucred))
  573                 return (ENOSYS);
  574 
  575         mtx_lock(&Giant);
  576         switch (cmd) {
  577         case IPC_INFO:
  578                 memcpy(buf, &shminfo, sizeof(shminfo));
  579                 if (bufsz)
  580                         *bufsz = sizeof(shminfo);
  581                 td->td_retval[0] = shmalloced;
  582                 goto done2;
  583         case SHM_INFO: {
  584                 struct shm_info shm_info;
  585                 shm_info.used_ids = shm_nused;
  586                 shm_info.shm_rss = 0;   /*XXX where to get from ? */
  587                 shm_info.shm_tot = 0;   /*XXX where to get from ? */
  588                 shm_info.shm_swp = 0;   /*XXX where to get from ? */
  589                 shm_info.swap_attempts = 0;     /*XXX where to get from ? */
  590                 shm_info.swap_successes = 0;    /*XXX where to get from ? */
  591                 memcpy(buf, &shm_info, sizeof(shm_info));
  592                 if (bufsz)
  593                         *bufsz = sizeof(shm_info);
  594                 td->td_retval[0] = shmalloced;
  595                 goto done2;
  596         }
  597         }
  598         if (cmd == SHM_STAT)
  599                 shmseg = shm_find_segment_by_shmidx(shmid);
  600         else
  601                 shmseg = shm_find_segment_by_shmid(shmid);
  602         if (shmseg == NULL) {
  603                 error = EINVAL;
  604                 goto done2;
  605         }
  606 #ifdef MAC
  607         error = mac_check_sysv_shmctl(td->td_ucred, shmseg, cmd);
  608         if (error != 0) {
  609                 MPRINTF(("mac_check_sysv_shmctl returned %d\n", error));
  610                 goto done2;
  611         }
  612 #endif
  613         switch (cmd) {
  614         case SHM_STAT:
  615         case IPC_STAT:
  616                 error = ipcperm(td, &shmseg->u.shm_perm, IPC_R);
  617                 if (error)
  618                         goto done2;
  619                 memcpy(buf, &shmseg->u, sizeof(struct shmid_ds));
  620                 if (bufsz)
  621                         *bufsz = sizeof(struct shmid_ds);
  622                 if (cmd == SHM_STAT)
  623                         td->td_retval[0] = IXSEQ_TO_IPCID(shmid, shmseg->u.shm_perm);
  624                 break;
  625         case IPC_SET: {
  626                 struct shmid_ds *shmid;
  627 
  628                 shmid = (struct shmid_ds *)buf;
  629                 error = ipcperm(td, &shmseg->u.shm_perm, IPC_M);
  630                 if (error)
  631                         goto done2;
  632                 shmseg->u.shm_perm.uid = shmid->shm_perm.uid;
  633                 shmseg->u.shm_perm.gid = shmid->shm_perm.gid;
  634                 shmseg->u.shm_perm.mode =
  635                     (shmseg->u.shm_perm.mode & ~ACCESSPERMS) |
  636                     (shmid->shm_perm.mode & ACCESSPERMS);
  637                 shmseg->u.shm_ctime = time_second;
  638                 break;
  639         }
  640         case IPC_RMID:
  641                 error = ipcperm(td, &shmseg->u.shm_perm, IPC_M);
  642                 if (error)
  643                         goto done2;
  644                 shmseg->u.shm_perm.key = IPC_PRIVATE;
  645                 shmseg->u.shm_perm.mode |= SHMSEG_REMOVED;
  646                 if (shmseg->u.shm_nattch <= 0) {
  647                         shm_deallocate_segment(shmseg);
  648                         shm_last_free = IPCID_TO_IX(shmid);
  649                 }
  650                 break;
  651 #if 0
  652         case SHM_LOCK:
  653         case SHM_UNLOCK:
  654 #endif
  655         default:
  656                 error = EINVAL;
  657                 break;
  658         }
  659 done2:
  660         mtx_unlock(&Giant);
  661         return (error);
  662 }
  663 
  664 int
  665 shmctl(td, uap)
  666         struct thread *td;
  667         struct shmctl_args *uap;
  668 {
  669         int error = 0;
  670         struct shmid_ds buf;
  671         size_t bufsz;
  672         
  673         /* IPC_SET needs to copyin the buffer before calling kern_shmctl */
  674         if (uap->cmd == IPC_SET) {
  675                 if ((error = copyin(uap->buf, &buf, sizeof(struct shmid_ds))))
  676                         goto done;
  677         }
  678         
  679         error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz);
  680         if (error)
  681                 goto done;
  682         
  683         /* Cases in which we need to copyout */
  684         switch (uap->cmd) {
  685         case IPC_INFO:
  686         case SHM_INFO:
  687         case SHM_STAT:
  688         case IPC_STAT:
  689                 error = copyout(&buf, uap->buf, bufsz);
  690                 break;
  691         }
  692 
  693 done:
  694         if (error) {
  695                 /* Invalidate the return value */
  696                 td->td_retval[0] = -1;
  697         }
  698         return (error);
  699 }
  700 
  701 
  702 #ifndef _SYS_SYSPROTO_H_
  703 struct shmget_args {
  704         key_t key;
  705         size_t size;
  706         int shmflg;
  707 };
  708 #endif
  709 
  710 static int
  711 shmget_existing(td, uap, mode, segnum)
  712         struct thread *td;
  713         struct shmget_args *uap;
  714         int mode;
  715         int segnum;
  716 {
  717         struct shmid_kernel *shmseg;
  718         int error;
  719 
  720         shmseg = &shmsegs[segnum];
  721         if (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) {
  722                 /*
  723                  * This segment is in the process of being allocated.  Wait
  724                  * until it's done, and look the key up again (in case the
  725                  * allocation failed or it was freed).
  726                  */
  727                 shmseg->u.shm_perm.mode |= SHMSEG_WANTED;
  728                 error = tsleep(shmseg, PLOCK | PCATCH, "shmget", 0);
  729                 if (error)
  730                         return (error);
  731                 return (EAGAIN);
  732         }
  733         if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
  734                 return (EEXIST);
  735 #ifdef MAC
  736         error = mac_check_sysv_shmget(td->td_ucred, shmseg, uap->shmflg);
  737         if (error != 0) {
  738                 MPRINTF(("mac_check_sysv_shmget returned %d\n", error));
  739                 return (error);
  740         }
  741 #endif
  742         if (uap->size && uap->size > shmseg->u.shm_segsz)
  743                 return (EINVAL);
  744         td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);
  745         return (0);
  746 }
  747 
  748 static int
  749 shmget_allocate_segment(td, uap, mode)
  750         struct thread *td;
  751         struct shmget_args *uap;
  752         int mode;
  753 {
  754         int i, segnum, shmid, size;
  755         struct ucred *cred = td->td_ucred;
  756         struct shmid_kernel *shmseg;
  757         vm_object_t shm_object;
  758 
  759         GIANT_REQUIRED;
  760 
  761         if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
  762                 return (EINVAL);
  763         if (shm_nused >= shminfo.shmmni) /* Any shmids left? */
  764                 return (ENOSPC);
  765         size = round_page(uap->size);
  766         if (shm_committed + btoc(size) > shminfo.shmall)
  767                 return (ENOMEM);
  768         if (shm_last_free < 0) {
  769                 shmrealloc();   /* Maybe expand the shmsegs[] array. */
  770                 for (i = 0; i < shmalloced; i++)
  771                         if (shmsegs[i].u.shm_perm.mode & SHMSEG_FREE)
  772                                 break;
  773                 if (i == shmalloced)
  774                         return (ENOSPC);
  775                 segnum = i;
  776         } else  {
  777                 segnum = shm_last_free;
  778                 shm_last_free = -1;
  779         }
  780         shmseg = &shmsegs[segnum];
  781         /*
  782          * In case we sleep in malloc(), mark the segment present but deleted
  783          * so that no one else tries to create the same key.
  784          */
  785         shmseg->u.shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
  786         shmseg->u.shm_perm.key = uap->key;
  787         shmseg->u.shm_perm.seq = (shmseg->u.shm_perm.seq + 1) & 0x7fff;
  788         shmid = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);
  789         
  790         /*
  791          * We make sure that we have allocated a pager before we need
  792          * to.
  793          */
  794         if (shm_use_phys) {
  795                 shm_object =
  796                     vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
  797         } else {
  798                 shm_object =
  799                     vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
  800         }
  801         VM_OBJECT_LOCK(shm_object);
  802         vm_object_clear_flag(shm_object, OBJ_ONEMAPPING);
  803         vm_object_set_flag(shm_object, OBJ_NOSPLIT);
  804         VM_OBJECT_UNLOCK(shm_object);
  805 
  806         shmseg->u.shm_internal = shm_object;
  807         shmseg->u.shm_perm.cuid = shmseg->u.shm_perm.uid = cred->cr_uid;
  808         shmseg->u.shm_perm.cgid = shmseg->u.shm_perm.gid = cred->cr_gid;
  809         shmseg->u.shm_perm.mode = (shmseg->u.shm_perm.mode & SHMSEG_WANTED) |
  810             (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
  811         shmseg->u.shm_segsz = uap->size;
  812         shmseg->u.shm_cpid = td->td_proc->p_pid;
  813         shmseg->u.shm_lpid = shmseg->u.shm_nattch = 0;
  814         shmseg->u.shm_atime = shmseg->u.shm_dtime = 0;
  815 #ifdef MAC
  816         mac_create_sysv_shm(cred, shmseg);
  817 #endif
  818         shmseg->u.shm_ctime = time_second;
  819         shm_committed += btoc(size);
  820         shm_nused++;
  821         if (shmseg->u.shm_perm.mode & SHMSEG_WANTED) {
  822                 /*
  823                  * Somebody else wanted this key while we were asleep.  Wake
  824                  * them up now.
  825                  */
  826                 shmseg->u.shm_perm.mode &= ~SHMSEG_WANTED;
  827                 wakeup(shmseg);
  828         }
  829         td->td_retval[0] = shmid;
  830         return (0);
  831 }
  832 
  833 /*
  834  * MPSAFE
  835  */
  836 int
  837 shmget(td, uap)
  838         struct thread *td;
  839         struct shmget_args *uap;
  840 {
  841         int segnum, mode;
  842         int error;
  843 
  844         if (!jail_sysvipc_allowed && jailed(td->td_ucred))
  845                 return (ENOSYS);
  846         mtx_lock(&Giant);
  847         mode = uap->shmflg & ACCESSPERMS;
  848         if (uap->key != IPC_PRIVATE) {
  849         again:
  850                 segnum = shm_find_segment_by_key(uap->key);
  851                 if (segnum >= 0) {
  852                         error = shmget_existing(td, uap, mode, segnum);
  853                         if (error == EAGAIN)
  854                                 goto again;
  855                         goto done2;
  856                 }
  857                 if ((uap->shmflg & IPC_CREAT) == 0) {
  858                         error = ENOENT;
  859                         goto done2;
  860                 }
  861         }
  862         error = shmget_allocate_segment(td, uap, mode);
  863 done2:
  864         mtx_unlock(&Giant);
  865         return (error);
  866 }
  867 
  868 /*
  869  * MPSAFE
  870  */
  871 int
  872 shmsys(td, uap)
  873         struct thread *td;
  874         /* XXX actually varargs. */
  875         struct shmsys_args /* {
  876                 int     which;
  877                 int     a2;
  878                 int     a3;
  879                 int     a4;
  880         } */ *uap;
  881 {
  882 #if defined(__i386__) && (defined(COMPAT_FREEBSD4) || defined(COMPAT_43))
  883         int error;
  884 
  885         if (!jail_sysvipc_allowed && jailed(td->td_ucred))
  886                 return (ENOSYS);
  887         if (uap->which < 0 ||
  888             uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
  889                 return (EINVAL);
  890         mtx_lock(&Giant);
  891         error = (*shmcalls[uap->which])(td, &uap->a2);
  892         mtx_unlock(&Giant);
  893         return (error);
  894 #else
  895         return (nosys(td, NULL));
  896 #endif
  897 }
  898 
  899 static void
  900 shmfork_myhook(p1, p2)
  901         struct proc *p1, *p2;
  902 {
  903         struct shmmap_state *shmmap_s;
  904         size_t size;
  905         int i;
  906 
  907         mtx_lock(&Giant);
  908         size = shminfo.shmseg * sizeof(struct shmmap_state);
  909         shmmap_s = malloc(size, M_SHM, M_WAITOK);
  910         bcopy(p1->p_vmspace->vm_shm, shmmap_s, size);
  911         p2->p_vmspace->vm_shm = shmmap_s;
  912         for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
  913                 if (shmmap_s->shmid != -1)
  914                         shmsegs[IPCID_TO_IX(shmmap_s->shmid)].u.shm_nattch++;
  915         mtx_unlock(&Giant);
  916 }
  917 
  918 static void
  919 shmexit_myhook(struct vmspace *vm)
  920 {
  921         struct shmmap_state *base, *shm;
  922         int i;
  923 
  924         if ((base = vm->vm_shm) != NULL) {
  925                 vm->vm_shm = NULL;
  926                 mtx_lock(&Giant);
  927                 for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
  928                         if (shm->shmid != -1)
  929                                 shm_delete_mapping(vm, shm);
  930                 }
  931                 mtx_unlock(&Giant);
  932                 free(base, M_SHM);
  933         }
  934 }
  935 
  936 static void
  937 shmrealloc(void)
  938 {
  939         int i;
  940         struct shmid_kernel *newsegs;
  941 
  942         if (shmalloced >= shminfo.shmmni)
  943                 return;
  944 
  945         newsegs = malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
  946         if (newsegs == NULL)
  947                 return;
  948         for (i = 0; i < shmalloced; i++)
  949                 bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
  950         for (; i < shminfo.shmmni; i++) {
  951                 shmsegs[i].u.shm_perm.mode = SHMSEG_FREE;
  952                 shmsegs[i].u.shm_perm.seq = 0;
  953 #ifdef MAC
  954                 mac_init_sysv_shm(&shmsegs[i]);
  955 #endif
  956         }
  957         free(shmsegs, M_SHM);
  958         shmsegs = newsegs;
  959         shmalloced = shminfo.shmmni;
  960 }
  961 
  962 static void
  963 shminit()
  964 {
  965         int i;
  966 
  967         TUNABLE_INT_FETCH("kern.ipc.shmmaxpgs", &shminfo.shmall);
  968         for (i = PAGE_SIZE; i > 0; i--) {
  969                 shminfo.shmmax = shminfo.shmall * i;
  970                 if (shminfo.shmmax >= shminfo.shmall)
  971                         break;
  972         }
  973         TUNABLE_INT_FETCH("kern.ipc.shmmin", &shminfo.shmmin);
  974         TUNABLE_INT_FETCH("kern.ipc.shmmni", &shminfo.shmmni);
  975         TUNABLE_INT_FETCH("kern.ipc.shmseg", &shminfo.shmseg);
  976         TUNABLE_INT_FETCH("kern.ipc.shm_use_phys", &shm_use_phys);
  977 
  978         shmalloced = shminfo.shmmni;
  979         shmsegs = malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
  980         if (shmsegs == NULL)
  981                 panic("cannot allocate initial memory for sysvshm");
  982         for (i = 0; i < shmalloced; i++) {
  983                 shmsegs[i].u.shm_perm.mode = SHMSEG_FREE;
  984                 shmsegs[i].u.shm_perm.seq = 0;
  985 #ifdef MAC
  986                 mac_init_sysv_shm(&shmsegs[i]);
  987 #endif
  988         }
  989         shm_last_free = 0;
  990         shm_nused = 0;
  991         shm_committed = 0;
  992         shmexit_hook = &shmexit_myhook;
  993         shmfork_hook = &shmfork_myhook;
  994 }
  995 
  996 static int
  997 shmunload()
  998 {
  999 #ifdef MAC
 1000         int i;  
 1001 #endif
 1002 
 1003         if (shm_nused > 0)
 1004                 return (EBUSY);
 1005 
 1006 #ifdef MAC
 1007         for (i = 0; i < shmalloced; i++)
 1008                 mac_destroy_sysv_shm(&shmsegs[i]);
 1009 #endif
 1010         free(shmsegs, M_SHM);
 1011         shmexit_hook = NULL;
 1012         shmfork_hook = NULL;
 1013         return (0);
 1014 }
 1015 
 1016 static int
 1017 sysctl_shmsegs(SYSCTL_HANDLER_ARGS)
 1018 {
 1019 
 1020         return (SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0])));
 1021 }
 1022 
 1023 static int
 1024 sysvshm_modload(struct module *module, int cmd, void *arg)
 1025 {
 1026         int error = 0;
 1027 
 1028         switch (cmd) {
 1029         case MOD_LOAD:
 1030                 shminit();
 1031                 break;
 1032         case MOD_UNLOAD:
 1033                 error = shmunload();
 1034                 break;
 1035         case MOD_SHUTDOWN:
 1036                 break;
 1037         default:
 1038                 error = EINVAL;
 1039                 break;
 1040         }
 1041         return (error);
 1042 }
 1043 
 1044 static moduledata_t sysvshm_mod = {
 1045         "sysvshm",
 1046         &sysvshm_modload,
 1047         NULL
 1048 };
 1049 
 1050 SYSCALL_MODULE_HELPER(shmsys);
 1051 SYSCALL_MODULE_HELPER(shmat);
 1052 SYSCALL_MODULE_HELPER(shmctl);
 1053 SYSCALL_MODULE_HELPER(shmdt);
 1054 SYSCALL_MODULE_HELPER(shmget);
 1055 
 1056 DECLARE_MODULE(sysvshm, sysvshm_mod,
 1057         SI_SUB_SYSV_SHM, SI_ORDER_FIRST);
 1058 MODULE_VERSION(sysvshm, 1);
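
A minimal userland sketch (not part of sysv_shm.c) showing how the shmget()/shmat()/shmdt()/shmctl() system calls implemented above are typically exercised. The program below assumes a standard FreeBSD libc environment and abbreviates error handling.

/*
 * Illustrative example only: create a one-page System V shared memory
 * segment, attach it, write to it, detach, and remove it.
 */
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>

#include <stdio.h>
#include <string.h>

int
main(void)
{
        key_t key = ftok("/tmp", 'S');  /* derive an IPC key from a path */
        size_t len = 4096;              /* rounded up to a page by the kernel */
        int shmid;
        char *p;

        /*
         * shmget() ends up in shmget_allocate_segment() (or shmget_existing()
         * if the key is already in use), subject to the shmmin/shmmax and
         * shmmni/shmall limits declared earlier in this file.
         */
        if ((shmid = shmget(key, len, IPC_CREAT | 0600)) == -1) {
                perror("shmget");
                return (1);
        }

        /*
         * With shmaddr == NULL, kern_shmat() lets vm_map_find() choose the
         * attach address (hinted past the end of the data segment).
         */
        if ((p = shmat(shmid, NULL, 0)) == (char *)-1) {
                perror("shmat");
                return (1);
        }

        strlcpy(p, "hello from SysV shm", len);
        printf("%s\n", p);

        /*
         * shmdt() tears down the mapping via shm_delete_mapping(); IPC_RMID
         * marks the segment SHMSEG_REMOVED so it is deallocated once
         * shm_nattch drops to zero.
         */
        if (shmdt(p) == -1)
                perror("shmdt");
        if (shmctl(shmid, IPC_RMID, NULL) == -1)
                perror("shmctl");
        return (0);
}

The limits referenced above are exposed as the kern.ipc.shmmax, kern.ipc.shmmin and kern.ipc.shmall sysctls (read-write) and the kern.ipc.shmmni and kern.ipc.shmseg loader tunables (read-only at run time), registered near the top of this file.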

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.