1 /*
2 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */
26
27 /*
28 * Copyright (c) 1994 Adam Glass and Charles Hannum. All rights reserved.
29 *
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions
32 * are met:
33 * 1. Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * 2. Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in the
37 * documentation and/or other materials provided with the distribution.
38 * 3. All advertising materials mentioning features or use of this software
39 * must display the following acknowledgement:
40 * This product includes software developed by Adam Glass and Charles
41 * Hannum.
42 * 4. The names of the authors may not be used to endorse or promote products
43 * derived from this software without specific prior written permission.
44 *
45 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
46 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
47 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
48 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
49 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
50 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
51 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
52 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
53 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
54 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
55 */
56
57
58 #include <sys/appleapiopts.h>
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/kernel.h>
62 #include <sys/shm.h>
63 #include <sys/proc.h>
64 #include <sys/malloc.h>
65 #include <sys/mman.h>
66 #include <sys/stat.h>
67 #include <sys/sysctl.h>
68 #include <sys/kern_audit.h>
69
70 #include <mach/mach_types.h>
71 #include <mach/vm_inherit.h>
72 #include <vm/vm_map.h>
73
/*
 * Forward declarations of the SysV shared memory syscall entry points.
 * The argument structures are defined later in this file (or pulled in
 * from sysproto.h); __P() keeps K&R-compiler compatibility.
 */
struct shmat_args;
extern int shmat __P((struct proc *p, struct shmat_args *uap, int *retval));
struct shmctl_args;
extern int shmctl __P((struct proc *p, struct shmctl_args *uap, int *retval));
struct shmdt_args;
extern int shmdt __P((struct proc *p, struct shmdt_args *uap, int *retval));
struct shmget_args;
extern int shmget __P((struct proc *p, struct shmget_args *uap, int *retval));
82
/* Disabled: on this platform shminit() is driven by sysctl_shminfo(). */
#if 0
static void shminit __P((void *));
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL)
#endif /* 0 */
87
struct oshmctl_args;
static int oshmctl __P((struct proc *p, struct oshmctl_args *uap, int * retval));
static int shmget_allocate_segment __P((struct proc *p, struct shmget_args *uap, int mode, int * retval));
static int shmget_existing __P((struct proc *p, struct shmget_args *uap, int mode, int segnum, int * retval));

/* Common syscall signature used by the shmsys() dispatch table below. */
typedef int sy_call_t __P((struct proc *, void *, int *));

/* XXX casting to (sy_call_t *) is bogus, as usual. */
/* Dispatch table for the legacy shmsys() multiplexer, indexed by uap->which. */
static sy_call_t *shmcalls[] = {
	(sy_call_t *)shmat, (sy_call_t *)oshmctl,
	(sy_call_t *)shmdt, (sy_call_t *)shmget,
	(sy_call_t *)shmctl
};
101
/* Per-segment state bits, kept in shm_perm.mode above the access bits. */
#define SHMSEG_FREE     	0x0200	/* table slot is unused */
#define SHMSEG_REMOVED  	0x0400	/* IPC_RMID'd but still attached */
#define SHMSEG_ALLOCATED	0x0800	/* slot holds a live segment */
#define SHMSEG_WANTED		0x1000	/* a shmget() is sleeping on this key */

/* Global accounting: hint for the next free slot, segments in use,
 * and pages committed against shminfo.shmall. */
static int shm_last_free, shm_nused, shm_committed;
struct shmid_ds *shmsegs;		/* segment table, shminfo.shmmni entries */
static int shm_inited = 0;		/* set once shminit() has run */

/* Kernel-private payload hung off shmid_ds.shm_internal. */
struct shm_handle {
	/* vm_offset_t kva; */
	void * shm_object;		/* Mach memory entry backing the segment */
};

/* One entry of the per-process attach table (p->vm_shm). */
struct shmmap_state {
	vm_offset_t va;			/* user VA of the attach */
	int shmid;			/* attached id, or -1 if slot free */
};

static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_find_segment_by_key __P((key_t));
static struct shmid_ds *shm_find_segment_by_shmid __P((int));
static int shm_delete_mapping __P((struct proc *, struct shmmap_state *, int));
125
#ifdef __APPLE_API_PRIVATE
/*
 * Tunable limits.  All start at -1 ("unset"); each must be assigned via
 * the kern.sysv sysctls before sysctl_shminfo() will call shminit().
 * The trailing comments show typical historical defaults.
 */
struct shminfo shminfo = {
	-1,	/* SHMMAX 4096 *1024 */
	-1,	/* SHMMIN = 1 */
	-1,	/* SHMMNI = 1 */
	-1,	/* SHMSEG = 8 */
	-1 	/* SHMALL = 1024 */
};
#endif /* __APPLE_API_PRIVATE */
135
136 static int
137 shm_find_segment_by_key(key)
138 key_t key;
139 {
140 int i;
141
142 for (i = 0; i < shminfo.shmmni; i++)
143 if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
144 shmsegs[i].shm_perm.key == key)
145 return i;
146 return -1;
147 }
148
149 static struct shmid_ds *
150 shm_find_segment_by_shmid(shmid)
151 int shmid;
152 {
153 int segnum;
154 struct shmid_ds *shmseg;
155
156 segnum = IPCID_TO_IX(shmid);
157 if (segnum < 0 || segnum >= shminfo.shmmni)
158 return NULL;
159 shmseg = &shmsegs[segnum];
160 if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
161 != SHMSEG_ALLOCATED ||
162 shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
163 return NULL;
164 return shmseg;
165 }
166
167 static void
168 shm_deallocate_segment(shmseg)
169 struct shmid_ds *shmseg;
170 {
171 struct shm_handle *shm_handle;
172 struct shmmap_state *shmmap_s=NULL;
173 size_t size;
174 char * ptr;
175
176 shm_handle = shmseg->shm_internal;
177 size = round_page_32(shmseg->shm_segsz);
178 mach_destroy_memory_entry(shm_handle->shm_object);
179 FREE((caddr_t)shm_handle, M_SHM);
180 shmseg->shm_internal = NULL;
181 shm_committed -= btoc(size);
182 shm_nused--;
183 shmseg->shm_perm.mode = SHMSEG_FREE;
184 }
185
/*
 * Detach one mapping from the current process.  When `deallocate' is
 * set the VA range is also unmapped from the current task's map (as
 * for shmdt(); shmexec() passes 0 because the old address space is
 * already gone).  The attach-table slot is freed, the segment's attach
 * count dropped, and a segment that was IPC_RMID'd while attached is
 * destroyed once its last attach disappears.
 *
 * Returns 0, or EINVAL if the VM unmap fails.
 */
static int
shm_delete_mapping(p, shmmap_s, deallocate)
	struct proc *p;
	struct shmmap_state *shmmap_s;
	int deallocate;
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page_32(shmseg->shm_segsz);
	if (deallocate) {
		result = vm_deallocate(current_map(), shmmap_s->va, size);
		if (result != KERN_SUCCESS)
			return EINVAL;
	}
	shmmap_s->shmid = -1;		/* slot is reusable now */
	shmseg->shm_dtime = time_second;
	/* Last detach of a removed segment triggers the actual teardown. */
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}
213
/* Argument layout for shmdt(2). */
struct shmdt_args {
	void *shmaddr;	/* attach address previously returned by shmat() */
};
217
218 int
219 shmdt(p, uap, retval)
220 struct proc *p;
221 struct shmdt_args *uap;
222 register_t *retval;
223 {
224 struct shmmap_state *shmmap_s;
225 int i;
226
227 AUDIT_ARG(svipc_addr, uap->shmaddr);
228 if (!shm_inited)
229 return(EINVAL);
230 shmmap_s = (struct shmmap_state *)p->vm_shm;
231 if (shmmap_s == NULL)
232 return EINVAL;
233 for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
234 if (shmmap_s->shmid != -1 &&
235 shmmap_s->va == (vm_offset_t)uap->shmaddr)
236 break;
237 if (i == shminfo.shmseg)
238 return EINVAL;
239 return shm_delete_mapping(p, shmmap_s, 1);
240 }
241
#ifndef _SYS_SYSPROTO_H_
/* Argument layout for shmat(2). */
struct shmat_args {
	int shmid;	/* segment id from shmget() */
	void *shmaddr;	/* requested attach address, or 0 for "anywhere" */
	int shmflg;	/* SHM_RDONLY / SHM_RND */
};
#endif
249
/*
 * shmat(2): map a shared memory segment into the calling process.
 *
 * On a process's first attach, a table of shminfo.shmseg slots is
 * allocated and hung off p->vm_shm.  The attach address honors
 * SHM_RND/fixed-address semantics; with shmaddr == 0 the kernel picks
 * the address.  On success *retval holds the attach VA.
 *
 * NOTE(review): the _MALLOC(..., M_WAITOK) result is not checked —
 * presumably M_WAITOK cannot return NULL here; confirm.
 */
int
shmat(p, uap, retval)
	struct proc *p;
	struct shmat_args *uap;
	register_t *retval;
{
	int error, i, flags;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;		/* user VA where the segment lands */
	vm_prot_t prot;
	vm_size_t size;
	kern_return_t rv;

	AUDIT_ARG(svipc_id, uap->shmid);
	AUDIT_ARG(svipc_addr, uap->shmaddr);
	if (!shm_inited)
		return(EINVAL);
	shmmap_s = (struct shmmap_state *)p->vm_shm;
	if (shmmap_s == NULL) {
		/* First attach: build an empty per-process attach table. */
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = (struct shmmap_state *)_MALLOC(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;

	AUDIT_ARG(svipc_perm, &shmseg->shm_perm);
	/* Read permission always required; write too unless SHM_RDONLY. */
	error = ipcperm(cred, &shmseg->shm_perm,
	    (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return error;
	/* Find a free slot in the attach table. */
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg)
		return EMFILE;
	size = round_page_32(shmseg->shm_segsz);
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND)
			/* Round a non-aligned request down to SHMLBA. */
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0)
			attach_va = (vm_offset_t)uap->shmaddr;
		else
			return EINVAL;	/* misaligned fixed address */
	} else {
		attach_va = round_page_32((unsigned int)uap->shmaddr);
	}

	shm_handle = shmseg->shm_internal;
	/* Map the memory entry; "anywhere" unless the caller fixed the VA. */
	rv = vm_map(current_map(), &attach_va, size, 0, (flags & MAP_FIXED)? FALSE: TRUE,
		shm_handle->shm_object, 0, FALSE, prot, prot, VM_INHERIT_DEFAULT);
	if (rv != KERN_SUCCESS)
		goto out;
	/* SysV shared memory stays shared across fork(). */
	rv = vm_inherit(current_map(), attach_va, size,
		VM_INHERIT_SHARE);
	if (rv != KERN_SUCCESS) {
		(void) vm_deallocate(current_map(), attach_va, size);
		goto out;
	}

	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	*retval = attach_va;
	return( 0);
out:
	/* Translate Mach VM status to an errno. */
	switch (rv) {
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}

}
342
/* Old (4.3BSD-compat) user-visible segment descriptor, copied out by
 * oshmctl()'s IPC_STAT. */
struct oshmid_ds {
	struct ipc_perm shm_perm;	/* operation perms */
	int shm_segsz;			/* size of segment (bytes) */
	ushort shm_cpid;		/* pid, creator */
	ushort shm_lpid;		/* pid, last operation */
	short shm_nattch;		/* no. of current attaches */
	time_t shm_atime;		/* last attach time */
	time_t shm_dtime;		/* last detach time */
	time_t shm_ctime;		/* last change time */
	void *shm_handle;		/* internal handle for shm segment */
};
354
/* Argument layout for the old-style shmctl (via shmsys()). */
struct oshmctl_args {
	int shmid;		/* segment id */
	int cmd;		/* IPC_STAT handled here; rest forwarded */
	struct oshmid_ds *ubuf;	/* user buffer for IPC_STAT */
};
360
361 static int
362 oshmctl(p, uap, retval)
363 struct proc *p;
364 struct oshmctl_args *uap;
365 register_t *retval;
366 {
367 #ifdef COMPAT_43
368 int error;
369 struct ucred *cred = p->p_ucred;
370 struct shmid_ds *shmseg;
371 struct oshmid_ds outbuf;
372
373 if (!shm_inited)
374 return(EINVAL);
375 shmseg = shm_find_segment_by_shmid(uap->shmid);
376 if (shmseg == NULL)
377 return EINVAL;
378 switch (uap->cmd) {
379 case IPC_STAT:
380 error = ipcperm(cred, &shmseg->shm_perm, IPC_R);
381 if (error)
382 return error;
383 outbuf.shm_perm = shmseg->shm_perm;
384 outbuf.shm_segsz = shmseg->shm_segsz;
385 outbuf.shm_cpid = shmseg->shm_cpid;
386 outbuf.shm_lpid = shmseg->shm_lpid;
387 outbuf.shm_nattch = shmseg->shm_nattch;
388 outbuf.shm_atime = shmseg->shm_atime;
389 outbuf.shm_dtime = shmseg->shm_dtime;
390 outbuf.shm_ctime = shmseg->shm_ctime;
391 outbuf.shm_handle = shmseg->shm_internal;
392 error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf));
393 if (error)
394 return error;
395 break;
396 default:
397 /* XXX casting to (sy_call_t *) is bogus, as usual. */
398 return ((sy_call_t *)shmctl)(p, uap, retval);
399 }
400 return 0;
401 #else
402 return EINVAL;
403 #endif
404 }
405
#ifndef _SYS_SYSPROTO_H_
/* Argument layout for shmctl(2). */
struct shmctl_args {
	int shmid;		/* segment id */
	int cmd;		/* IPC_STAT / IPC_SET / IPC_RMID */
	struct shmid_ds *buf;	/* user buffer (in for SET, out for STAT) */
};
#endif
413
/*
 * shmctl(2): segment control.
 *   IPC_STAT  - copy the descriptor out to the caller (needs IPC_R).
 *   IPC_SET   - update uid/gid/access mode (needs IPC_M).
 *   IPC_RMID  - mark for removal; destroyed when the last attach goes
 *               (needs IPC_M).
 * SHM_LOCK/SHM_UNLOCK are not implemented.
 *
 * NOTE(review): IPC_STAT copies out the kernel shmid_ds wholesale,
 * which includes the shm_internal kernel pointer — verify that
 * exposing it to user space is intended.
 */
int
shmctl(p, uap, retval)
	struct proc *p;
	struct shmctl_args *uap;
	register_t *retval;
{
	int error;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	AUDIT_ARG(svipc_cmd, uap->cmd);
	AUDIT_ARG(svipc_id, uap->shmid);
	if (!shm_inited)
		return(EINVAL);
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	/* XXAUDIT: This is the perms BEFORE any change by this call. This
	 * may not be what is desired.
	 */
	AUDIT_ARG(svipc_perm, &shmseg->shm_perm);

	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		error = copyout((caddr_t)shmseg, uap->buf, sizeof(inbuf));
		if (error)
			return error;
		break;
	case IPC_SET:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		error = copyin(uap->buf, (caddr_t)&inbuf, sizeof(inbuf));
		if (error)
			return error;
		/* Only owner, group, and the access bits may be changed. */
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time_second;
		break;
	case IPC_RMID:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		/* Hide the key so new shmget()s cannot find the segment,
		 * then destroy it now or on last detach. */
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(uap->shmid);
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		return EINVAL;
	}
	return 0;
}
480
#ifndef _SYS_SYSPROTO_H_
/* Argument layout for shmget(2). */
struct shmget_args {
	key_t key;	/* IPC key, or IPC_PRIVATE */
	size_t size;	/* requested segment size in bytes */
	int shmflg;	/* IPC_CREAT / IPC_EXCL / access mode bits */
};
#endif
488
489 static int
490 shmget_existing(p, uap, mode, segnum, retval)
491 struct proc *p;
492 struct shmget_args *uap;
493 int mode;
494 int segnum;
495 int *retval;
496 {
497 struct shmid_ds *shmseg;
498 struct ucred *cred = p->p_ucred;
499 int error;
500
501 shmseg = &shmsegs[segnum];
502 if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
503 /*
504 * This segment is in the process of being allocated. Wait
505 * until it's done, and look the key up again (in case the
506 * allocation failed or it was freed).
507 */
508 shmseg->shm_perm.mode |= SHMSEG_WANTED;
509 error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
510 if (error)
511 return error;
512 return EAGAIN;
513 }
514 error = ipcperm(cred, &shmseg->shm_perm, mode);
515 if (error)
516 return error;
517 if (uap->size && uap->size > shmseg->shm_segsz)
518 return EINVAL;
519 if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
520 return EEXIST;
521 *retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
522 return 0;
523 }
524
/*
 * shmget() helper: create a brand-new segment.
 *
 * Validates the requested size against shmmin/shmmax and the global
 * segment/page limits, picks a free table slot (preferring the
 * shm_last_free hint), vm_allocate()s backing memory in the caller's
 * map, wraps it in a Mach memory entry that shmat() later maps from,
 * and fills in the shmid_ds bookkeeping.
 *
 * NOTE(review): on the error paths below, the region obtained from
 * vm_allocate() is not released when mach_make_memory_entry() fails —
 * looks like an address-space leak; confirm.  The region is also left
 * mapped in the creating task on success — presumably intentional.
 */
static int
shmget_allocate_segment(p, uap, mode, retval)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
	int * retval;
{
	int i, segnum, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;
	kern_return_t kret;
	vm_offset_t user_addr;
	void * mem_object;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = round_page_32(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	/* Use the free-slot hint if valid, else scan for a free slot. */
	if (shm_last_free < 0) {
		for (i = 0; i < shminfo.shmmni; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shminfo.shmmni)
			panic("shmseg free count inconsistent");
		segnum = i;
	} else  {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that noone else tries to create the same key.
	 */
	kret = vm_allocate(current_map(), &user_addr, size, TRUE);
	if (kret != KERN_SUCCESS)
		goto out;

	kret = mach_make_memory_entry (current_map(), &size,
			user_addr, VM_PROT_DEFAULT, &mem_object, 0);

	if (kret != KERN_SUCCESS)
		goto out;
	/* ALLOCATED|REMOVED: visible to key lookups but not yet usable. */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	/* Bump the generation so stale ids for this slot are rejected. */
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    _MALLOC(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shm_handle->shm_object = mem_object;
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	/* Segment becomes live; preserve WANTED so waiters get woken. */
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;
	AUDIT_ARG(svipc_perm, &shmseg->shm_perm);
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep. Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	*retval = shmid;
	AUDIT_ARG(svipc_id, shmid);
	return 0;
out:
	/* Translate Mach VM status to an errno. */
	switch (kret) {
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}

}
616
617 int
618 shmget(p, uap, retval)
619 struct proc *p;
620 struct shmget_args *uap;
621 register_t *retval;
622 {
623 int segnum, mode, error;
624
625 /* Auditing is actually done in shmget_allocate_segment() */
626 if (!shm_inited)
627 return(EINVAL);
628
629 mode = uap->shmflg & ACCESSPERMS;
630 if (uap->key != IPC_PRIVATE) {
631 again:
632 segnum = shm_find_segment_by_key(uap->key);
633 if (segnum >= 0) {
634 error = shmget_existing(p, uap, mode, segnum, retval);
635 if (error == EAGAIN)
636 goto again;
637 return(error);
638 }
639 if ((uap->shmflg & IPC_CREAT) == 0)
640 return ENOENT;
641 }
642 return( shmget_allocate_segment(p, uap, mode, retval));;
643 /*NOTREACHED*/
644
645 }
646
/* Argument layout for the legacy shmsys(2) multiplexer; a2..a4 are the
 * raw argument words reinterpreted by the dispatched handler. */
struct shmsys_args {
	u_int which;	/* index into shmcalls[] */
	int a2;
	int a3;
	int a4;
};
653 int
654 shmsys(p, uap, retval)
655 struct proc *p;
656 /* XXX actually varargs. */
657 struct shmsys_args *uap;
658 register_t *retval;
659 {
660
661 if (!shm_inited)
662 return(EINVAL);
663
664 if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
665 return EINVAL;
666 return ((*shmcalls[uap->which])(p, &uap->a2, retval));
667 }
668
669 void
670 shmfork(p1, p2)
671 struct proc *p1, *p2;
672 {
673 struct shmmap_state *shmmap_s;
674 size_t size;
675 int i;
676
677 if (!shm_inited)
678 return;
679 size = shminfo.shmseg * sizeof(struct shmmap_state);
680 shmmap_s = (struct shmmap_state *)_MALLOC(size, M_SHM, M_WAITOK);
681 bcopy((caddr_t)p1->vm_shm, (caddr_t)shmmap_s, size);
682 p2->vm_shm = (caddr_t)shmmap_s;
683 for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
684 if (shmmap_s->shmid != -1)
685 shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
686 }
687
688 void
689 shmexit(p)
690 struct proc *p;
691 {
692 struct shmmap_state *shmmap_s;
693 int i;
694
695 shmmap_s = (struct shmmap_state *)p->vm_shm;
696 for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
697 if (shmmap_s->shmid != -1)
698 shm_delete_mapping(p, shmmap_s, 1);
699 FREE((caddr_t)p->vm_shm, M_SHM);
700 p->vm_shm = NULL;
701 }
702
703 /*
704 * shmexec() is like shmexit(), only it doesn't delete the mappings,
705 * since the old address space has already been destroyed and the new
706 * one instantiated. Instead, it just does the housekeeping work we
707 * need to do to keep the System V shared memory subsystem sane.
708 */
709 __private_extern__ void
710 shmexec(p)
711 struct proc *p;
712 {
713 struct shmmap_state *shmmap_s;
714 int i;
715
716 shmmap_s = (struct shmmap_state *)p->vm_shm;
717 for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
718 if (shmmap_s->shmid != -1)
719 shm_delete_mapping(p, shmmap_s, 0);
720 FREE((caddr_t)p->vm_shm, M_SHM);
721 p->vm_shm = NULL;
722 }
723
724 void
725 shminit(dummy)
726 void *dummy;
727 {
728 int i;
729 int s;
730
731 if (!shm_inited) {
732 s = sizeof(struct shmid_ds) * shminfo.shmmni;
733
734 MALLOC(shmsegs, struct shmid_ds *, s,
735 M_SHM, M_WAITOK);
736 for (i = 0; i < shminfo.shmmni; i++) {
737 shmsegs[i].shm_perm.mode = SHMSEG_FREE;
738 shmsegs[i].shm_perm.seq = 0;
739 }
740 shm_last_free = 0;
741 shm_nused = 0;
742 shm_committed = 0;
743 shm_inited = 1;
744 }
745 }
746
/* (struct sysctl_oid *oidp, void *arg1, int arg2, \
	struct sysctl_req *req) */
/*
 * Sysctl handler shared by the kern.sysv.shm* limits.  `arg1' points
 * at the shminfo field being read or written.  Writes are only honored
 * before shminit() has run; once every limit has been assigned, the
 * subsystem is initialized and the limits become read-only in effect.
 */
static int
sysctl_shminfo SYSCTL_HANDLER_ARGS
{
	int error = 0;

	error = SYSCTL_OUT(req, arg1, sizeof(int));
	if (error || !req->newptr)
		return(error);

	/* Set the values only if shared memory is not initialised */
	if (!shm_inited) {
		if (error = SYSCTL_IN(req, arg1, sizeof(int)))
			return(error);
		if (arg1 == &shminfo.shmmax) {
			/* shmmax must be a whole number of pages. */
			if (shminfo.shmmax & PAGE_MASK) {
				shminfo.shmmax = -1;
				return(EINVAL);
			}
		}

		/* Initialize only when all values are set */
		if ((shminfo.shmmax != -1) &&
			(shminfo.shmmin != -1) &&
			(shminfo.shmmni != -1) &&
			(shminfo.shmseg != -1) &&
			(shminfo.shmall != -1)) {
			shminit(NULL);
		}
	}
	return(0);
}
780
/* kern.sysv sysctl tree: the SysV shm tunables, all routed through
 * sysctl_shminfo() so they can only be set before initialization. */
SYSCTL_NODE(_kern, KERN_SYSV, sysv, CTLFLAG_RW, 0, "SYSV");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMMAX, shmmax, CTLTYPE_INT | CTLFLAG_RW,
    &shminfo.shmmax, 0, &sysctl_shminfo ,"I","shmmax");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMMIN, shmmin, CTLTYPE_INT | CTLFLAG_RW,
    &shminfo.shmmin, 0, &sysctl_shminfo ,"I","shmmin");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMMNI, shmmni, CTLTYPE_INT | CTLFLAG_RW,
    &shminfo.shmmni, 0, &sysctl_shminfo ,"I","shmmni");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMSEG, shmseg, CTLTYPE_INT | CTLFLAG_RW,
    &shminfo.shmseg, 0, &sysctl_shminfo ,"I","shmseg");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMALL, shmall, CTLTYPE_INT | CTLFLAG_RW,
    &shminfo.shmall, 0, &sysctl_shminfo ,"I","shmall");
797
798
Cache object: f4c644eb0d7d6767bfa3108a8cd276de
|