FreeBSD/Linux Kernel Cross Reference
sys/kern/sysv_shm.c
/* $FreeBSD: releng/5.0/sys/kern/sysv_shm.c 125489 2004-02-05 18:01:26Z nectar $ */
/*	$NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $	*/

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

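/*
 * Illustrative userland usage of the interface implemented below
 * (a sketch only; error checking omitted):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);
 *	p[0] = 'x';
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, NULL);
 */
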
#include "opt_compat.h"
#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/jail.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

struct oshmctl_args;
static int oshmctl(struct thread *td, struct oshmctl_args *uap);

static int shmget_allocate_segment(struct thread *td,
    struct shmget_args *uap, int mode);
static int shmget_existing(struct thread *td, struct shmget_args *uap,
    int mode, int segnum);

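/*
 * Dispatch table for the shmsys(2) umbrella syscall; indexed by the
 * "which" argument in shmsys() below.
 */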
/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
	(sy_call_t *)shmat, (sy_call_t *)oshmctl,
	(sy_call_t *)shmdt, (sy_call_t *)shmget,
	(sy_call_t *)shmctl
};

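/*
 * Per-segment state flags, kept in shm_perm.mode alongside the access
 * permission bits.
 */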
#define	SHMSEG_FREE		0x0200
#define	SHMSEG_REMOVED		0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

static int shm_last_free, shm_nused, shm_committed, shmalloced;
static struct shmid_ds *shmsegs;

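/* Kernel-internal handle for a segment: the VM object that backs it. */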
struct shm_handle {
	/* vm_offset_t kva; */
	vm_object_t shm_object;
};

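/*
 * Per-process attach record; each process has room for shminfo.shmseg
 * of these, hung off p_vmspace->vm_shm.
 */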
struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment(struct shmid_ds *);
static int shm_find_segment_by_key(key_t);
static struct shmid_ds *shm_find_segment_by_shmid(int);
static struct shmid_ds *shm_find_segment_by_shmidx(int);
static int shm_delete_mapping(struct proc *p, struct shmmap_state *);
static void shmrealloc(void);
static void shminit(void);
static int sysvshm_modload(struct module *, int, void *);
static int shmunload(void);
static void shmexit_myhook(struct proc *p);
static void shmfork_myhook(struct proc *p1, struct proc *p2);
static int sysctl_shmsegs(SYSCTL_HANDLER_ARGS);

/*
 * Tuneable values.
 */
#ifndef SHMMAXPGS
#define	SHMMAXPGS	8192	/* Note: sysv shared memory is swap backed. */
#endif
#ifndef SHMMAX
#define	SHMMAX	(SHMMAXPGS*PAGE_SIZE)
#endif
#ifndef SHMMIN
#define	SHMMIN	1
#endif
#ifndef SHMMNI
#define	SHMMNI	192
#endif
#ifndef SHMSEG
#define	SHMSEG	128
#endif
#ifndef SHMALL
#define	SHMALL	(SHMMAXPGS)
#endif

struct shminfo shminfo = {
	SHMMAX,
	SHMMIN,
	SHMMNI,
	SHMSEG,
	SHMALL
};

static int shm_use_phys;

SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RD, &shminfo.shmseg, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW,
    &shm_use_phys, 0, "");
SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLFLAG_RD,
    NULL, 0, sysctl_shmsegs, "", "");

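/* Map an IPC key to the index of an allocated segment, or -1 if none. */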
static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shmalloced; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return (i);
	return (-1);
}

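/*
 * Resolve a user-visible shmid (segment index + sequence number) to
 * its shmid_ds, or NULL if it is stale or not allocated.
 */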
static struct shmid_ds *
shm_find_segment_by_shmid(shmid)
	int shmid;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shmalloced)
		return (NULL);
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return (NULL);
	return (shmseg);
}

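/* Like shm_find_segment_by_shmid(), but takes a raw index (SHM_STAT). */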
static struct shmid_ds *
shm_find_segment_by_shmidx(int segnum)
{
	struct shmid_ds *shmseg;

	if (segnum < 0 || segnum >= shmalloced)
		return (NULL);
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED)
		return (NULL);
	return (shmseg);
}

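/* Release the backing VM object and return the slot to the free pool. */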
static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct shm_handle *shm_handle;
	size_t size;

	GIANT_REQUIRED;

	shm_handle = shmseg->shm_internal;
	vm_object_deallocate(shm_handle->shm_object);
	free(shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	size = round_page(shmseg->shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}

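/*
 * Undo one process mapping of a segment; if the segment was marked
 * removed and this was its last attach, free it for real.
 */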
static int
shm_delete_mapping(p, shmmap_s)
	struct proc *p;
	struct shmmap_state *shmmap_s;
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	GIANT_REQUIRED;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	result = vm_map_remove(&p->p_vmspace->vm_map, shmmap_s->va,
	    shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return (EINVAL);
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct shmdt_args {
	void *shmaddr;
};
#endif

/*
 * MPSAFE
 */
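/* shmdt(2): detach the segment mapped at uap->shmaddr from this process. */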
int
shmdt(td, uap)
	struct thread *td;
	struct shmdt_args *uap;
{
	struct proc *p = td->td_proc;
	struct shmmap_state *shmmap_s;
	int i;
	int error = 0;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmmap_s = p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		error = EINVAL;
		goto done2;
	}
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr) {
			break;
		}
	}
	if (i == shminfo.shmseg) {
		error = EINVAL;
		goto done2;
	}
	error = shm_delete_mapping(p, shmmap_s);
done2:
	mtx_unlock(&Giant);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
	int shmid;
	void *shmaddr;
	int shmflg;
};
#endif

/*
 * MPSAFE
 */
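/* shmat(2): map the segment named by uap->shmid into this process. */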
int
shmat(td, uap)
	struct thread *td;
	struct shmat_args *uap;
{
	struct proc *p = td->td_proc;
	int i, flags;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	int rv;
	int error = 0;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmmap_s = p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
	error = ipcperm(td, &shmseg->shm_perm,
	    (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		goto done2;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg) {
		error = EMFILE;
		goto done2;
	}
	size = round_page(shmseg->shm_segsz);
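	/* On platforms where readable implies executable, ask for both. */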
#ifdef VM_PROT_READ_IS_EXEC
	prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
	prot = VM_PROT_READ;
#endif
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND) {
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		} else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0) {
			attach_va = (vm_offset_t)uap->shmaddr;
		} else {
			error = EINVAL;
			goto done2;
		}
	} else {
		/*
		 * This is just a hint to vm_map_find() about where to
		 * put it.
		 */
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr
		    + maxtsiz + maxdsiz);
	}

	shm_handle = shmseg->shm_internal;
	vm_object_reference(shm_handle->shm_object);
	rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
	    0, &attach_va, size, (flags & MAP_FIXED) ? 0 : 1, prot, prot, 0);
	if (rv != KERN_SUCCESS) {
		vm_object_deallocate(shm_handle->shm_object);
		error = ENOMEM;
		goto done2;
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
	    attach_va, attach_va + size, VM_INHERIT_SHARE);

	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	td->td_retval[0] = attach_va;
done2:
	mtx_unlock(&Giant);
	return (error);
}

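/* Old (4.3BSD-era) shmid_ds layout, used by the COMPAT_43 oshmctl(). */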
struct oshmid_ds {
	struct	ipc_perm shm_perm;	/* operation perms */
	int	shm_segsz;		/* size of segment (bytes) */
	ushort	shm_cpid;		/* pid, creator */
	ushort	shm_lpid;		/* pid, last operation */
	short	shm_nattch;		/* no. of current attaches */
	time_t	shm_atime;		/* last attach time */
	time_t	shm_dtime;		/* last detach time */
	time_t	shm_ctime;		/* last change time */
	void	*shm_handle;		/* internal handle for shm segment */
};

struct oshmctl_args {
	int shmid;
	int cmd;
	struct oshmid_ds *ubuf;
};

/*
 * MPSAFE
 */
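/* Old-style shmctl(): fills the 4.3BSD layout for IPC_STAT, forwards the rest. */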
static int
oshmctl(td, uap)
	struct thread *td;
	struct oshmctl_args *uap;
{
#ifdef COMPAT_43
	int error = 0;
	struct shmid_ds *shmseg;
	struct oshmid_ds outbuf;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(td, &shmseg->shm_perm, IPC_R);
		if (error)
			goto done2;
		outbuf.shm_perm = shmseg->shm_perm;
		outbuf.shm_segsz = shmseg->shm_segsz;
		outbuf.shm_cpid = shmseg->shm_cpid;
		outbuf.shm_lpid = shmseg->shm_lpid;
		outbuf.shm_nattch = shmseg->shm_nattch;
		outbuf.shm_atime = shmseg->shm_atime;
		outbuf.shm_dtime = shmseg->shm_dtime;
		outbuf.shm_ctime = shmseg->shm_ctime;
		outbuf.shm_handle = shmseg->shm_internal;
		error = copyout(&outbuf, uap->ubuf, sizeof(outbuf));
		if (error)
			goto done2;
		break;
	default:
		/* XXX casting to (sy_call_t *) is bogus, as usual. */
		error = ((sy_call_t *)shmctl)(td, uap);
		break;
	}
done2:
	mtx_unlock(&Giant);
	return (error);
#else
	return (EINVAL);
#endif
}

#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *buf;
};
#endif

/*
 * MPSAFE
 */
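/* shmctl(2): segment status and control (IPC_STAT, IPC_SET, IPC_RMID, ...). */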
int
shmctl(td, uap)
	struct thread *td;
	struct shmctl_args *uap;
{
	int error = 0;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	switch (uap->cmd) {
	case IPC_INFO:
		error = copyout(&shminfo, uap->buf, sizeof(shminfo));
		if (error)
			goto done2;
		td->td_retval[0] = shmalloced;
		goto done2;
	case SHM_INFO: {
		struct shm_info shm_info;
		shm_info.used_ids = shm_nused;
		shm_info.shm_rss = 0;	/*XXX where to get from ? */
		shm_info.shm_tot = 0;	/*XXX where to get from ? */
		shm_info.shm_swp = 0;	/*XXX where to get from ? */
		shm_info.swap_attempts = 0;	/*XXX where to get from ? */
		shm_info.swap_successes = 0;	/*XXX where to get from ? */
		error = copyout(&shm_info, uap->buf, sizeof(shm_info));
		if (error)
			goto done2;
		td->td_retval[0] = shmalloced;
		goto done2;
	}
	}
	if (uap->cmd == SHM_STAT)
		shmseg = shm_find_segment_by_shmidx(uap->shmid);
	else
		shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
	switch (uap->cmd) {
	case SHM_STAT:
	case IPC_STAT:
		error = ipcperm(td, &shmseg->shm_perm, IPC_R);
		if (error)
			goto done2;
		error = copyout(shmseg, uap->buf, sizeof(inbuf));
		if (error)
			goto done2;
		else if (uap->cmd == SHM_STAT)
			td->td_retval[0] =
			    IXSEQ_TO_IPCID(uap->shmid, shmseg->shm_perm);
		break;
	case IPC_SET:
		error = ipcperm(td, &shmseg->shm_perm, IPC_M);
		if (error)
			goto done2;
		error = copyin(uap->buf, &inbuf, sizeof(inbuf));
		if (error)
			goto done2;
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time_second;
		break;
	case IPC_RMID:
		error = ipcperm(td, &shmseg->shm_perm, IPC_M);
		if (error)
			goto done2;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(uap->shmid);
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		error = EINVAL;
		break;
	}
done2:
	mtx_unlock(&Giant);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
	key_t key;
	size_t size;
	int shmflg;
};
#endif

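/*
 * shmget() found an existing segment for the key: honor IPC_EXCL and
 * check permissions and the requested size against it.
 */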
static int
shmget_existing(td, uap, mode, segnum)
	struct thread *td;
	struct shmget_args *uap;
	int mode;
	int segnum;
{
	struct shmid_ds *shmseg;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep(shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return (error);
		return (EAGAIN);
	}
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return (EEXIST);
	error = ipcperm(td, &shmseg->shm_perm, mode);
	if (error)
		return (error);
	if (uap->size && uap->size > shmseg->shm_segsz)
		return (EINVAL);
	td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return (0);
}

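/* Create a brand-new segment for shmget(); caller holds Giant. */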
static int
shmget_allocate_segment(td, uap, mode)
	struct thread *td;
	struct shmget_args *uap;
	int mode;
{
	int i, segnum, shmid, size;
	struct ucred *cred = td->td_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	GIANT_REQUIRED;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return (EINVAL);
	if (shm_nused >= shminfo.shmmni) /* Any shmids left? */
		return (ENOSPC);
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return (ENOMEM);
	if (shm_last_free < 0) {
		shmrealloc();	/* Maybe expand the shmsegs[] array. */
		for (i = 0; i < shmalloced; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shmalloced)
			return (ENOSPC);
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	/*
	 * We make sure that we have allocated a pager before we need
	 * to.
	 */
	if (shm_use_phys) {
		shm_handle->shm_object =
		    vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
	} else {
		shm_handle->shm_object =
		    vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
	}
	vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);

	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = td->td_proc->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup(shmseg);
	}
	td->td_retval[0] = shmid;
	return (0);
}

/*
 * MPSAFE
 */
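/* shmget(2): look up a segment by key, or create one with IPC_CREAT. */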
int
shmget(td, uap)
	struct thread *td;
	struct shmget_args *uap;
{
	int segnum, mode;
	int error;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(td, uap, mode, segnum);
			if (error == EAGAIN)
				goto again;
			goto done2;
		}
		if ((uap->shmflg & IPC_CREAT) == 0) {
			error = ENOENT;
			goto done2;
		}
	}
	error = shmget_allocate_segment(td, uap, mode);
done2:
	mtx_unlock(&Giant);
	return (error);
}

/*
 * MPSAFE
 */
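/* shmsys(2): umbrella syscall; dispatches through shmcalls[] above. */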
int
shmsys(td, uap)
	struct thread *td;
	/* XXX actually varargs. */
	struct shmsys_args /* {
		u_int	which;
		int	a2;
		int	a3;
		int	a4;
	} */ *uap;
{
	int error;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	mtx_lock(&Giant);
	error = (*shmcalls[uap->which])(td, &uap->a2);
	mtx_unlock(&Giant);
	return (error);
}

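/*
 * Fork hook: the child inherits a copy of the parent's attach table,
 * and every mapped segment gains a reference.
 */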
static void
shmfork_myhook(p1, p2)
	struct proc *p1, *p2;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy(p1->p_vmspace->vm_shm, shmmap_s, size);
	p2->p_vmspace->vm_shm = shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

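/* Exit hook: detach everything the dying process still has mapped. */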
static void
shmexit_myhook(p)
	struct proc *p;
{
	struct shmmap_state *shmmap_s;
	int i;

	GIANT_REQUIRED;

	shmmap_s = p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(p, shmmap_s);
	free(p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

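/* Grow shmsegs[] up to shminfo.shmmni; the array never shrinks. */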
static void
shmrealloc(void)
{
	int i;
	struct shmid_ds *newsegs;

	if (shmalloced >= shminfo.shmmni)
		return;

	newsegs = malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
	if (newsegs == NULL)
		return;
	for (i = 0; i < shmalloced; i++)
		bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
	for (; i < shminfo.shmmni; i++) {
		newsegs[i].shm_perm.mode = SHMSEG_FREE;
		newsegs[i].shm_perm.seq = 0;
	}
	free(shmsegs, M_SHM);
	shmsegs = newsegs;
	shmalloced = shminfo.shmmni;
}

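/* Module initialization: apply loader tunables and set up shmsegs[]. */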
static void
shminit()
{
	int i;

	TUNABLE_INT_FETCH("kern.ipc.shmmaxpgs", &shminfo.shmall);
	shminfo.shmmax = shminfo.shmall * PAGE_SIZE;
	TUNABLE_INT_FETCH("kern.ipc.shmmin", &shminfo.shmmin);
	TUNABLE_INT_FETCH("kern.ipc.shmmni", &shminfo.shmmni);
	TUNABLE_INT_FETCH("kern.ipc.shmseg", &shminfo.shmseg);
	TUNABLE_INT_FETCH("kern.ipc.shm_use_phys", &shm_use_phys);

	shmalloced = shminfo.shmmni;
	shmsegs = malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
	if (shmsegs == NULL)
		panic("cannot allocate initial memory for sysvshm");
	for (i = 0; i < shmalloced; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
	shmexit_hook = &shmexit_myhook;
	shmfork_hook = &shmfork_myhook;
}

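/* Refuse to unload while any segment is still allocated. */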
static int
shmunload()
{

	if (shm_nused > 0)
		return (EBUSY);

	free(shmsegs, M_SHM);
	shmexit_hook = NULL;
	shmfork_hook = NULL;
	return (0);
}

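/* Export the raw shmsegs[] array via the kern.ipc.shmsegs sysctl. */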
static int
sysctl_shmsegs(SYSCTL_HANDLER_ARGS)
{

	return (SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0])));
}

static int
sysvshm_modload(struct module *module, int cmd, void *arg)
{
	int error = 0;

	switch (cmd) {
	case MOD_LOAD:
		shminit();
		break;
	case MOD_UNLOAD:
		error = shmunload();
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

static moduledata_t sysvshm_mod = {
	"sysvshm",
	&sysvshm_modload,
	NULL
};

SYSCALL_MODULE_HELPER(shmsys);
SYSCALL_MODULE_HELPER(shmat);
SYSCALL_MODULE_HELPER(shmctl);
SYSCALL_MODULE_HELPER(shmdt);
SYSCALL_MODULE_HELPER(shmget);

DECLARE_MODULE(sysvshm, sysvshm_mod,
    SI_SUB_SYSV_SHM, SI_ORDER_FIRST);
MODULE_VERSION(sysvshm, 1);