FreeBSD/Linux Kernel Cross Reference
sys/kern/sysv_shm.c
1 /* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */
2 /*
3 * Copyright (c) 1994 Adam Glass and Charles Hannum. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Adam Glass and Charles
16 * Hannum.
17 * 4. The names of the authors may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD: releng/5.3/sys/kern/sysv_shm.c 136588 2004-10-16 08:43:07Z cvs2svn $");
34
35 #include "opt_compat.h"
36 #include "opt_sysvipc.h"
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/lock.h>
42 #include <sys/sysctl.h>
43 #include <sys/shm.h>
44 #include <sys/proc.h>
45 #include <sys/malloc.h>
46 #include <sys/mman.h>
47 #include <sys/module.h>
48 #include <sys/mutex.h>
49 #include <sys/resourcevar.h>
50 #include <sys/stat.h>
51 #include <sys/syscall.h>
52 #include <sys/syscallsubr.h>
53 #include <sys/sysent.h>
54 #include <sys/sysproto.h>
55 #include <sys/jail.h>
56
57 #include <vm/vm.h>
58 #include <vm/vm_param.h>
59 #include <vm/pmap.h>
60 #include <vm/vm_object.h>
61 #include <vm/vm_map.h>
62 #include <vm/vm_page.h>
63 #include <vm/vm_pager.h>
64
65 static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");
66
67 struct oshmctl_args;
68 static int oshmctl(struct thread *td, struct oshmctl_args *uap);
69
70 static int shmget_allocate_segment(struct thread *td,
71 struct shmget_args *uap, int mode);
72 static int shmget_existing(struct thread *td, struct shmget_args *uap,
73 int mode, int segnum);
74
75 /* XXX casting to (sy_call_t *) is bogus, as usual. */
76 static sy_call_t *shmcalls[] = {
77 (sy_call_t *)shmat, (sy_call_t *)oshmctl,
78 (sy_call_t *)shmdt, (sy_call_t *)shmget,
79 (sy_call_t *)shmctl
80 };
81
82 #define SHMSEG_FREE 0x0200
83 #define SHMSEG_REMOVED 0x0400
84 #define SHMSEG_ALLOCATED 0x0800
85 #define SHMSEG_WANTED 0x1000
86
87 static int shm_last_free, shm_nused, shm_committed, shmalloced;
88 static struct shmid_ds *shmsegs;
89
90 struct shmmap_state {
91 vm_offset_t va;
92 int shmid;
93 };
94
95 static void shm_deallocate_segment(struct shmid_ds *);
96 static int shm_find_segment_by_key(key_t);
97 static struct shmid_ds *shm_find_segment_by_shmid(int);
98 static struct shmid_ds *shm_find_segment_by_shmidx(int);
99 static int shm_delete_mapping(struct vmspace *vm, struct shmmap_state *);
100 static void shmrealloc(void);
101 static void shminit(void);
102 static int sysvshm_modload(struct module *, int, void *);
103 static int shmunload(void);
104 static void shmexit_myhook(struct vmspace *vm);
105 static void shmfork_myhook(struct proc *p1, struct proc *p2);
106 static int sysctl_shmsegs(SYSCTL_HANDLER_ARGS);
107
108 /*
109 * Tuneable values.
110 */
111 #ifndef SHMMAXPGS
112 #define SHMMAXPGS 8192 /* Note: sysv shared memory is swap backed. */
113 #endif
114 #ifndef SHMMAX
115 #define SHMMAX (SHMMAXPGS*PAGE_SIZE)
116 #endif
117 #ifndef SHMMIN
118 #define SHMMIN 1
119 #endif
120 #ifndef SHMMNI
121 #define SHMMNI 192
122 #endif
123 #ifndef SHMSEG
124 #define SHMSEG 128
125 #endif
126 #ifndef SHMALL
127 #define SHMALL (SHMMAXPGS)
128 #endif
129
130 struct shminfo shminfo = {
131 SHMMAX,
132 SHMMIN,
133 SHMMNI,
134 SHMSEG,
135 SHMALL
136 };
137
138 static int shm_use_phys;
139 static int shm_allow_removed;
140
141 SYSCTL_DECL(_kern_ipc);
142 SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
143 SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
144 SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RDTUN, &shminfo.shmmni, 0, "");
145 SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RDTUN, &shminfo.shmseg, 0, "");
146 SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, "");
147 SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW,
148 &shm_use_phys, 0, "");
149 SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
150 &shm_allow_removed, 0, "");
151 SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLFLAG_RD,
152 NULL, 0, sysctl_shmsegs, "", "");
153
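/*
 * Look up a segment by its IPC key.  Returns the index into shmsegs[]
 * of the allocated segment whose key matches, or -1 if there is none.
 */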
154 static int
155 shm_find_segment_by_key(key)
156 key_t key;
157 {
158 int i;
159
160 for (i = 0; i < shmalloced; i++)
161 if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
162 shmsegs[i].shm_perm.key == key)
163 return (i);
164 return (-1);
165 }
166
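/*
 * Translate a user-visible shmid into a pointer to its shmid_ds.  The
 * slot must be allocated, its sequence number must match the one encoded
 * in the id, and (unless shm_allow_removed is set) it must not be marked
 * SHMSEG_REMOVED.  Returns NULL otherwise.
 */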
167 static struct shmid_ds *
168 shm_find_segment_by_shmid(int shmid)
169 {
170 int segnum;
171 struct shmid_ds *shmseg;
172
173 segnum = IPCID_TO_IX(shmid);
174 if (segnum < 0 || segnum >= shmalloced)
175 return (NULL);
176 shmseg = &shmsegs[segnum];
177 if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
178 (!shm_allow_removed &&
179 (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0) ||
180 shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
181 return (NULL);
182 return (shmseg);
183 }
184
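/*
 * Like shm_find_segment_by_shmid(), but takes a raw index into shmsegs[]
 * (as used by SHM_STAT) and does not check the sequence number.
 */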
185 static struct shmid_ds *
186 shm_find_segment_by_shmidx(int segnum)
187 {
188 struct shmid_ds *shmseg;
189
190 if (segnum < 0 || segnum >= shmalloced)
191 return (NULL);
192 shmseg = &shmsegs[segnum];
193 if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
194 (!shm_allow_removed &&
195 (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0))
196 return (NULL);
197 return (shmseg);
198 }
199
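/*
 * Release the VM object backing a segment, subtract its pages from
 * shm_committed, and mark the shmsegs[] slot free.  Giant must be held.
 */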
200 static void
201 shm_deallocate_segment(shmseg)
202 struct shmid_ds *shmseg;
203 {
204 size_t size;
205
206 GIANT_REQUIRED;
207
208 vm_object_deallocate(shmseg->shm_internal);
209 shmseg->shm_internal = NULL;
210 size = round_page(shmseg->shm_segsz);
211 shm_committed -= btoc(size);
212 shm_nused--;
213 shmseg->shm_perm.mode = SHMSEG_FREE;
214 }
215
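/*
 * Unmap one attached segment from a process' address space and drop its
 * attach count; if the segment was marked SHMSEG_REMOVED and this was
 * the last attach, deallocate it.
 */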
216 static int
217 shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
218 {
219 struct shmid_ds *shmseg;
220 int segnum, result;
221 size_t size;
222
223 GIANT_REQUIRED;
224
225 segnum = IPCID_TO_IX(shmmap_s->shmid);
226 shmseg = &shmsegs[segnum];
227 size = round_page(shmseg->shm_segsz);
228 result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
229 if (result != KERN_SUCCESS)
230 return (EINVAL);
231 shmmap_s->shmid = -1;
232 shmseg->shm_dtime = time_second;
233 if ((--shmseg->shm_nattch <= 0) &&
234 (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
235 shm_deallocate_segment(shmseg);
236 shm_last_free = segnum;
237 }
238 return (0);
239 }
240
241 #ifndef _SYS_SYSPROTO_H_
242 struct shmdt_args {
243 const void *shmaddr;
244 };
245 #endif
246
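/*
 * shmdt(2): detach the segment mapped at uap->shmaddr from the calling
 * process.
 */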
247 /*
248 * MPSAFE
249 */
250 int
251 shmdt(td, uap)
252 struct thread *td;
253 struct shmdt_args *uap;
254 {
255 struct proc *p = td->td_proc;
256 struct shmmap_state *shmmap_s;
257 int i;
258 int error = 0;
259
260 if (!jail_sysvipc_allowed && jailed(td->td_ucred))
261 return (ENOSYS);
262 mtx_lock(&Giant);
263 shmmap_s = p->p_vmspace->vm_shm;
264 if (shmmap_s == NULL) {
265 error = EINVAL;
266 goto done2;
267 }
268 for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
269 if (shmmap_s->shmid != -1 &&
270 shmmap_s->va == (vm_offset_t)uap->shmaddr) {
271 break;
272 }
273 }
274 if (i == shminfo.shmseg) {
275 error = EINVAL;
276 goto done2;
277 }
278 error = shm_delete_mapping(p->p_vmspace, shmmap_s);
279 done2:
280 mtx_unlock(&Giant);
281 return (error);
282 }
283
284 #ifndef _SYS_SYSPROTO_H_
285 struct shmat_args {
286 int shmid;
287 const void *shmaddr;
288 int shmflg;
289 };
290 #endif
291
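/*
 * Guts of shmat(2): map the segment named by shmid into the calling
 * process, either at the caller-supplied address (honoring SHM_RND) or
 * at a kernel-chosen address above the data segment, read-only if
 * SHM_RDONLY was given.
 */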
292 /*
293 * MPSAFE
294 */
295 int
296 kern_shmat(td, shmid, shmaddr, shmflg)
297 struct thread *td;
298 int shmid;
299 const void *shmaddr;
300 int shmflg;
301 {
302 struct proc *p = td->td_proc;
303 int i, flags;
304 struct shmid_ds *shmseg;
305 struct shmmap_state *shmmap_s = NULL;
306 vm_offset_t attach_va;
307 vm_prot_t prot;
308 vm_size_t size;
309 int rv;
310 int error = 0;
311
312 if (!jail_sysvipc_allowed && jailed(td->td_ucred))
313 return (ENOSYS);
314 mtx_lock(&Giant);
315 shmmap_s = p->p_vmspace->vm_shm;
316 if (shmmap_s == NULL) {
317 size = shminfo.shmseg * sizeof(struct shmmap_state);
318 shmmap_s = malloc(size, M_SHM, M_WAITOK);
319 for (i = 0; i < shminfo.shmseg; i++)
320 shmmap_s[i].shmid = -1;
321 p->p_vmspace->vm_shm = shmmap_s;
322 }
323 shmseg = shm_find_segment_by_shmid(shmid);
324 if (shmseg == NULL) {
325 error = EINVAL;
326 goto done2;
327 }
328 error = ipcperm(td, &shmseg->shm_perm,
329 (shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
330 if (error)
331 goto done2;
332 for (i = 0; i < shminfo.shmseg; i++) {
333 if (shmmap_s->shmid == -1)
334 break;
335 shmmap_s++;
336 }
337 if (i >= shminfo.shmseg) {
338 error = EMFILE;
339 goto done2;
340 }
341 size = round_page(shmseg->shm_segsz);
342 #ifdef VM_PROT_READ_IS_EXEC
343 prot = VM_PROT_READ | VM_PROT_EXECUTE;
344 #else
345 prot = VM_PROT_READ;
346 #endif
347 if ((shmflg & SHM_RDONLY) == 0)
348 prot |= VM_PROT_WRITE;
349 flags = MAP_ANON | MAP_SHARED;
350 if (shmaddr) {
351 flags |= MAP_FIXED;
352 if (shmflg & SHM_RND) {
353 attach_va = (vm_offset_t)shmaddr & ~(SHMLBA-1);
354 } else if (((vm_offset_t)shmaddr & (SHMLBA-1)) == 0) {
355 attach_va = (vm_offset_t)shmaddr;
356 } else {
357 error = EINVAL;
358 goto done2;
359 }
360 } else {
361 /*
362 * This is just a hint to vm_map_find() about where to
363 * put it.
364 */
365 PROC_LOCK(p);
366 attach_va = round_page((vm_offset_t)p->p_vmspace->vm_daddr +
367 lim_max(p, RLIMIT_DATA));
368 PROC_UNLOCK(p);
369 }
370
371 vm_object_reference(shmseg->shm_internal);
372 rv = vm_map_find(&p->p_vmspace->vm_map, shmseg->shm_internal,
373 0, &attach_va, size, (flags & MAP_FIXED)?0:1, prot, prot, 0);
374 if (rv != KERN_SUCCESS) {
375 vm_object_deallocate(shmseg->shm_internal);
376 error = ENOMEM;
377 goto done2;
378 }
379 vm_map_inherit(&p->p_vmspace->vm_map,
380 attach_va, attach_va + size, VM_INHERIT_SHARE);
381
382 shmmap_s->va = attach_va;
383 shmmap_s->shmid = shmid;
384 shmseg->shm_lpid = p->p_pid;
385 shmseg->shm_atime = time_second;
386 shmseg->shm_nattch++;
387 td->td_retval[0] = attach_va;
388 done2:
389 mtx_unlock(&Giant);
390 return (error);
391 }
392
393 int
394 shmat(td, uap)
395 struct thread *td;
396 struct shmat_args *uap;
397 {
398 return kern_shmat(td, uap->shmid, uap->shmaddr, uap->shmflg);
399 }
400
401 struct oshmid_ds {
402 struct ipc_perm shm_perm; /* operation perms */
403 int shm_segsz; /* size of segment (bytes) */
404 u_short shm_cpid; /* pid, creator */
405 u_short shm_lpid; /* pid, last operation */
406 short shm_nattch; /* no. of current attaches */
407 time_t shm_atime; /* last attach time */
408 time_t shm_dtime; /* last detach time */
409 time_t shm_ctime; /* last change time */
410 void *shm_handle; /* internal handle for shm segment */
411 };
412
413 struct oshmctl_args {
414 int shmid;
415 int cmd;
416 struct oshmid_ds *ubuf;
417 };
418
419 /*
420 * MPSAFE
421 */
422 static int
423 oshmctl(td, uap)
424 struct thread *td;
425 struct oshmctl_args *uap;
426 {
427 #ifdef COMPAT_43
428 int error = 0;
429 struct shmid_ds *shmseg;
430 struct oshmid_ds outbuf;
431
432 if (!jail_sysvipc_allowed && jailed(td->td_ucred))
433 return (ENOSYS);
434 mtx_lock(&Giant);
435 shmseg = shm_find_segment_by_shmid(uap->shmid);
436 if (shmseg == NULL) {
437 error = EINVAL;
438 goto done2;
439 }
440 switch (uap->cmd) {
441 case IPC_STAT:
442 error = ipcperm(td, &shmseg->shm_perm, IPC_R);
443 if (error)
444 goto done2;
445 outbuf.shm_perm = shmseg->shm_perm;
446 outbuf.shm_segsz = shmseg->shm_segsz;
447 outbuf.shm_cpid = shmseg->shm_cpid;
448 outbuf.shm_lpid = shmseg->shm_lpid;
449 outbuf.shm_nattch = shmseg->shm_nattch;
450 outbuf.shm_atime = shmseg->shm_atime;
451 outbuf.shm_dtime = shmseg->shm_dtime;
452 outbuf.shm_ctime = shmseg->shm_ctime;
453 outbuf.shm_handle = shmseg->shm_internal;
454 error = copyout(&outbuf, uap->ubuf, sizeof(outbuf));
455 if (error)
456 goto done2;
457 break;
458 default:
459 error = shmctl(td, (struct shmctl_args *)uap);
460 break;
461 }
462 done2:
463 mtx_unlock(&Giant);
464 return (error);
465 #else
466 return (EINVAL);
467 #endif
468 }
469
470 #ifndef _SYS_SYSPROTO_H_
471 struct shmctl_args {
472 int shmid;
473 int cmd;
474 struct shmid_ds *buf;
475 };
476 #endif
477
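/*
 * Guts of shmctl(2).  IPC_INFO/SHM_INFO copy global limits and usage
 * into *buf, IPC_STAT/SHM_STAT copy the segment's shmid_ds, IPC_SET
 * updates its owner and permission bits, and IPC_RMID marks it removed
 * (freeing it immediately if nothing is attached).
 */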
478 /*
479 * MPSAFE
480 */
481 int
482 kern_shmctl(td, shmid, cmd, buf, bufsz)
483 struct thread *td;
484 int shmid;
485 int cmd;
486 void *buf;
487 size_t *bufsz;
488 {
489 int error = 0;
490 struct shmid_ds *shmseg;
491
492 if (!jail_sysvipc_allowed && jailed(td->td_ucred))
493 return (ENOSYS);
494
495 mtx_lock(&Giant);
496 switch (cmd) {
497 case IPC_INFO:
498 memcpy(buf, &shminfo, sizeof(shminfo));
499 if (bufsz)
500 *bufsz = sizeof(shminfo);
501 td->td_retval[0] = shmalloced;
502 goto done2;
503 case SHM_INFO: {
504 struct shm_info shm_info;
505 shm_info.used_ids = shm_nused;
506 shm_info.shm_rss = 0; /*XXX where to get from ? */
507 shm_info.shm_tot = 0; /*XXX where to get from ? */
508 shm_info.shm_swp = 0; /*XXX where to get from ? */
509 shm_info.swap_attempts = 0; /*XXX where to get from ? */
510 shm_info.swap_successes = 0; /*XXX where to get from ? */
511 memcpy(buf, &shm_info, sizeof(shm_info));
512 if (bufsz)
513 *bufsz = sizeof(shm_info);
514 td->td_retval[0] = shmalloced;
515 goto done2;
516 }
517 }
518 if (cmd == SHM_STAT)
519 shmseg = shm_find_segment_by_shmidx(shmid);
520 else
521 shmseg = shm_find_segment_by_shmid(shmid);
522 if (shmseg == NULL) {
523 error = EINVAL;
524 goto done2;
525 }
526 switch (cmd) {
527 case SHM_STAT:
528 case IPC_STAT:
529 error = ipcperm(td, &shmseg->shm_perm, IPC_R);
530 if (error)
531 goto done2;
532 memcpy(buf, shmseg, sizeof(struct shmid_ds));
533 if (bufsz)
534 *bufsz = sizeof(struct shmid_ds);
535 if (cmd == SHM_STAT)
536 td->td_retval[0] = IXSEQ_TO_IPCID(shmid, shmseg->shm_perm);
537 break;
538 case IPC_SET: {
539 struct shmid_ds *shmid;
540
541 shmid = (struct shmid_ds *)buf;
542 error = ipcperm(td, &shmseg->shm_perm, IPC_M);
543 if (error)
544 goto done2;
545 shmseg->shm_perm.uid = shmid->shm_perm.uid;
546 shmseg->shm_perm.gid = shmid->shm_perm.gid;
547 shmseg->shm_perm.mode =
548 (shmseg->shm_perm.mode & ~ACCESSPERMS) |
549 (shmid->shm_perm.mode & ACCESSPERMS);
550 shmseg->shm_ctime = time_second;
551 break;
552 }
553 case IPC_RMID:
554 error = ipcperm(td, &shmseg->shm_perm, IPC_M);
555 if (error)
556 goto done2;
557 shmseg->shm_perm.key = IPC_PRIVATE;
558 shmseg->shm_perm.mode |= SHMSEG_REMOVED;
559 if (shmseg->shm_nattch <= 0) {
560 shm_deallocate_segment(shmseg);
561 shm_last_free = IPCID_TO_IX(shmid);
562 }
563 break;
564 #if 0
565 case SHM_LOCK:
566 case SHM_UNLOCK:
567 #endif
568 default:
569 error = EINVAL;
570 break;
571 }
572 done2:
573 mtx_unlock(&Giant);
574 return (error);
575 }
576
577 int
578 shmctl(td, uap)
579 struct thread *td;
580 struct shmctl_args *uap;
581 {
582 int error = 0;
583 struct shmid_ds buf;
584 size_t bufsz;
585
586 /* IPC_SET needs to copyin the buffer before calling kern_shmctl */
587 if (uap->cmd == IPC_SET) {
588 if ((error = copyin(uap->buf, &buf, sizeof(struct shmid_ds))))
589 goto done;
590 }
591
592 error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz);
593 if (error)
594 goto done;
595
596 /* Cases in which we need to copyout */
597 switch (uap->cmd) {
598 case IPC_INFO:
599 case SHM_INFO:
600 case SHM_STAT:
601 case IPC_STAT:
602 error = copyout(&buf, uap->buf, bufsz);
603 break;
604 }
605
606 done:
607 if (error) {
608 /* Invalidate the return value */
609 td->td_retval[0] = -1;
610 }
611 return (error);
612 }
613
614
615 #ifndef _SYS_SYSPROTO_H_
616 struct shmget_args {
617 key_t key;
618 size_t size;
619 int shmflg;
620 };
621 #endif
622
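/*
 * shmget(2) found an existing segment for the key: wait for a segment
 * that is still being set up, enforce IPC_CREAT|IPC_EXCL, and check
 * permissions and the requested size before returning its id.
 */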
623 static int
624 shmget_existing(td, uap, mode, segnum)
625 struct thread *td;
626 struct shmget_args *uap;
627 int mode;
628 int segnum;
629 {
630 struct shmid_ds *shmseg;
631 int error;
632
633 shmseg = &shmsegs[segnum];
634 if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
635 /*
636 * This segment is in the process of being allocated. Wait
637 * until it's done, and look the key up again (in case the
638 * allocation failed or it was freed).
639 */
640 shmseg->shm_perm.mode |= SHMSEG_WANTED;
641 error = tsleep(shmseg, PLOCK | PCATCH, "shmget", 0);
642 if (error)
643 return (error);
644 return (EAGAIN);
645 }
646 if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
647 return (EEXIST);
648 error = ipcperm(td, &shmseg->shm_perm, mode);
649 if (error)
650 return (error);
651 if (uap->size && uap->size > shmseg->shm_segsz)
652 return (EINVAL);
653 td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
654 return (0);
655 }
656
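/*
 * shmget(2) must create a new segment: find a free shmsegs[] slot, back
 * it with a physical or swap pager object, and fill in its shmid_ds.
 */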
657 static int
658 shmget_allocate_segment(td, uap, mode)
659 struct thread *td;
660 struct shmget_args *uap;
661 int mode;
662 {
663 int i, segnum, shmid, size;
664 struct ucred *cred = td->td_ucred;
665 struct shmid_ds *shmseg;
666 vm_object_t shm_object;
667
668 GIANT_REQUIRED;
669
670 if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
671 return (EINVAL);
672 if (shm_nused >= shminfo.shmmni) /* Any shmids left? */
673 return (ENOSPC);
674 size = round_page(uap->size);
675 if (shm_committed + btoc(size) > shminfo.shmall)
676 return (ENOMEM);
677 if (shm_last_free < 0) {
678 shmrealloc(); /* Maybe expand the shmsegs[] array. */
679 for (i = 0; i < shmalloced; i++)
680 if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
681 break;
682 if (i == shmalloced)
683 return (ENOSPC);
684 segnum = i;
685 } else {
686 segnum = shm_last_free;
687 shm_last_free = -1;
688 }
689 shmseg = &shmsegs[segnum];
690 /*
691 * In case we sleep in malloc(), mark the segment present but deleted
692 * so that no one else tries to create the same key.
693 */
694 shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
695 shmseg->shm_perm.key = uap->key;
696 shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
697 shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
698
699 /*
700 * We make sure that we have allocated a pager before we need
701 * to.
702 */
703 if (shm_use_phys) {
704 shm_object =
705 vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
706 } else {
707 shm_object =
708 vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
709 }
710 VM_OBJECT_LOCK(shm_object);
711 vm_object_clear_flag(shm_object, OBJ_ONEMAPPING);
712 vm_object_set_flag(shm_object, OBJ_NOSPLIT);
713 VM_OBJECT_UNLOCK(shm_object);
714
715 shmseg->shm_internal = shm_object;
716 shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
717 shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
718 shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
719 (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
720 shmseg->shm_segsz = uap->size;
721 shmseg->shm_cpid = td->td_proc->p_pid;
722 shmseg->shm_lpid = shmseg->shm_nattch = 0;
723 shmseg->shm_atime = shmseg->shm_dtime = 0;
724 shmseg->shm_ctime = time_second;
725 shm_committed += btoc(size);
726 shm_nused++;
727 if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
728 /*
729 * Somebody else wanted this key while we were asleep. Wake
730 * them up now.
731 */
732 shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
733 wakeup(shmseg);
734 }
735 td->td_retval[0] = shmid;
736 return (0);
737 }
738
739 /*
740 * MPSAFE
741 */
742 int
743 shmget(td, uap)
744 struct thread *td;
745 struct shmget_args *uap;
746 {
747 int segnum, mode;
748 int error;
749
750 if (!jail_sysvipc_allowed && jailed(td->td_ucred))
751 return (ENOSYS);
752 mtx_lock(&Giant);
753 mode = uap->shmflg & ACCESSPERMS;
754 if (uap->key != IPC_PRIVATE) {
755 again:
756 segnum = shm_find_segment_by_key(uap->key);
757 if (segnum >= 0) {
758 error = shmget_existing(td, uap, mode, segnum);
759 if (error == EAGAIN)
760 goto again;
761 goto done2;
762 }
763 if ((uap->shmflg & IPC_CREAT) == 0) {
764 error = ENOENT;
765 goto done2;
766 }
767 }
768 error = shmget_allocate_segment(td, uap, mode);
769 done2:
770 mtx_unlock(&Giant);
771 return (error);
772 }
773
774 /*
775 * MPSAFE
776 */
777 int
778 shmsys(td, uap)
779 struct thread *td;
780 /* XXX actually varargs. */
781 struct shmsys_args /* {
782 int which;
783 int a2;
784 int a3;
785 int a4;
786 } */ *uap;
787 {
788 int error;
789
790 if (!jail_sysvipc_allowed && jailed(td->td_ucred))
791 return (ENOSYS);
792 if (uap->which < 0 ||
793 uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
794 return (EINVAL);
795 mtx_lock(&Giant);
796 error = (*shmcalls[uap->which])(td, &uap->a2);
797 mtx_unlock(&Giant);
798 return (error);
799 }
800
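/*
 * Fork hook: give the child a copy of the parent's shmmap_state table
 * and bump the attach count of every segment it inherits.
 */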
801 static void
802 shmfork_myhook(p1, p2)
803 struct proc *p1, *p2;
804 {
805 struct shmmap_state *shmmap_s;
806 size_t size;
807 int i;
808
809 size = shminfo.shmseg * sizeof(struct shmmap_state);
810 shmmap_s = malloc(size, M_SHM, M_WAITOK);
811 bcopy(p1->p_vmspace->vm_shm, shmmap_s, size);
812 p2->p_vmspace->vm_shm = shmmap_s;
813 for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
814 if (shmmap_s->shmid != -1)
815 shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
816 }
817
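/*
 * Exit hook: detach any remaining segments from the exiting vmspace and
 * free its shmmap_state table.
 */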
818 static void
819 shmexit_myhook(struct vmspace *vm)
820 {
821 struct shmmap_state *base, *shm;
822 int i;
823
824 if ((base = vm->vm_shm) != NULL) {
825 vm->vm_shm = NULL;
826 mtx_lock(&Giant);
827 for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
828 if (shm->shmid != -1)
829 shm_delete_mapping(vm, shm);
830 }
831 mtx_unlock(&Giant);
832 free(base, M_SHM);
833 }
834 }
835
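/*
 * Grow shmsegs[] up to the current shmmni setting, copying the existing
 * entries and marking the new slots free.  The array is never shrunk.
 */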
836 static void
837 shmrealloc(void)
838 {
839 int i;
840 struct shmid_ds *newsegs;
841
842 if (shmalloced >= shminfo.shmmni)
843 return;
844
845 newsegs = malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
846 if (newsegs == NULL)
847 return;
848 for (i = 0; i < shmalloced; i++)
849 bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
850 for (; i < shminfo.shmmni; i++) {
851 newsegs[i].shm_perm.mode = SHMSEG_FREE;
852 newsegs[i].shm_perm.seq = 0;
853 }
854 free(shmsegs, M_SHM);
855 shmsegs = newsegs;
856 shmalloced = shminfo.shmmni;
857 }
858
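/*
 * Module initialization: read the loader tunables, allocate the initial
 * shmsegs[] array, and install the fork and exit hooks.
 */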
859 static void
860 shminit()
861 {
862 int i;
863
864 TUNABLE_INT_FETCH("kern.ipc.shmmaxpgs", &shminfo.shmall);
865 for (i = PAGE_SIZE; i > 0; i--) {
866 shminfo.shmmax = shminfo.shmall * i;
867 if (shminfo.shmmax >= shminfo.shmall)
868 break;
869 }
870 TUNABLE_INT_FETCH("kern.ipc.shmmin", &shminfo.shmmin);
871 TUNABLE_INT_FETCH("kern.ipc.shmmni", &shminfo.shmmni);
872 TUNABLE_INT_FETCH("kern.ipc.shmseg", &shminfo.shmseg);
873 TUNABLE_INT_FETCH("kern.ipc.shm_use_phys", &shm_use_phys);
874
875 shmalloced = shminfo.shmmni;
876 shmsegs = malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
877 if (shmsegs == NULL)
878 panic("cannot allocate initial memory for sysvshm");
879 for (i = 0; i < shmalloced; i++) {
880 shmsegs[i].shm_perm.mode = SHMSEG_FREE;
881 shmsegs[i].shm_perm.seq = 0;
882 }
883 shm_last_free = 0;
884 shm_nused = 0;
885 shm_committed = 0;
886 shmexit_hook = &shmexit_myhook;
887 shmfork_hook = &shmfork_myhook;
888 }
889
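/*
 * Module unload: refuse while any segment is still in use, otherwise
 * free shmsegs[] and remove the hooks.
 */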
890 static int
891 shmunload()
892 {
893
894 if (shm_nused > 0)
895 return (EBUSY);
896
897 free(shmsegs, M_SHM);
898 shmexit_hook = NULL;
899 shmfork_hook = NULL;
900 return (0);
901 }
902
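/*
 * Sysctl handler for kern.ipc.shmsegs: export the raw shmsegs[] array.
 */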
903 static int
904 sysctl_shmsegs(SYSCTL_HANDLER_ARGS)
905 {
906
907 return (SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0])));
908 }
909
910 static int
911 sysvshm_modload(struct module *module, int cmd, void *arg)
912 {
913 int error = 0;
914
915 switch (cmd) {
916 case MOD_LOAD:
917 shminit();
918 break;
919 case MOD_UNLOAD:
920 error = shmunload();
921 break;
922 case MOD_SHUTDOWN:
923 break;
924 default:
925 error = EINVAL;
926 break;
927 }
928 return (error);
929 }
930
931 static moduledata_t sysvshm_mod = {
932 "sysvshm",
933 &sysvshm_modload,
934 NULL
935 };
936
937 SYSCALL_MODULE_HELPER(shmsys);
938 SYSCALL_MODULE_HELPER(shmat);
939 SYSCALL_MODULE_HELPER(shmctl);
940 SYSCALL_MODULE_HELPER(shmdt);
941 SYSCALL_MODULE_HELPER(shmget);
942
943 DECLARE_MODULE(sysvshm, sysvshm_mod,
944 SI_SUB_SYSV_SHM, SI_ORDER_FIRST);
945 MODULE_VERSION(sysvshm, 1);