FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_sig.c
1 /*-
2 * Copyright (c) 1982, 1986, 1989, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94
35 */
36
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD: releng/6.1/sys/kern/kern_sig.c 158179 2006-04-30 16:44:43Z cvs2svn $");
39
40 #include "opt_compat.h"
41 #include "opt_ktrace.h"
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/signalvar.h>
46 #include <sys/vnode.h>
47 #include <sys/acct.h>
48 #include <sys/condvar.h>
49 #include <sys/event.h>
50 #include <sys/fcntl.h>
51 #include <sys/kernel.h>
52 #include <sys/kse.h>
53 #include <sys/ktr.h>
54 #include <sys/ktrace.h>
55 #include <sys/lock.h>
56 #include <sys/malloc.h>
57 #include <sys/mutex.h>
58 #include <sys/namei.h>
59 #include <sys/proc.h>
60 #include <sys/pioctl.h>
61 #include <sys/resourcevar.h>
62 #include <sys/sched.h>
63 #include <sys/sleepqueue.h>
64 #include <sys/smp.h>
65 #include <sys/stat.h>
66 #include <sys/sx.h>
67 #include <sys/syscallsubr.h>
68 #include <sys/sysctl.h>
69 #include <sys/sysent.h>
70 #include <sys/syslog.h>
71 #include <sys/sysproto.h>
72 #include <sys/unistd.h>
73 #include <sys/wait.h>
74
75 #include <machine/cpu.h>
76
77 #if defined (__alpha__) && !defined(COMPAT_43)
78 #error "You *really* need COMPAT_43 on the alpha for longjmp(3)"
79 #endif
80
#define	ONSIG	32		/* NSIG for osig* syscalls.  XXX. */

/* File-local helpers; definitions appear later in this file. */
static int	coredump(struct thread *);
static char	*expand_name(const char *, uid_t, pid_t);
static int	killpg1(struct thread *td, int sig, int pgid, int all);
static int	issignal(struct thread *p);
static int	sigprop(int sig);
static void	tdsigwakeup(struct thread *, int, sig_t, int);
static void	sig_suspend_threads(struct thread *, struct proc *, int);
static int	filt_sigattach(struct knote *kn);
static void	filt_sigdetach(struct knote *kn);
static int	filt_signal(struct knote *kn, long hint);
static struct thread *sigtd(struct proc *p, int sig, int prop);
static int	kern_sigtimedwait(struct thread *td, sigset_t set,
			siginfo_t *info, struct timespec *timeout);
static void	do_tdsignal(struct thread *td, int sig, sigtarget_t target);

/* kqueue(2) EVFILT_SIGNAL filter operations (no f_isfd). */
struct filterops sig_filtops =
	{ 0, filt_sigattach, filt_sigdetach, filt_signal };

/* kern.logsigexit: log abnormal signal exits via syslog(3); on by default. */
static int	kern_logsigexit = 1;
SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
    &kern_logsigexit, 0,
    "Log processes quitting on abnormal signals to syslog(3)");

/*
 * Policy -- Can ucred cr1 send SIGIO to process cr2?
 * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
 * in the right situations.
 */
#define CANSIGIO(cr1, cr2) \
	((cr1)->cr_uid == 0 || \
	    (cr1)->cr_ruid == (cr2)->cr_ruid || \
	    (cr1)->cr_uid == (cr2)->cr_ruid || \
	    (cr1)->cr_ruid == (cr2)->cr_uid || \
	    (cr1)->cr_uid == (cr2)->cr_uid)

/* kern.sugid_coredump: allow set-id processes to dump core (off by default). */
int sugid_coredump;
SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RW,
    &sugid_coredump, 0, "Enable coredumping set user/group ID processes");

static int	do_coredump = 1;
SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
	&do_coredump, 0, "Enable/Disable coredumps");

static int	set_core_nodump_flag = 0;
SYSCTL_INT(_kern, OID_AUTO, nodump_coredump, CTLFLAG_RW, &set_core_nodump_flag,
	0, "Enable setting the NODUMP flag on coredump files");

/*
 * Signal properties and actions.
 * The array below categorizes the signals and their default actions
 * according to the following properties:
 */
#define	SA_KILL		0x01		/* terminates process by default */
#define	SA_CORE		0x02		/* ditto and coredumps */
#define	SA_STOP		0x04		/* suspend process */
#define	SA_TTYSTOP	0x08		/* ditto, from tty */
#define	SA_IGNORE	0x10		/* ignore by default */
#define	SA_CONT		0x20		/* continue if suspended */
#define	SA_CANTMASK	0x40		/* non-maskable, catchable */
#define	SA_PROC		0x80		/* deliverable to any thread */

/* Indexed by _SIG_IDX(sig), i.e. sig - 1; consulted via sigprop() below. */
static int sigproptbl[NSIG] = {
        SA_KILL|SA_PROC,		/* SIGHUP */
        SA_KILL|SA_PROC,		/* SIGINT */
        SA_KILL|SA_CORE|SA_PROC,	/* SIGQUIT */
        SA_KILL|SA_CORE,		/* SIGILL */
        SA_KILL|SA_CORE,		/* SIGTRAP */
        SA_KILL|SA_CORE,		/* SIGABRT */
        SA_KILL|SA_CORE|SA_PROC,	/* SIGEMT */
        SA_KILL|SA_CORE,		/* SIGFPE */
        SA_KILL|SA_PROC,		/* SIGKILL */
        SA_KILL|SA_CORE,		/* SIGBUS */
        SA_KILL|SA_CORE,		/* SIGSEGV */
        SA_KILL|SA_CORE,		/* SIGSYS */
        SA_KILL|SA_PROC,		/* SIGPIPE */
        SA_KILL|SA_PROC,		/* SIGALRM */
        SA_KILL|SA_PROC,		/* SIGTERM */
        SA_IGNORE|SA_PROC,		/* SIGURG */
        SA_STOP|SA_PROC,		/* SIGSTOP */
        SA_STOP|SA_TTYSTOP|SA_PROC,	/* SIGTSTP */
        SA_IGNORE|SA_CONT|SA_PROC,	/* SIGCONT */
        SA_IGNORE|SA_PROC,		/* SIGCHLD */
        SA_STOP|SA_TTYSTOP|SA_PROC,	/* SIGTTIN */
        SA_STOP|SA_TTYSTOP|SA_PROC,	/* SIGTTOU */
        SA_IGNORE|SA_PROC,		/* SIGIO */
        SA_KILL,			/* SIGXCPU */
        SA_KILL,			/* SIGXFSZ */
        SA_KILL|SA_PROC,		/* SIGVTALRM */
        SA_KILL|SA_PROC,		/* SIGPROF */
        SA_IGNORE|SA_PROC,		/* SIGWINCH  */
        SA_IGNORE|SA_PROC,		/* SIGINFO */
        SA_KILL|SA_PROC,		/* SIGUSR1 */
        SA_KILL|SA_PROC,		/* SIGUSR2 */
};
177
178 /*
179 * Determine signal that should be delivered to process p, the current
180 * process, 0 if none. If there is a pending stop signal with default
181 * action, the process stops in issignal().
182 * XXXKSE the check for a pending stop is not done under KSE
183 *
184 * MP SAFE.
185 */
186 int
187 cursig(struct thread *td)
188 {
189 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
190 mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED);
191 mtx_assert(&sched_lock, MA_NOTOWNED);
192 return (SIGPENDING(td) ? issignal(td) : 0);
193 }
194
195 /*
196 * Arrange for ast() to handle unmasked pending signals on return to user
197 * mode. This must be called whenever a signal is added to td_siglist or
198 * unmasked in td_sigmask.
199 */
void
signotify(struct thread *td)
{
	struct proc *p;
	sigset_t set, saved;

	p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * If our mask changed we may have to move signal that were
	 * previously masked by all threads to our siglist.
	 */
	set = p->p_siglist;
	if (p->p_flag & P_SA)
		saved = p->p_siglist;	/* snapshot for the change check below */
	/* set := process-wide pending signals this thread does NOT mask. */
	SIGSETNAND(set, td->td_sigmask);
	/* Move those signals from the process list onto this thread's list. */
	SIGSETNAND(p->p_siglist, set);
	SIGSETOR(td->td_siglist, set);

	if (SIGPENDING(td)) {
		/* Arrange for ast() to run the signal check on return to user mode. */
		mtx_lock_spin(&sched_lock);
		td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING;
		mtx_unlock_spin(&sched_lock);
	}
	if ((p->p_flag & P_SA) && !(p->p_flag & P_SIGEVENT)) {
		if (!SIGSETEQ(saved, p->p_siglist)) {
			/* pending set changed */
			p->p_flag |= P_SIGEVENT;
			/* Wake any KSE upcall thread sleeping on p_siglist. */
			wakeup(&p->p_siglist);
		}
	}
}
234
/*
 * Return non-zero if stack pointer `sp' lies on the current thread's
 * alternate signal stack; 0 if no alternate stack is configured or sp
 * is outside it.
 */
int
sigonstack(size_t sp)
{
	struct thread *td = curthread;

	return ((td->td_pflags & TDP_ALTSTACK) ?
#if defined(COMPAT_43)
	    /*
	     * Old-style sigstack(2) records no size: a zero ss_size means
	     * "trust the SS_ONSTACK flag" instead of a range check.
	     */
	    ((td->td_sigstk.ss_size == 0) ?
		(td->td_sigstk.ss_flags & SS_ONSTACK) :
		((sp - (size_t)td->td_sigstk.ss_sp) < td->td_sigstk.ss_size))
#else
	    /* Unsigned subtraction doubles as a below-base check. */
	    ((sp - (size_t)td->td_sigstk.ss_sp) < td->td_sigstk.ss_size)
#endif
	    : 0);
}
250
251 static __inline int
252 sigprop(int sig)
253 {
254
255 if (sig > 0 && sig < NSIG)
256 return (sigproptbl[_SIG_IDX(sig)]);
257 return (0);
258 }
259
260 int
261 sig_ffs(sigset_t *set)
262 {
263 int i;
264
265 for (i = 0; i < _SIG_WORDS; i++)
266 if (set->__bits[i])
267 return (ffs(set->__bits[i]) + (i * 32));
268 return (0);
269 }
270
271 /*
272 * kern_sigaction
273 * sigaction
274 * freebsd4_sigaction
275 * osigaction
276 *
277 * MPSAFE
278 */
int
kern_sigaction(td, sig, act, oact, flags)
	struct thread *td;
	register int sig;
	struct sigaction *act, *oact;
	int flags;
{
	struct sigacts *ps;
	struct thread *td0;
	struct proc *p = td->td_proc;

	if (!_SIG_VALID(sig))
		return (EINVAL);

	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	/* Report the current disposition first, before any change below. */
	if (oact) {
		oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
		oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
		oact->sa_flags = 0;
		if (SIGISMEMBER(ps->ps_sigonstack, sig))
			oact->sa_flags |= SA_ONSTACK;
		/* ps_sigintr records signals that interrupt; absence == SA_RESTART. */
		if (!SIGISMEMBER(ps->ps_sigintr, sig))
			oact->sa_flags |= SA_RESTART;
		if (SIGISMEMBER(ps->ps_sigreset, sig))
			oact->sa_flags |= SA_RESETHAND;
		if (SIGISMEMBER(ps->ps_signodefer, sig))
			oact->sa_flags |= SA_NODEFER;
		if (SIGISMEMBER(ps->ps_siginfo, sig))
			oact->sa_flags |= SA_SIGINFO;
		if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP)
			oact->sa_flags |= SA_NOCLDSTOP;
		if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT)
			oact->sa_flags |= SA_NOCLDWAIT;
	}
	if (act) {
		/* SIGKILL and SIGSTOP may never be caught or ignored. */
		if ((sig == SIGKILL || sig == SIGSTOP) &&
		    act->sa_handler != SIG_DFL) {
			mtx_unlock(&ps->ps_mtx);
			PROC_UNLOCK(p);
			return (EINVAL);
		}

		/*
		 * Change setting atomically.
		 */

		ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
		SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
		if (act->sa_flags & SA_SIGINFO) {
			ps->ps_sigact[_SIG_IDX(sig)] =
			    (__sighandler_t *)act->sa_sigaction;
			SIGADDSET(ps->ps_siginfo, sig);
		} else {
			ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
			SIGDELSET(ps->ps_siginfo, sig);
		}
		if (!(act->sa_flags & SA_RESTART))
			SIGADDSET(ps->ps_sigintr, sig);
		else
			SIGDELSET(ps->ps_sigintr, sig);
		if (act->sa_flags & SA_ONSTACK)
			SIGADDSET(ps->ps_sigonstack, sig);
		else
			SIGDELSET(ps->ps_sigonstack, sig);
		if (act->sa_flags & SA_RESETHAND)
			SIGADDSET(ps->ps_sigreset, sig);
		else
			SIGDELSET(ps->ps_sigreset, sig);
		if (act->sa_flags & SA_NODEFER)
			SIGADDSET(ps->ps_signodefer, sig);
		else
			SIGDELSET(ps->ps_signodefer, sig);
		if (sig == SIGCHLD) {
			if (act->sa_flags & SA_NOCLDSTOP)
				ps->ps_flag |= PS_NOCLDSTOP;
			else
				ps->ps_flag &= ~PS_NOCLDSTOP;
			if (act->sa_flags & SA_NOCLDWAIT) {
				/*
				 * Paranoia: since SA_NOCLDWAIT is implemented
				 * by reparenting the dying child to PID 1 (and
				 * trust it to reap the zombie), PID 1 itself
				 * is forbidden to set SA_NOCLDWAIT.
				 */
				if (p->p_pid == 1)
					ps->ps_flag &= ~PS_NOCLDWAIT;
				else
					ps->ps_flag |= PS_NOCLDWAIT;
			} else
				ps->ps_flag &= ~PS_NOCLDWAIT;
			if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
				ps->ps_flag |= PS_CLDSIGIGN;
			else
				ps->ps_flag &= ~PS_CLDSIGIGN;
		}
		/*
		 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
		 * and for signals set to SIG_DFL where the default is to
		 * ignore. However, don't put SIGCONT in ps_sigignore, as we
		 * have to restart the process.
		 */
		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
		    (sigprop(sig) & SA_IGNORE &&
		     ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
			/* Tell a waiting KSE upcall thread the pending set changed. */
			if ((p->p_flag & P_SA) &&
			     SIGISMEMBER(p->p_siglist, sig)) {
				p->p_flag |= P_SIGEVENT;
				wakeup(&p->p_siglist);
			}
			/* never to be seen again */
			SIGDELSET(p->p_siglist, sig);
			mtx_lock_spin(&sched_lock);
			FOREACH_THREAD_IN_PROC(p, td0)
				SIGDELSET(td0->td_siglist, sig);
			mtx_unlock_spin(&sched_lock);
			if (sig != SIGCONT)
				/* easier in psignal */
				SIGADDSET(ps->ps_sigignore, sig);
			SIGDELSET(ps->ps_sigcatch, sig);
		} else {
			SIGDELSET(ps->ps_sigignore, sig);
			if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
				SIGDELSET(ps->ps_sigcatch, sig);
			else
				SIGADDSET(ps->ps_sigcatch, sig);
		}
#ifdef COMPAT_FREEBSD4
		/* Track which handlers expect the FreeBSD 4 sigreturn ABI. */
		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
		    (flags & KSA_FREEBSD4) == 0)
			SIGDELSET(ps->ps_freebsd4, sig);
		else
			SIGADDSET(ps->ps_freebsd4, sig);
#endif
#ifdef COMPAT_43
		/* Track which handlers expect the 4.3BSD osigset ABI. */
		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
		    (flags & KSA_OSIGSET) == 0)
			SIGDELSET(ps->ps_osigset, sig);
		else
			SIGADDSET(ps->ps_osigset, sig);
#endif
	}
	mtx_unlock(&ps->ps_mtx);
	PROC_UNLOCK(p);
	return (0);
}
428
429 #ifndef _SYS_SYSPROTO_H_
430 struct sigaction_args {
431 int sig;
432 struct sigaction *act;
433 struct sigaction *oact;
434 };
435 #endif
436 /*
437 * MPSAFE
438 */
439 int
440 sigaction(td, uap)
441 struct thread *td;
442 register struct sigaction_args *uap;
443 {
444 struct sigaction act, oact;
445 register struct sigaction *actp, *oactp;
446 int error;
447
448 actp = (uap->act != NULL) ? &act : NULL;
449 oactp = (uap->oact != NULL) ? &oact : NULL;
450 if (actp) {
451 error = copyin(uap->act, actp, sizeof(act));
452 if (error)
453 return (error);
454 }
455 error = kern_sigaction(td, uap->sig, actp, oactp, 0);
456 if (oactp && !error)
457 error = copyout(oactp, uap->oact, sizeof(oact));
458 return (error);
459 }
460
461 #ifdef COMPAT_FREEBSD4
462 #ifndef _SYS_SYSPROTO_H_
463 struct freebsd4_sigaction_args {
464 int sig;
465 struct sigaction *act;
466 struct sigaction *oact;
467 };
468 #endif
469 /*
470 * MPSAFE
471 */
472 int
473 freebsd4_sigaction(td, uap)
474 struct thread *td;
475 register struct freebsd4_sigaction_args *uap;
476 {
477 struct sigaction act, oact;
478 register struct sigaction *actp, *oactp;
479 int error;
480
481
482 actp = (uap->act != NULL) ? &act : NULL;
483 oactp = (uap->oact != NULL) ? &oact : NULL;
484 if (actp) {
485 error = copyin(uap->act, actp, sizeof(act));
486 if (error)
487 return (error);
488 }
489 error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4);
490 if (oactp && !error)
491 error = copyout(oactp, uap->oact, sizeof(oact));
492 return (error);
493 }
#endif /* COMPAT_FREEBSD4 */
495
496 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
497 #ifndef _SYS_SYSPROTO_H_
498 struct osigaction_args {
499 int signum;
500 struct osigaction *nsa;
501 struct osigaction *osa;
502 };
503 #endif
504 /*
505 * MPSAFE
506 */
507 int
508 osigaction(td, uap)
509 struct thread *td;
510 register struct osigaction_args *uap;
511 {
512 struct osigaction sa;
513 struct sigaction nsa, osa;
514 register struct sigaction *nsap, *osap;
515 int error;
516
517 if (uap->signum <= 0 || uap->signum >= ONSIG)
518 return (EINVAL);
519
520 nsap = (uap->nsa != NULL) ? &nsa : NULL;
521 osap = (uap->osa != NULL) ? &osa : NULL;
522
523 if (nsap) {
524 error = copyin(uap->nsa, &sa, sizeof(sa));
525 if (error)
526 return (error);
527 nsap->sa_handler = sa.sa_handler;
528 nsap->sa_flags = sa.sa_flags;
529 OSIG2SIG(sa.sa_mask, nsap->sa_mask);
530 }
531 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
532 if (osap && !error) {
533 sa.sa_handler = osap->sa_handler;
534 sa.sa_flags = osap->sa_flags;
535 SIG2OSIG(osap->sa_mask, sa.sa_mask);
536 error = copyout(&sa, uap->osa, sizeof(sa));
537 }
538 return (error);
539 }
540
541 #if !defined(__i386__) && !defined(__alpha__)
542 /* Avoid replicating the same stub everywhere */
/* Stub: osigreturn(2) is only implemented on i386 and alpha. */
int
osigreturn(struct thread *td, struct osigreturn_args *uap)
{

	return (nosys(td, (struct nosys_args *)uap));
}
551 #endif
552 #endif /* COMPAT_43 */
553
554 /*
555 * Initialize signal state for process 0;
556 * set to ignore signals that are ignored by default.
557 */
558 void
559 siginit(p)
560 struct proc *p;
561 {
562 register int i;
563 struct sigacts *ps;
564
565 PROC_LOCK(p);
566 ps = p->p_sigacts;
567 mtx_lock(&ps->ps_mtx);
568 for (i = 1; i <= NSIG; i++)
569 if (sigprop(i) & SA_IGNORE && i != SIGCONT)
570 SIGADDSET(ps->ps_sigignore, i);
571 mtx_unlock(&ps->ps_mtx);
572 PROC_UNLOCK(p);
573 }
574
575 /*
576 * Reset signals for an exec of the specified process.
577 */
void
execsigs(struct proc *p)
{
	struct sigacts *ps;
	int sig;
	struct thread *td;

	/*
	 * Reset caught signals.  Held signals remain held
	 * through td_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	PROC_LOCK_ASSERT(p, MA_OWNED);
	td = FIRST_THREAD_IN_PROC(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	/* Destructive scan: each pass clears the lowest caught signal. */
	while (SIGNOTEMPTY(ps->ps_sigcatch)) {
		sig = sig_ffs(&ps->ps_sigcatch);
		SIGDELSET(ps->ps_sigcatch, sig);
		if (sigprop(sig) & SA_IGNORE) {
			/* Default action is ignore: discard any pending instance. */
			if (sig != SIGCONT)
				SIGADDSET(ps->ps_sigignore, sig);
			SIGDELSET(p->p_siglist, sig);
			/*
			 * There is only one thread at this point.
			 */
			SIGDELSET(td->td_siglist, sig);
		}
		ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
	}
	/*
	 * Reset stack state to the user stack.
	 * Clear set of signals caught on the signal stack.
	 */
	td->td_sigstk.ss_flags = SS_DISABLE;
	td->td_sigstk.ss_size = 0;
	td->td_sigstk.ss_sp = 0;
	td->td_pflags &= ~TDP_ALTSTACK;
	/*
	 * Reset no zombies if child dies flag as Solaris does.
	 */
	ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
	if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
		ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
	mtx_unlock(&ps->ps_mtx);
}
624
625 /*
626 * kern_sigprocmask()
627 *
628 * Manipulate signal mask.
629 */
int
kern_sigprocmask(td, how, set, oset, old)
	struct thread *td;
	int how;
	sigset_t *set, *oset;
	int old;
{
	int error;

	PROC_LOCK(td->td_proc);
	if (oset != NULL)
		*oset = td->td_sigmask;

	error = 0;
	if (set != NULL) {
		switch (how) {
		case SIG_BLOCK:
			/* Blocking more signals can never make one deliverable,
			 * so no signotify() is needed here. */
			SIG_CANTMASK(*set);
			SIGSETOR(td->td_sigmask, *set);
			break;
		case SIG_UNBLOCK:
			SIGSETNAND(td->td_sigmask, *set);
			signotify(td);
			break;
		case SIG_SETMASK:
			SIG_CANTMASK(*set);
			/* `old' selects the 4.3BSD behavior: only replace the
			 * low word of the mask (osigsetmask compatibility). */
			if (old)
				SIGSETLO(td->td_sigmask, *set);
			else
				td->td_sigmask = *set;
			signotify(td);
			break;
		default:
			error = EINVAL;
			break;
		}
	}
	PROC_UNLOCK(td->td_proc);
	return (error);
}
670
671 /*
672 * sigprocmask() - MP SAFE
673 */
674
675 #ifndef _SYS_SYSPROTO_H_
676 struct sigprocmask_args {
677 int how;
678 const sigset_t *set;
679 sigset_t *oset;
680 };
681 #endif
682 int
683 sigprocmask(td, uap)
684 register struct thread *td;
685 struct sigprocmask_args *uap;
686 {
687 sigset_t set, oset;
688 sigset_t *setp, *osetp;
689 int error;
690
691 setp = (uap->set != NULL) ? &set : NULL;
692 osetp = (uap->oset != NULL) ? &oset : NULL;
693 if (setp) {
694 error = copyin(uap->set, setp, sizeof(set));
695 if (error)
696 return (error);
697 }
698 error = kern_sigprocmask(td, uap->how, setp, osetp, 0);
699 if (osetp && !error) {
700 error = copyout(osetp, uap->oset, sizeof(oset));
701 }
702 return (error);
703 }
704
705 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
706 /*
707 * osigprocmask() - MP SAFE
708 */
709 #ifndef _SYS_SYSPROTO_H_
710 struct osigprocmask_args {
711 int how;
712 osigset_t mask;
713 };
714 #endif
715 int
716 osigprocmask(td, uap)
717 register struct thread *td;
718 struct osigprocmask_args *uap;
719 {
720 sigset_t set, oset;
721 int error;
722
723 OSIG2SIG(uap->mask, set);
724 error = kern_sigprocmask(td, uap->how, &set, &oset, 1);
725 SIG2OSIG(oset, td->td_retval[0]);
726 return (error);
727 }
728 #endif /* COMPAT_43 */
729
730 #ifndef _SYS_SYSPROTO_H_
731 struct sigpending_args {
732 sigset_t *set;
733 };
734 #endif
735 /*
736 * MPSAFE
737 */
int
sigwait(struct thread *td, struct sigwait_args *uap)
{
	siginfo_t info;
	sigset_t set;
	int error;

	error = copyin(uap->set, &set, sizeof(set));
	if (error) {
		/* sigwait(3) reports errors via its return value, so the
		 * syscall itself succeeds with the errno in td_retval[0]. */
		td->td_retval[0] = error;
		return (0);
	}

	error = kern_sigtimedwait(td, set, &info, NULL);
	if (error) {
		/* ERESTART must propagate so the syscall is restarted. */
		if (error == ERESTART)
			return (error);
		td->td_retval[0] = error;
		return (0);
	}

	error = copyout(&info.si_signo, uap->sig, sizeof(info.si_signo));
	/* Repost if we got an error. */
	if (error && info.si_signo) {
		PROC_LOCK(td->td_proc);
		tdsignal(td, info.si_signo, SIGTARGET_TD);
		PROC_UNLOCK(td->td_proc);
	}
	td->td_retval[0] = error;
	return (0);
}
769 /*
770 * MPSAFE
771 */
772 int
773 sigtimedwait(struct thread *td, struct sigtimedwait_args *uap)
774 {
775 struct timespec ts;
776 struct timespec *timeout;
777 sigset_t set;
778 siginfo_t info;
779 int error;
780
781 if (uap->timeout) {
782 error = copyin(uap->timeout, &ts, sizeof(ts));
783 if (error)
784 return (error);
785
786 timeout = &ts;
787 } else
788 timeout = NULL;
789
790 error = copyin(uap->set, &set, sizeof(set));
791 if (error)
792 return (error);
793
794 error = kern_sigtimedwait(td, set, &info, timeout);
795 if (error)
796 return (error);
797
798 if (uap->info)
799 error = copyout(&info, uap->info, sizeof(info));
800 /* Repost if we got an error. */
801 if (error && info.si_signo) {
802 PROC_LOCK(td->td_proc);
803 tdsignal(td, info.si_signo, SIGTARGET_TD);
804 PROC_UNLOCK(td->td_proc);
805 } else {
806 td->td_retval[0] = info.si_signo;
807 }
808 return (error);
809 }
810
811 /*
812 * MPSAFE
813 */
814 int
815 sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap)
816 {
817 siginfo_t info;
818 sigset_t set;
819 int error;
820
821 error = copyin(uap->set, &set, sizeof(set));
822 if (error)
823 return (error);
824
825 error = kern_sigtimedwait(td, set, &info, NULL);
826 if (error)
827 return (error);
828
829 if (uap->info)
830 error = copyout(&info, uap->info, sizeof(info));
831 /* Repost if we got an error. */
832 if (error && info.si_signo) {
833 PROC_LOCK(td->td_proc);
834 tdsignal(td, info.si_signo, SIGTARGET_TD);
835 PROC_UNLOCK(td->td_proc);
836 } else {
837 td->td_retval[0] = info.si_signo;
838 }
839 return (error);
840 }
841
/*
 * Common backend for sigwait(), sigwaitinfo() and sigtimedwait():
 * wait until one of the signals in `waitset' is pending for this
 * thread, fill in *info, and consume the signal.  A NULL `timeout'
 * means wait forever.
 */
static int
kern_sigtimedwait(struct thread *td, sigset_t waitset, siginfo_t *info,
	struct timespec *timeout)
{
	struct sigacts *ps;
	sigset_t savedmask;
	struct proc *p;
	int error, sig, hz, i, timevalid = 0;
	struct timespec rts, ets, ts;
	struct timeval tv;

	p = td->td_proc;
	error = 0;
	sig = 0;
	SIG_CANTMASK(waitset);

	PROC_LOCK(p);
	ps = p->p_sigacts;
	savedmask = td->td_sigmask;	/* restored on every exit path */
	if (timeout) {
		if (timeout->tv_nsec >= 0 && timeout->tv_nsec < 1000000000) {
			timevalid = 1;
			/* Compute the absolute expiry time up front. */
			getnanouptime(&rts);
			ets = rts;
			timespecadd(&ets, timeout);
		}
		/* Invalid timeouts are reported as EINVAL only after the
		 * pending-signal scan below, as POSIX requires. */
	}

restart:
	for (i = 1; i <= _SIG_MAXSIG; ++i) {
		if (!SIGISMEMBER(waitset, i))
			continue;
		if (!SIGISMEMBER(td->td_siglist, i)) {
			if (SIGISMEMBER(p->p_siglist, i)) {
				if (p->p_flag & P_SA) {
					/* Notify a KSE upcall waiter of the change. */
					p->p_flag |= P_SIGEVENT;
					wakeup(&p->p_siglist);
				}
				/* Pull the process-pending signal onto this thread. */
				SIGDELSET(p->p_siglist, i);
				SIGADDSET(td->td_siglist, i);
			} else
				continue;
		}

		/*
		 * Temporarily mask everything except signal i so that
		 * cursig() can only select it.
		 */
		SIGFILLSET(td->td_sigmask);
		SIG_CANTMASK(td->td_sigmask);
		SIGDELSET(td->td_sigmask, i);
		mtx_lock(&ps->ps_mtx);
		sig = cursig(td);
		mtx_unlock(&ps->ps_mtx);
		if (sig)
			goto out;
		else {
			/*
			 * Because cursig() may have stopped current thread,
			 * after it is resumed, things may have already been
			 * changed, it should rescan any pending signals.
			 */
			goto restart;
		}
	}
	if (error)
		goto out;

	/*
	 * POSIX says this must be checked after looking for pending
	 * signals.
	 */
	if (timeout) {
		if (!timevalid) {
			error = EINVAL;
			goto out;
		}
		getnanouptime(&rts);
		if (timespeccmp(&rts, &ets, >=)) {
			error = EAGAIN;
			goto out;
		}
		/* Convert remaining time to a tick count for msleep(). */
		ts = ets;
		timespecsub(&ts, &rts);
		TIMESPEC_TO_TIMEVAL(&tv, &ts);
		hz = tvtohz(&tv);
	} else
		hz = 0;		/* sleep without timeout */

	/* Sleep with only the waited-for signals unmasked. */
	td->td_sigmask = savedmask;
	SIGSETNAND(td->td_sigmask, waitset);
	signotify(td);
	error = msleep(&ps, &p->p_mtx, PPAUSE|PCATCH, "sigwait", hz);
	if (timeout) {
		if (error == ERESTART) {
			/* timeout can not be restarted. */
			error = EINTR;
		} else if (error == EAGAIN) {
			/* will calculate timeout by ourself. */
			error = 0;
		}
	}
	goto restart;

out:
	td->td_sigmask = savedmask;
	signotify(td);
	if (sig) {
		/* Consume the signal and report its number to the caller. */
		SIGDELSET(td->td_siglist, sig);
		bzero(info, sizeof(*info));
		info->si_signo = sig;
		info->si_code = 0;
		error = 0;

#ifdef KTRACE
		if (KTRPOINT(td, KTR_PSIG)) {
			sig_t action;

			mtx_lock(&ps->ps_mtx);
			action = ps->ps_sigact[_SIG_IDX(sig)];
			mtx_unlock(&ps->ps_mtx);
			ktrpsig(sig, action, &td->td_sigmask, 0);
		}
#endif
		_STOPEVENT(p, S_SIG, sig);

		/* SIGKILL can never be waited away: it still kills us. */
		if (sig == SIGKILL) {
			p->p_code = 0;
			p->p_sig = sig;
			sigexit(td, sig);
		}
	}
	PROC_UNLOCK(p);
	return (error);
}
973
974 /*
975 * MPSAFE
976 */
977 int
978 sigpending(td, uap)
979 struct thread *td;
980 struct sigpending_args *uap;
981 {
982 struct proc *p = td->td_proc;
983 sigset_t siglist;
984
985 PROC_LOCK(p);
986 siglist = p->p_siglist;
987 SIGSETOR(siglist, td->td_siglist);
988 PROC_UNLOCK(p);
989 return (copyout(&siglist, uap->set, sizeof(sigset_t)));
990 }
991
992 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
993 #ifndef _SYS_SYSPROTO_H_
994 struct osigpending_args {
995 int dummy;
996 };
997 #endif
998 /*
999 * MPSAFE
1000 */
1001 int
1002 osigpending(td, uap)
1003 struct thread *td;
1004 struct osigpending_args *uap;
1005 {
1006 struct proc *p = td->td_proc;
1007 sigset_t siglist;
1008
1009 PROC_LOCK(p);
1010 siglist = p->p_siglist;
1011 SIGSETOR(siglist, td->td_siglist);
1012 PROC_UNLOCK(p);
1013 SIG2OSIG(siglist, td->td_retval[0]);
1014 return (0);
1015 }
1016 #endif /* COMPAT_43 */
1017
1018 #if defined(COMPAT_43)
1019 /*
1020 * Generalized interface signal handler, 4.3-compatible.
1021 */
1022 #ifndef _SYS_SYSPROTO_H_
1023 struct osigvec_args {
1024 int signum;
1025 struct sigvec *nsv;
1026 struct sigvec *osv;
1027 };
1028 #endif
1029 /*
1030 * MPSAFE
1031 */
1032 /* ARGSUSED */
int
osigvec(td, uap)
	struct thread *td;
	register struct osigvec_args *uap;
{
	struct sigvec vec;
	struct sigaction nsa, osa;
	register struct sigaction *nsap, *osap;
	int error;

	if (uap->signum <= 0 || uap->signum >= ONSIG)
		return (EINVAL);
	nsap = (uap->nsv != NULL) ? &nsa : NULL;
	osap = (uap->osv != NULL) ? &osa : NULL;
	if (nsap) {
		error = copyin(uap->nsv, &vec, sizeof(vec));
		if (error)
			return (error);
		nsap->sa_handler = vec.sv_handler;
		OSIG2SIG(vec.sv_mask, nsap->sa_mask);
		nsap->sa_flags = vec.sv_flags;
		/* SV_INTERRUPT has the opposite sense of SA_RESTART and the
		 * same bit value, so XOR flips between the two conventions. */
		nsap->sa_flags ^= SA_RESTART;	/* opposite of SV_INTERRUPT */
	}
	error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
	if (osap && !error) {
		vec.sv_handler = osap->sa_handler;
		SIG2OSIG(osap->sa_mask, vec.sv_mask);
		vec.sv_flags = osap->sa_flags;
		/* SA_NOCLDWAIT does not exist in the sigvec ABI. */
		vec.sv_flags &= ~SA_NOCLDWAIT;
		/* Flip back to the SV_INTERRUPT convention. */
		vec.sv_flags ^= SA_RESTART;
		error = copyout(&vec, uap->osv, sizeof(vec));
	}
	return (error);
}
1067
1068 #ifndef _SYS_SYSPROTO_H_
1069 struct osigblock_args {
1070 int mask;
1071 };
1072 #endif
1073 /*
1074 * MPSAFE
1075 */
1076 int
1077 osigblock(td, uap)
1078 register struct thread *td;
1079 struct osigblock_args *uap;
1080 {
1081 struct proc *p = td->td_proc;
1082 sigset_t set;
1083
1084 OSIG2SIG(uap->mask, set);
1085 SIG_CANTMASK(set);
1086 PROC_LOCK(p);
1087 SIG2OSIG(td->td_sigmask, td->td_retval[0]);
1088 SIGSETOR(td->td_sigmask, set);
1089 PROC_UNLOCK(p);
1090 return (0);
1091 }
1092
1093 #ifndef _SYS_SYSPROTO_H_
1094 struct osigsetmask_args {
1095 int mask;
1096 };
1097 #endif
1098 /*
1099 * MPSAFE
1100 */
1101 int
1102 osigsetmask(td, uap)
1103 struct thread *td;
1104 struct osigsetmask_args *uap;
1105 {
1106 struct proc *p = td->td_proc;
1107 sigset_t set;
1108
1109 OSIG2SIG(uap->mask, set);
1110 SIG_CANTMASK(set);
1111 PROC_LOCK(p);
1112 SIG2OSIG(td->td_sigmask, td->td_retval[0]);
1113 SIGSETLO(td->td_sigmask, set);
1114 signotify(td);
1115 PROC_UNLOCK(p);
1116 return (0);
1117 }
1118 #endif /* COMPAT_43 */
1119
1120 /*
1121 * Suspend process until signal, providing mask to be set
1122 * in the meantime.
1123 ***** XXXKSE this doesn't make sense under KSE.
1124 ***** Do we suspend the thread or all threads in the process?
1125 ***** How do we suspend threads running NOW on another processor?
1126 */
1127 #ifndef _SYS_SYSPROTO_H_
1128 struct sigsuspend_args {
1129 const sigset_t *sigmask;
1130 };
1131 #endif
1132 /*
1133 * MPSAFE
1134 */
1135 /* ARGSUSED */
1136 int
1137 sigsuspend(td, uap)
1138 struct thread *td;
1139 struct sigsuspend_args *uap;
1140 {
1141 sigset_t mask;
1142 int error;
1143
1144 error = copyin(uap->sigmask, &mask, sizeof(mask));
1145 if (error)
1146 return (error);
1147 return (kern_sigsuspend(td, mask));
1148 }
1149
int
kern_sigsuspend(struct thread *td, sigset_t mask)
{
	struct proc *p = td->td_proc;

	/*
	 * When returning from sigsuspend, we want
	 * the old mask to be restored after the
	 * signal handler has finished.  Thus, we
	 * save it here and mark the sigacts structure
	 * to indicate this.
	 */
	PROC_LOCK(p);
	td->td_oldsigmask = td->td_sigmask;
	td->td_pflags |= TDP_OLDMASK;
	SIG_CANTMASK(mask);
	td->td_sigmask = mask;
	signotify(td);
	/* Sleep until PCATCH breaks us out with EINTR/ERESTART on a signal;
	 * a plain wakeup (return value 0) just puts us back to sleep. */
	while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "pause", 0) == 0)
		/* void */;
	PROC_UNLOCK(p);
	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}
1174
1175 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
1176 /*
1177 * Compatibility sigsuspend call for old binaries. Note nonstandard calling
1178 * convention: libc stub passes mask, not pointer, to save a copyin.
1179 */
1180 #ifndef _SYS_SYSPROTO_H_
1181 struct osigsuspend_args {
1182 osigset_t mask;
1183 };
1184 #endif
1185 /*
1186 * MPSAFE
1187 */
1188 /* ARGSUSED */
int
osigsuspend(td, uap)
	struct thread *td;
	struct osigsuspend_args *uap;
{
	struct proc *p = td->td_proc;
	sigset_t mask;

	PROC_LOCK(p);
	/* As in kern_sigsuspend(): save the mask and flag it for restore
	 * after the handler runs. */
	td->td_oldsigmask = td->td_sigmask;
	td->td_pflags |= TDP_OLDMASK;
	OSIG2SIG(uap->mask, mask);
	SIG_CANTMASK(mask);
	/* Old ABI: only the low word of the mask is replaced. */
	SIGSETLO(td->td_sigmask, mask);
	signotify(td);
	/* Sleep until a signal interrupts us; 0 means spurious wakeup. */
	while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "opause", 0) == 0)
		/* void */;
	PROC_UNLOCK(p);
	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}
1210 #endif /* COMPAT_43 */
1211
#if defined(COMPAT_43)
/*
 * Old sigstack() syscall: set and/or fetch the per-thread signal stack
 * using the pre-sigaltstack struct sigstack interface.
 */
#ifndef _SYS_SYSPROTO_H_
struct osigstack_args {
	struct sigstack *nss;	/* new stack, or NULL to leave unchanged */
	struct sigstack *oss;	/* where to return the old stack, or NULL */
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
osigstack(td, uap)
	struct thread *td;
	register struct osigstack_args *uap;
{
	struct sigstack nss, oss;
	int error = 0;

	/* Copy in the new stack specification, if one was supplied. */
	if (uap->nss != NULL) {
		error = copyin(uap->nss, &nss, sizeof(nss));
		if (error)
			return (error);
	}
	/* Snapshot the old state before modifying anything. */
	oss.ss_sp = td->td_sigstk.ss_sp;
	oss.ss_onstack = sigonstack(cpu_getstack(td));
	if (uap->nss != NULL) {
		td->td_sigstk.ss_sp = nss.ss_sp;
		/* The old interface carries no stack size; record zero. */
		td->td_sigstk.ss_size = 0;
		td->td_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK;
		td->td_pflags |= TDP_ALTSTACK;
	}
	if (uap->oss != NULL)
		error = copyout(&oss, uap->oss, sizeof(oss));

	return (error);
}
#endif /* COMPAT_43 */
1250
#ifndef _SYS_SYSPROTO_H_
struct sigaltstack_args {
	stack_t *ss;	/* new alternate stack, or NULL */
	stack_t *oss;	/* where to return the old stack, or NULL */
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
/*
 * sigaltstack() syscall: thin copyin/copyout wrapper around
 * kern_sigaltstack(), which does the real checking and update.
 */
int
sigaltstack(td, uap)
	struct thread *td;
	register struct sigaltstack_args *uap;
{
	stack_t ss, oss;
	int error;

	if (uap->ss != NULL) {
		error = copyin(uap->ss, &ss, sizeof(ss));
		if (error)
			return (error);
	}
	error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
	    (uap->oss != NULL) ? &oss : NULL);
	if (error)
		return (error);
	/* On success, return the previous stack to the caller if requested. */
	if (uap->oss != NULL)
		error = copyout(&oss, uap->oss, sizeof(stack_t));
	return (error);
}
1282
/*
 * Common code for sigaltstack():  examine and/or replace the calling
 * thread's alternate signal stack.  Either ss or oss may be NULL.
 *
 * Returns 0 on success; EPERM if the thread is currently running on the
 * alternate stack, EINVAL for bad flags, ENOMEM if the new stack is
 * smaller than the ABI minimum.
 */
int
kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
{
	struct proc *p = td->td_proc;
	int oonstack;

	/* Are we executing on the alternate stack right now? */
	oonstack = sigonstack(cpu_getstack(td));

	if (oss != NULL) {
		*oss = td->td_sigstk;
		oss->ss_flags = (td->td_pflags & TDP_ALTSTACK)
		    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	}

	if (ss != NULL) {
		/* The stack cannot be changed while we are using it. */
		if (oonstack)
			return (EPERM);
		if ((ss->ss_flags & ~SS_DISABLE) != 0)
			return (EINVAL);
		if (!(ss->ss_flags & SS_DISABLE)) {
			/* Enforce the ABI's minimum signal stack size. */
			if (ss->ss_size < p->p_sysent->sv_minsigstksz) {
				return (ENOMEM);
			}
			td->td_sigstk = *ss;
			td->td_pflags |= TDP_ALTSTACK;
		} else {
			td->td_pflags &= ~TDP_ALTSTACK;
		}
	}
	return (0);
}
1314
1315 /*
1316 * Common code for kill process group/broadcast kill.
1317 * cp is calling process.
1318 */
1319 static int
1320 killpg1(td, sig, pgid, all)
1321 register struct thread *td;
1322 int sig, pgid, all;
1323 {
1324 register struct proc *p;
1325 struct pgrp *pgrp;
1326 int nfound = 0;
1327
1328 if (all) {
1329 /*
1330 * broadcast
1331 */
1332 sx_slock(&allproc_lock);
1333 LIST_FOREACH(p, &allproc, p_list) {
1334 PROC_LOCK(p);
1335 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
1336 p == td->td_proc) {
1337 PROC_UNLOCK(p);
1338 continue;
1339 }
1340 if (p_cansignal(td, p, sig) == 0) {
1341 nfound++;
1342 if (sig)
1343 psignal(p, sig);
1344 }
1345 PROC_UNLOCK(p);
1346 }
1347 sx_sunlock(&allproc_lock);
1348 } else {
1349 sx_slock(&proctree_lock);
1350 if (pgid == 0) {
1351 /*
1352 * zero pgid means send to my process group.
1353 */
1354 pgrp = td->td_proc->p_pgrp;
1355 PGRP_LOCK(pgrp);
1356 } else {
1357 pgrp = pgfind(pgid);
1358 if (pgrp == NULL) {
1359 sx_sunlock(&proctree_lock);
1360 return (ESRCH);
1361 }
1362 }
1363 sx_sunlock(&proctree_lock);
1364 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1365 PROC_LOCK(p);
1366 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM) {
1367 PROC_UNLOCK(p);
1368 continue;
1369 }
1370 if (p_cansignal(td, p, sig) == 0) {
1371 nfound++;
1372 if (sig)
1373 psignal(p, sig);
1374 }
1375 PROC_UNLOCK(p);
1376 }
1377 PGRP_UNLOCK(pgrp);
1378 }
1379 return (nfound ? 0 : ESRCH);
1380 }
1381
#ifndef _SYS_SYSPROTO_H_
struct kill_args {
	int pid;	/* target pid, 0, -1, or negated pgid */
	int signum;	/* signal to send; 0 probes permissions only */
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
/*
 * kill() syscall: send a signal to a process, a process group, or
 * broadcast, following the POSIX pid encoding (see the switch below).
 */
int
kill(td, uap)
	register struct thread *td;
	register struct kill_args *uap;
{
	register struct proc *p;
	int error;

	if ((u_int)uap->signum > _SIG_MAXSIG)
		return (EINVAL);

	if (uap->pid > 0) {
		/* kill single process */
		/* pfind()/zpfind() return the process locked on success. */
		if ((p = pfind(uap->pid)) == NULL) {
			/* Also search zombies; signalling them "succeeds". */
			if ((p = zpfind(uap->pid)) == NULL)
				return (ESRCH);
		}
		error = p_cansignal(td, p, uap->signum);
		if (error == 0 && uap->signum)
			psignal(p, uap->signum);
		PROC_UNLOCK(p);
		return (error);
	}
	switch (uap->pid) {
	case -1:		/* broadcast signal */
		return (killpg1(td, uap->signum, 0, 1));
	case 0:			/* signal own process group */
		return (killpg1(td, uap->signum, 0, 0));
	default:		/* negative explicit process group */
		return (killpg1(td, uap->signum, -uap->pid, 0));
	}
	/* NOTREACHED */
}
1425
#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct okillpg_args {
	int pgid;	/* target process group */
	int signum;	/* signal to send; 0 probes permissions only */
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
/*
 * Old killpg() syscall: validate the signal number and delegate to
 * killpg1() for delivery to the named process group.
 */
int
okillpg(struct thread *td, struct okillpg_args *uap)
{

	if ((u_int)uap->signum > _SIG_MAXSIG)
		return (EINVAL);
	return (killpg1(td, uap->signum, uap->pgid, 0));
}
#endif /* COMPAT_43 */
1448
/*
 * Send a signal to a process group.  A pgid of 0 is silently ignored.
 */
void
gsignal(pgid, sig)
	int pgid, sig;
{
	struct pgrp *pgrp;

	if (pgid != 0) {
		sx_slock(&proctree_lock);
		/* pgfind() returns the group locked (cf. killpg1()). */
		pgrp = pgfind(pgid);
		sx_sunlock(&proctree_lock);
		if (pgrp != NULL) {
			pgsignal(pgrp, sig, 0);
			PGRP_UNLOCK(pgrp);
		}
	}
}
1468
/*
 * Send a signal to every member of a process group.  If checkctty is 1,
 * limit to members which have a controlling terminal.
 * The pgrp, when non-NULL, must be locked by the caller.
 */
void
pgsignal(pgrp, sig, checkctty)
	struct pgrp *pgrp;
	int sig, checkctty;
{
	register struct proc *p;

	if (pgrp) {
		PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (checkctty == 0 || p->p_flag & P_CONTROLT)
				psignal(p, sig);
			PROC_UNLOCK(p);
		}
	}
}
1490
/*
 * Send a signal caused by a trap to the current thread.
 * If it will be caught immediately, deliver it with correct code.
 * Otherwise, post it normally.
 *
 * MPSAFE
 */
void
trapsignal(struct thread *td, int sig, u_long code)
{
	struct sigacts *ps;
	struct proc *p;
	siginfo_t siginfo;
	int error;

	p = td->td_proc;
	if (td->td_pflags & TDP_SA) {
		/* KSE (scheduler-activations) thread. */
		if (td->td_mailbox == NULL)
			thread_user_enter(td);
		PROC_LOCK(p);
		SIGDELSET(td->td_sigmask, sig);
		mtx_lock_spin(&sched_lock);
		/*
		 * Force scheduling an upcall, so UTS has chance to
		 * process the signal before thread runs again in
		 * userland.
		 */
		if (td->td_upcall)
			td->td_upcall->ku_flags |= KUF_DOUPCALL;
		mtx_unlock_spin(&sched_lock);
	} else {
		PROC_LOCK(p);
	}
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	/* Fast path: untraced, caught and unblocked -> deliver right now. */
	if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
	    !SIGISMEMBER(td->td_sigmask, sig)) {
		p->p_stats->p_ru.ru_nsignals++;
#ifdef KTRACE
		if (KTRPOINT(curthread, KTR_PSIG))
			ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
			    &td->td_sigmask, code);
#endif
		if (!(td->td_pflags & TDP_SA))
			/* Ordinary thread: push a signal frame directly. */
			(*p->p_sysent->sv_sendsig)(
				ps->ps_sigact[_SIG_IDX(sig)], sig,
				&td->td_sigmask, code);
		else if (td->td_mailbox == NULL) {
			mtx_unlock(&ps->ps_mtx);
			/* UTS caused a sync signal */
			p->p_code = code;	/* XXX for core dump/debugger */
			p->p_sig = sig;		/* XXX to verify code */
			sigexit(td, sig);
		} else {
			/* Hand the siginfo to the UTS via the thread mailbox. */
			cpu_thread_siginfo(sig, code, &siginfo);
			mtx_unlock(&ps->ps_mtx);
			SIGADDSET(td->td_sigmask, sig);
			PROC_UNLOCK(p);
			error = copyout(&siginfo, &td->td_mailbox->tm_syncsig,
			    sizeof(siginfo));
			PROC_LOCK(p);
			/* UTS memory corrupted */
			if (error)
				sigexit(td, SIGSEGV);
			mtx_lock(&ps->ps_mtx);
		}
		/* Apply the handler's mask semantics (sa_mask, SA_NODEFER). */
		SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
		if (!SIGISMEMBER(ps->ps_signodefer, sig))
			SIGADDSET(td->td_sigmask, sig);
		if (SIGISMEMBER(ps->ps_sigreset, sig)) {
			/*
			 * See kern_sigaction() for origin of this code.
			 */
			SIGDELSET(ps->ps_sigcatch, sig);
			if (sig != SIGCONT &&
			    sigprop(sig) & SA_IGNORE)
				SIGADDSET(ps->ps_sigignore, sig);
			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
		}
		mtx_unlock(&ps->ps_mtx);
	} else {
		/* Not immediately deliverable: post through tdsignal(). */
		mtx_unlock(&ps->ps_mtx);
		p->p_code = code;	/* XXX for core dump/debugger */
		p->p_sig = sig;		/* XXX to verify code */
		tdsignal(td, sig, SIGTARGET_TD);
	}
	PROC_UNLOCK(p);
}
1579
1580 static struct thread *
1581 sigtd(struct proc *p, int sig, int prop)
1582 {
1583 struct thread *td, *signal_td;
1584
1585 PROC_LOCK_ASSERT(p, MA_OWNED);
1586
1587 /*
1588 * Check if current thread can handle the signal without
1589 * switching conetxt to another thread.
1590 */
1591 if (curproc == p && !SIGISMEMBER(curthread->td_sigmask, sig))
1592 return (curthread);
1593 signal_td = NULL;
1594 mtx_lock_spin(&sched_lock);
1595 FOREACH_THREAD_IN_PROC(p, td) {
1596 if (!SIGISMEMBER(td->td_sigmask, sig)) {
1597 signal_td = td;
1598 break;
1599 }
1600 }
1601 if (signal_td == NULL)
1602 signal_td = FIRST_THREAD_IN_PROC(p);
1603 mtx_unlock_spin(&sched_lock);
1604 return (signal_td);
1605 }
1606
/*
 * Send the signal to the process.  If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *   o When a stop signal is sent to a sleeping process that takes the
 *     default action, the process is stopped without awakening it.
 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 *     regardless of the signal action (eg, blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 *
 * The caller must hold the process lock.
 *
 * MPSAFE
 */
void
psignal(struct proc *p, int sig)
{
	struct thread *td;
	int prop;

	if (!_SIG_VALID(sig))
		panic("psignal(): invalid signal");

	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * IEEE Std 1003.1-2001: return success when killing a zombie.
	 */
	if (p->p_state == PRS_ZOMBIE)
		return;
	prop = sigprop(sig);

	/*
	 * Find a thread to deliver the signal to.
	 */
	td = sigtd(p, sig, prop);

	tdsignal(td, sig, SIGTARGET_P);
}
1646
/*
 * Post a signal to a thread.  For KSE (P_SA) processes, additionally
 * detect whether the process-wide pending set changed and, if so, set
 * P_SIGEVENT and wake any waiters sleeping on &p->p_siglist.
 *
 * MPSAFE
 */
void
tdsignal(struct thread *td, int sig, sigtarget_t target)
{
	sigset_t saved;
	struct proc *p = td->td_proc;

	/* Snapshot the pending set so the change can be detected below. */
	if (p->p_flag & P_SA)
		saved = p->p_siglist;
	do_tdsignal(td, sig, target);
	if ((p->p_flag & P_SA) && !(p->p_flag & P_SIGEVENT)) {
		if (!SIGSETEQ(saved, p->p_siglist)) {
			/* pending set changed */
			p->p_flag |= P_SIGEVENT;
			wakeup(&p->p_siglist);
		}
	}
}
1667
/*
 * Core of signal posting: record the signal as pending (on the thread
 * or the process, depending on target and the thread's mask) and apply
 * the immediate process-wide effects: cancelling stop/continue pairs,
 * waking or stopping threads, and handling traced/stopped processes.
 * Called with the proc lock held; must not hold sched_lock on entry.
 */
static void
do_tdsignal(struct thread *td, int sig, sigtarget_t target)
{
	struct proc *p;
	register sig_t action;
	sigset_t *siglist;
	struct thread *td0;
	register int prop;
	struct sigacts *ps;
	int intrval;

	if (!_SIG_VALID(sig))
		panic("do_tdsignal(): invalid signal");

	p = td->td_proc;
	ps = p->p_sigacts;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KNOTE_LOCKED(&p->p_klist, NOTE_SIGNAL | sig);

	prop = sigprop(sig);

	/*
	 * If the signal is blocked and not destined for this thread, then
	 * assign it to the process so that we can find it later in the first
	 * thread that unblocks it.  Otherwise, assign it to this thread now.
	 */
	if (target == SIGTARGET_TD) {
		siglist = &td->td_siglist;
	} else {
		if (!SIGISMEMBER(td->td_sigmask, sig))
			siglist = &td->td_siglist;
		else
			siglist = &p->p_siglist;
	}

	/*
	 * If the signal is being ignored,
	 * then we forget about it immediately.
	 * (Note: we don't set SIGCONT in ps_sigignore,
	 * and if it is set to SIG_IGN,
	 * action will be SIG_DFL here.)
	 */
	mtx_lock(&ps->ps_mtx);
	if (SIGISMEMBER(ps->ps_sigignore, sig) ||
	    (p->p_flag & P_WEXIT)) {
		mtx_unlock(&ps->ps_mtx);
		return;
	}
	if (SIGISMEMBER(td->td_sigmask, sig))
		action = SIG_HOLD;
	else if (SIGISMEMBER(ps->ps_sigcatch, sig))
		action = SIG_CATCH;
	else
		action = SIG_DFL;
	/* Does this signal interrupt, or restart, interrupted syscalls? */
	if (SIGISMEMBER(ps->ps_sigintr, sig))
		intrval = EINTR;
	else
		intrval = ERESTART;
	mtx_unlock(&ps->ps_mtx);

	if (prop & SA_CONT) {
		/* A continue cancels all pending stop signals. */
		SIG_STOPSIGMASK(p->p_siglist);
		/*
		 * XXX Should investigate leaving STOP and CONT sigs only in
		 * the proc's siglist.
		 */
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td0)
			SIG_STOPSIGMASK(td0->td_siglist);
		mtx_unlock_spin(&sched_lock);
	}

	if (prop & SA_STOP) {
		/*
		 * If sending a tty stop signal to a member of an orphaned
		 * process group, discard the signal here if the action
		 * is default; don't stop the process below if sleeping,
		 * and don't clear any pending SIGCONT.
		 */
		if ((prop & SA_TTYSTOP) &&
		    (p->p_pgrp->pg_jobc == 0) &&
		    (action == SIG_DFL))
			return;
		/* Conversely, a stop cancels all pending continues. */
		SIG_CONTSIGMASK(p->p_siglist);
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td0)
			SIG_CONTSIGMASK(td0->td_siglist);
		mtx_unlock_spin(&sched_lock);
		p->p_flag &= ~P_CONTINUED;
	}

	SIGADDSET(*siglist, sig);
	signotify(td);			/* uses schedlock */
	/*
	 * Defer further processing for signals which are held,
	 * except that stopped processes must be continued by SIGCONT.
	 */
	if (action == SIG_HOLD &&
	    !((prop & SA_CONT) && (p->p_flag & P_STOPPED_SIG)))
		return;
	/*
	 * SIGKILL: Remove procfs STOPEVENTs.
	 */
	if (sig == SIGKILL) {
		/* from procfs_ioctl.c: PIOCBIC */
		p->p_stops = 0;
		/* from procfs_ioctl.c: PIOCCONT */
		p->p_step = 0;
		wakeup(&p->p_step);
	}
	/*
	 * Some signals have a process-wide effect and a per-thread
	 * component.  Most processing occurs when the process next
	 * tries to cross the user boundary, however there are some
	 * times when processing needs to be done immediatly, such as
	 * waking up threads so that they can cross the user boundary.
	 * We try do the per-process part here.
	 */
	if (P_SHOULDSTOP(p)) {
		/*
		 * The process is in stopped mode. All the threads should be
		 * either winding down or already on the suspended queue.
		 */
		if (p->p_flag & P_TRACED) {
			/*
			 * The traced process is already stopped,
			 * so no further action is necessary.
			 * No signal can restart us.
			 */
			goto out;
		}

		if (sig == SIGKILL) {
			/*
			 * SIGKILL sets process running.
			 * It will die elsewhere.
			 * All threads must be restarted.
			 */
			p->p_flag &= ~P_STOPPED_SIG;
			goto runfast;
		}

		if (prop & SA_CONT) {
			/*
			 * If SIGCONT is default (or ignored), we continue the
			 * process but don't leave the signal in siglist as
			 * it has no further action.  If SIGCONT is held, we
			 * continue the process and leave the signal in
			 * siglist.  If the process catches SIGCONT, let it
			 * handle the signal itself.  If it isn't waiting on
			 * an event, it goes back to run state.
			 * Otherwise, process goes back to sleep state.
			 */
			p->p_flag &= ~P_STOPPED_SIG;
			p->p_flag |= P_CONTINUED;
			if (action == SIG_DFL) {
				SIGDELSET(*siglist, sig);
			} else if (action == SIG_CATCH) {
				/*
				 * The process wants to catch it so it needs
				 * to run at least one thread, but which one?
				 * It would seem that the answer would be to
				 * run an upcall in the next KSE to run, and
				 * deliver the signal that way. In a NON KSE
				 * process, we need to make sure that the
				 * single thread is runnable asap.
				 * XXXKSE for now however, make them all run.
				 */
				goto runfast;
			}
			/*
			 * The signal is not ignored or caught.
			 */
			mtx_lock_spin(&sched_lock);
			thread_unsuspend(p);
			mtx_unlock_spin(&sched_lock);
			goto out;
		}

		if (prop & SA_STOP) {
			/*
			 * Already stopped, don't need to stop again
			 * (If we did the shell could get confused).
			 * Just make sure the signal STOP bit set.
			 */
			p->p_flag |= P_STOPPED_SIG;
			SIGDELSET(*siglist, sig);
			goto out;
		}

		/*
		 * All other kinds of signals:
		 * If a thread is sleeping interruptibly, simulate a
		 * wakeup so that when it is continued it will be made
		 * runnable and can look at the signal.  However, don't make
		 * the PROCESS runnable, leave it stopped.
		 * It may run a bit until it hits a thread_suspend_check().
		 */
		mtx_lock_spin(&sched_lock);
		if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR))
			sleepq_abort(td, intrval);
		mtx_unlock_spin(&sched_lock);
		goto out;
		/*
		 * Mutexes are short lived. Threads waiting on them will
		 * hit thread_suspend_check() soon.
		 */
	} else if (p->p_state == PRS_NORMAL) {
		if (p->p_flag & P_TRACED || action == SIG_CATCH) {
			mtx_lock_spin(&sched_lock);
			tdsigwakeup(td, sig, action, intrval);
			mtx_unlock_spin(&sched_lock);
			goto out;
		}

		MPASS(action == SIG_DFL);

		if (prop & SA_STOP) {
			/* Don't stop while the parent waits in vfork(). */
			if (p->p_flag & P_PPWAIT)
				goto out;
			p->p_flag |= P_STOPPED_SIG;
			p->p_xstat = sig;
			mtx_lock_spin(&sched_lock);
			sig_suspend_threads(td, p, 1);
			thread_stopped(p);
			/* Once fully stopped, the stop signal is consumed. */
			if (p->p_numthreads == p->p_suspcount) {
				SIGDELSET(p->p_siglist, p->p_xstat);
				FOREACH_THREAD_IN_PROC(p, td0)
					SIGDELSET(td0->td_siglist, p->p_xstat);
			}
			mtx_unlock_spin(&sched_lock);
			goto out;
		}
		else
			goto runfast;
		/* NOTREACHED */
	} else {
		/* Not in "NORMAL" state. discard the signal. */
		SIGDELSET(*siglist, sig);
		goto out;
	}

	/*
	 * The process is not stopped so we need to apply the signal to all the
	 * running threads.
	 */

runfast:
	mtx_lock_spin(&sched_lock);
	tdsigwakeup(td, sig, action, intrval);
	thread_unsuspend(p);
	mtx_unlock_spin(&sched_lock);
out:
	/* If we jump here, sched_lock should not be owned. */
	mtx_assert(&sched_lock, MA_NOTOWNED);
}
1925
/*
 * The force of a signal has been directed against a single
 * thread.  We need to see what we can do about knocking it
 * out of any sleep it may be in etc.
 *
 * Called with the proc lock and sched_lock held.
 */
static void
tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
{
	struct proc *p = td->td_proc;
	register int prop;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	prop = sigprop(sig);

	/*
	 * Bring the priority of a thread up if we want it to get
	 * killed in this lifetime.
	 */
	if (action == SIG_DFL && (prop & SA_KILL)) {
		if (p->p_nice > 0)
			sched_nice(td->td_proc, 0);
		if (td->td_priority > PUSER)
			sched_prio(td, PUSER);
	}

	if (TD_ON_SLEEPQ(td)) {
		/*
		 * If thread is sleeping uninterruptibly
		 * we can't interrupt the sleep... the signal will
		 * be noticed when the process returns through
		 * trap() or syscall().
		 */
		if ((td->td_flags & TDF_SINTR) == 0)
			return;
		/*
		 * If SIGCONT is default (or ignored) and process is
		 * asleep, we are finished; the process should not
		 * be awakened.
		 */
		if ((prop & SA_CONT) && action == SIG_DFL) {
			SIGDELSET(p->p_siglist, sig);
			/*
			 * It may be on either list in this state.
			 * Remove from both for now.
			 */
			SIGDELSET(td->td_siglist, sig);
			return;
		}

		/*
		 * Give low priority threads a better chance to run.
		 */
		if (td->td_priority > PUSER)
			sched_prio(td, PUSER);

		/* Break the sleep; intrval selects EINTR vs ERESTART. */
		sleepq_abort(td, intrval);
	} else {
		/*
		 * Other states do nothing with the signal immediately,
		 * other than kicking ourselves if we are running.
		 * It will either never be noticed, or noticed very soon.
		 */
#ifdef SMP
		if (TD_IS_RUNNING(td) && td != curthread)
			forward_signal(td);
#endif
	}
}
1995
/*
 * Suspend every thread in p as part of stopping the process.  Sleeping
 * interruptible threads are suspended directly; the rest get
 * TDF_ASTPENDING (and, on SMP, an IPI if running) so they notice the
 * stop at the next user-boundary crossing.  'sending' is non-zero when
 * called from signal delivery, in which case td itself is flagged too.
 * Called with the proc lock and sched_lock held.
 */
static void
sig_suspend_threads(struct thread *td, struct proc *p, int sending)
{
	struct thread *td2;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);

	FOREACH_THREAD_IN_PROC(p, td2) {
		if ((TD_IS_SLEEPING(td2) || TD_IS_SWAPPED(td2)) &&
		    (td2->td_flags & TDF_SINTR) &&
		    !TD_IS_SUSPENDED(td2)) {
			thread_suspend_one(td2);
		} else {
			if (sending || td != td2)
				td2->td_flags |= TDF_ASTPENDING;
#ifdef SMP
			if (TD_IS_RUNNING(td2) && td2 != td)
				forward_signal(td2);
#endif
		}
	}
}
2019
/*
 * Stop the thread on behalf of a tracing debugger: record sig in
 * p_xstat, suspend all threads, and sleep until the tracer either
 * detaches or supplies a replacement signal via td_xsig, which is
 * returned (0 means discard).  Called with the proc lock held.
 */
int
ptracestop(struct thread *td, int sig)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
	    &p->p_mtx.mtx_object, "Stopping for traced signal");

	mtx_lock_spin(&sched_lock);
	td->td_flags |= TDF_XSIG;
	mtx_unlock_spin(&sched_lock);
	td->td_xsig = sig;
	while ((p->p_flag & P_TRACED) && (td->td_flags & TDF_XSIG)) {
		/* Don't stop if the process is single-threading to exit. */
		if (p->p_flag & P_SINGLE_EXIT) {
			mtx_lock_spin(&sched_lock);
			td->td_flags &= ~TDF_XSIG;
			mtx_unlock_spin(&sched_lock);
			return (sig);
		}
		/*
		 * Just make wait() to work, the last stopped thread
		 * will win.
		 */
		p->p_xstat = sig;
		p->p_xthread = td;
		p->p_flag |= (P_STOPPED_SIG|P_STOPPED_TRACE);
		mtx_lock_spin(&sched_lock);
		sig_suspend_threads(td, p, 0);
stopme:
		/* Suspend here; mi_switch() returns when we are resumed. */
		thread_stopped(p);
		thread_suspend_one(td);
		PROC_UNLOCK(p);
		DROP_GIANT();
		mi_switch(SW_VOL, NULL);
		mtx_unlock_spin(&sched_lock);
		PICKUP_GIANT();
		PROC_LOCK(p);
		if (!(p->p_flag & P_TRACED))
			break;
		if (td->td_flags & TDF_DBSUSPEND) {
			if (p->p_flag & P_SINGLE_EXIT)
				break;
			mtx_lock_spin(&sched_lock);
			goto stopme;
		}
	}
	return (td->td_xsig);
}
2069
/*
 * If the current process has received a signal (should be caught or cause
 * termination, should interrupt current syscall), return the signal number.
 * Stop signals with default action are processed immediately, then cleared;
 * they aren't returned.  This is checked after each entry to the system for
 * a syscall or trap (though this can usually be done without calling issignal
 * by checking the pending signal masks in cursig.) The normal call
 * sequence is
 *
 *	while (sig = cursig(curthread))
 *		postsig(sig);
 */
static int
issignal(td)
	struct thread *td;
{
	struct proc *p;
	struct sigacts *ps;
	sigset_t sigpending;
	int sig, prop, newsig;

	p = td->td_proc;
	ps = p->p_sigacts;
	mtx_assert(&ps->ps_mtx, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	for (;;) {
		int traced = (p->p_flag & P_TRACED) || (p->p_stops & S_SIG);

		/* Signals pending on this thread and not blocked by it. */
		sigpending = td->td_siglist;
		SIGSETNAND(sigpending, td->td_sigmask);

		/* Ignore stops while the vfork() parent is waiting. */
		if (p->p_flag & P_PPWAIT)
			SIG_STOPSIGMASK(sigpending);
		if (SIGISEMPTY(sigpending))	/* no signal to send */
			return (0);
		sig = sig_ffs(&sigpending);

		if (p->p_stops & S_SIG) {
			mtx_unlock(&ps->ps_mtx);
			stopevent(p, S_SIG, sig);
			mtx_lock(&ps->ps_mtx);
		}

		/*
		 * We should see pending but ignored signals
		 * only if P_TRACED was on when they were posted.
		 */
		if (SIGISMEMBER(ps->ps_sigignore, sig) && (traced == 0)) {
			SIGDELSET(td->td_siglist, sig);
			if (td->td_pflags & TDP_SA)
				SIGADDSET(td->td_sigmask, sig);
			continue;
		}
		if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
			/*
			 * If traced, always stop.
			 */
			mtx_unlock(&ps->ps_mtx);
			newsig = ptracestop(td, sig);
			mtx_lock(&ps->ps_mtx);

			/*
			 * If parent wants us to take the signal,
			 * then it will leave it in p->p_xstat;
			 * otherwise we just look for signals again.
			 */
			SIGDELSET(td->td_siglist, sig);	/* clear old signal */
			if (td->td_pflags & TDP_SA)
				SIGADDSET(td->td_sigmask, sig);
			if (newsig == 0)
				continue;
			sig = newsig;
			/*
			 * If the traced bit got turned off, go back up
			 * to the top to rescan signals.  This ensures
			 * that p_sig* and p_sigact are consistent.
			 */
			if ((p->p_flag & P_TRACED) == 0)
				continue;

			/*
			 * Put the new signal into td_siglist.  If the
			 * signal is being masked, look for other signals.
			 */
			SIGADDSET(td->td_siglist, sig);
			if (td->td_pflags & TDP_SA)
				SIGDELSET(td->td_sigmask, sig);
			if (SIGISMEMBER(td->td_sigmask, sig))
				continue;
			signotify(td);
		}

		prop = sigprop(sig);

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */
		switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {

		case (intptr_t)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (p->p_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				printf("Process (pid %lu) got signal %d\n",
				    (u_long)p->p_pid, sig);
#endif
				break;		/* == ignore */
			}
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal.  However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				if (p->p_flag & P_TRACED ||
				    (p->p_pgrp->pg_jobc == 0 &&
				     prop & SA_TTYSTOP))
					break;	/* == ignore */
				mtx_unlock(&ps->ps_mtx);
				WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
				    &p->p_mtx.mtx_object, "Catching SIGSTOP");
				p->p_flag |= P_STOPPED_SIG;
				p->p_xstat = sig;
				mtx_lock_spin(&sched_lock);
				sig_suspend_threads(td, p, 0);
				thread_stopped(p);
				thread_suspend_one(td);
				PROC_UNLOCK(p);
				DROP_GIANT();
				mi_switch(SW_INVOL, NULL);
				mtx_unlock_spin(&sched_lock);
				PICKUP_GIANT();
				PROC_LOCK(p);
				mtx_lock(&ps->ps_mtx);
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else
				return (sig);
			/*NOTREACHED*/

		case (intptr_t)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
			if ((prop & SA_CONT) == 0 &&
			    (p->p_flag & P_TRACED) == 0)
				printf("issignal\n");
			break;		/* == ignore */

		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			return (sig);
		}
		SIGDELSET(td->td_siglist, sig);		/* take the signal! */
	}
	/* NOTREACHED */
}
2247
/*
 * Called (with the proc lock and sched_lock held) when a thread of p
 * has suspended.  If every thread of a P_STOPPED_SIG process is now
 * stopped, notify the parent: set P_STATCHILD, wake it and - unless it
 * requested PS_NOCLDSTOP - post SIGCHLD.  sched_lock is dropped and
 * reacquired around the parent notification.
 *
 * MPSAFE
 */
void
thread_stopped(struct proc *p)
{
	struct proc *p1 = curthread->td_proc;
	struct sigacts *ps;
	int n;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	/* Count the current thread as stopped if it belongs to p. */
	n = p->p_suspcount;
	if (p == p1)
		n++;
	if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) {
		mtx_unlock_spin(&sched_lock);
		p->p_flag &= ~P_WAITED;
		PROC_LOCK(p->p_pptr);
		/*
		 * Wake up parent sleeping in kern_wait(), also send
		 * SIGCHLD to parent, but SIGCHLD does not guarantee
		 * that parent will awake, because parent may masked
		 * the signal.
		 */
		p->p_pptr->p_flag |= P_STATCHILD;
		wakeup(p->p_pptr);
		ps = p->p_pptr->p_sigacts;
		mtx_lock(&ps->ps_mtx);
		if ((ps->ps_flag & PS_NOCLDSTOP) == 0) {
			mtx_unlock(&ps->ps_mtx);
			psignal(p->p_pptr, SIGCHLD);
		} else
			mtx_unlock(&ps->ps_mtx);
		PROC_UNLOCK(p->p_pptr);
		mtx_lock_spin(&sched_lock);
	}
}
2286
/*
 * Take the action for the specified signal
 * from the current set of pending signals.
 *
 * Called with the proc lock and ps_mtx held; either delivers the
 * caught signal to the handler (via sv_sendsig or, for KSE threads,
 * thread_signal_add) or terminates the process via sigexit().
 */
void
postsig(sig)
	register int sig;
{
	struct thread *td = curthread;
	register struct proc *p = td->td_proc;
	struct sigacts *ps;
	sig_t action;
	sigset_t returnmask;
	int code;

	KASSERT(sig != 0, ("postsig"));

	PROC_LOCK_ASSERT(p, MA_OWNED);
	ps = p->p_sigacts;
	mtx_assert(&ps->ps_mtx, MA_OWNED);
	SIGDELSET(td->td_siglist, sig);
	action = ps->ps_sigact[_SIG_IDX(sig)];
#ifdef KTRACE
	if (KTRPOINT(td, KTR_PSIG))
		ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ?
		    &td->td_oldsigmask : &td->td_sigmask, 0);
#endif
	if (p->p_stops & S_SIG) {
		mtx_unlock(&ps->ps_mtx);
		stopevent(p, S_SIG, sig);
		mtx_lock(&ps->ps_mtx);
	}

	if (!(td->td_pflags & TDP_SA) && action == SIG_DFL) {
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		mtx_unlock(&ps->ps_mtx);
		sigexit(td, sig);
		/* NOTREACHED */
	} else {
		if (td->td_pflags & TDP_SA) {
			/* KSE threads never catch SIGKILL; die now. */
			if (sig == SIGKILL) {
				mtx_unlock(&ps->ps_mtx);
				sigexit(td, sig);
			}
		}

		/*
		 * If we get here, the signal must be caught.
		 */
		KASSERT(action != SIG_IGN && !SIGISMEMBER(td->td_sigmask, sig),
		    ("postsig action"));
		/*
		 * Set the new mask value and also defer further
		 * occurrences of this signal.
		 *
		 * Special case: user has done a sigsuspend.  Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigsuspend is what we want
		 * restored after the signal processing is completed.
		 */
		if (td->td_pflags & TDP_OLDMASK) {
			returnmask = td->td_oldsigmask;
			td->td_pflags &= ~TDP_OLDMASK;
		} else
			returnmask = td->td_sigmask;

		/* Apply the handler's mask semantics (sa_mask, SA_NODEFER). */
		SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
		if (!SIGISMEMBER(ps->ps_signodefer, sig))
			SIGADDSET(td->td_sigmask, sig);

		if (SIGISMEMBER(ps->ps_sigreset, sig)) {
			/*
			 * See kern_sigaction() for origin of this code.
			 */
			SIGDELSET(ps->ps_sigcatch, sig);
			if (sig != SIGCONT &&
			    sigprop(sig) & SA_IGNORE)
				SIGADDSET(ps->ps_sigignore, sig);
			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
		}
		p->p_stats->p_ru.ru_nsignals++;
		/* Pass the trap code along only if it matches this signal. */
		if (p->p_sig != sig) {
			code = 0;
		} else {
			code = p->p_code;
			p->p_code = 0;
			p->p_sig = 0;
		}
		if (td->td_pflags & TDP_SA)
			thread_signal_add(curthread, sig);
		else
			(*p->p_sysent->sv_sendsig)(action, sig,
			    &returnmask, code);
	}
}
2385
/*
 * Kill the given process for stated reason: log the reason and post
 * SIGKILL.  The caller must hold the process lock.
 */
void
killproc(p, why)
	struct proc *p;
	char *why;
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)",
		p, p->p_pid, p->p_comm);
	/* Log uid -1 if the credentials are already gone. */
	log(LOG_ERR, "pid %d (%s), uid %d, was killed: %s\n", p->p_pid, p->p_comm,
		p->p_ucred ? p->p_ucred->cr_uid : -1, why);
	psignal(p, SIGKILL);
}
2402
/*
 * Force the current process to exit with the specified signal, dumping core
 * if appropriate.  We bypass the normal tests for masked and caught signals,
 * allowing unrecoverable failures to terminate the process without changing
 * signal state.  Mark the accounting record with the signal termination.
 * If dumping core, save the signal number for the debugger.  Calls exit and
 * does not return.
 *
 * Called with the proc lock held; it is released before exit1().
 *
 * MPSAFE
 */
void
sigexit(td, sig)
	struct thread *td;
	int sig;
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_acflag |= AXSIG;
	/*
	 * We must be single-threading to generate a core dump.  This
	 * ensures that the registers in the core file are up-to-date.
	 * Also, the ELF dump handler assumes that the thread list doesn't
	 * change out from under it.
	 *
	 * XXX If another thread attempts to single-thread before us
	 *     (e.g. via fork()), we won't get a dump at all.
	 */
	if ((sigprop(sig) & SA_CORE) && (thread_single(SINGLE_NO_EXIT) == 0)) {
		p->p_sig = sig;
		/*
		 * Log signals which would cause core dumps
		 * (Log as LOG_INFO to appease those who don't want
		 * these messages.)
		 * XXX : Todo, as well as euid, write out ruid too
		 * Note that coredump() drops proc lock.
		 */
		if (coredump(td) == 0)
			sig |= WCOREFLAG;
		if (kern_logsigexit)
			log(LOG_INFO,
			    "pid %d (%s), uid %d: exited on signal %d%s\n",
			    p->p_pid, p->p_comm,
			    td->td_ucred ? td->td_ucred->cr_uid : -1,
			    sig &~ WCOREFLAG,
			    sig & WCOREFLAG ? " (core dumped)" : "");
	} else
		PROC_UNLOCK(p);
	exit1(td, W_EXITCODE(0, sig));
	/* NOTREACHED */
}
2454
/*
 * Template for core dump file names; expanded by expand_name() below and
 * tunable at runtime via the kern.corefile sysctl.
 */
static char corefilename[MAXPATHLEN] = {"%N.core"};
SYSCTL_STRING(_kern, OID_AUTO, corefile, CTLFLAG_RW, corefilename,
    sizeof(corefilename), "process corefile name format string");
2458
2459 /*
2460 * expand_name(name, uid, pid)
2461 * Expand the name described in corefilename, using name, uid, and pid.
2462 * corefilename is a printf-like string, with three format specifiers:
2463 * %N name of process ("name")
2464 * %P process id (pid)
2465 * %U user id (uid)
2466 * For example, "%N.core" is the default; they can be disabled completely
2467 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
2468 * This is controlled by the sysctl variable kern.corefile (see above).
2469 */
2470
2471 static char *
2472 expand_name(name, uid, pid)
2473 const char *name;
2474 uid_t uid;
2475 pid_t pid;
2476 {
2477 const char *format, *appendstr;
2478 char *temp;
2479 char buf[11]; /* Buffer for pid/uid -- max 4B */
2480 size_t i, l, n;
2481
2482 format = corefilename;
2483 temp = malloc(MAXPATHLEN, M_TEMP, M_NOWAIT | M_ZERO);
2484 if (temp == NULL)
2485 return (NULL);
2486 for (i = 0, n = 0; n < MAXPATHLEN && format[i]; i++) {
2487 switch (format[i]) {
2488 case '%': /* Format character */
2489 i++;
2490 switch (format[i]) {
2491 case '%':
2492 appendstr = "%";
2493 break;
2494 case 'N': /* process name */
2495 appendstr = name;
2496 break;
2497 case 'P': /* process id */
2498 sprintf(buf, "%u", pid);
2499 appendstr = buf;
2500 break;
2501 case 'U': /* user id */
2502 sprintf(buf, "%u", uid);
2503 appendstr = buf;
2504 break;
2505 default:
2506 appendstr = "";
2507 log(LOG_ERR,
2508 "Unknown format character %c in `%s'\n",
2509 format[i], format);
2510 }
2511 l = strlen(appendstr);
2512 if ((n + l) >= MAXPATHLEN)
2513 goto toolong;
2514 memcpy(temp + n, appendstr, l);
2515 n += l;
2516 break;
2517 default:
2518 temp[n++] = format[i];
2519 }
2520 }
2521 if (format[i] != '\0')
2522 goto toolong;
2523 return (temp);
2524 toolong:
2525 log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too long\n",
2526 (long)pid, name, (u_long)uid);
2527 free(temp, M_TEMP);
2528 return (NULL);
2529 }
2530
2531 /*
2532 * Dump a process' core. The main routine does some
2533 * policy checking, and creates the name of the coredump;
2534 * then it passes on a vnode and a size limit to the process-specific
2535 * coredump routine if there is one; if there _is not_ one, it returns
2536 * ENOSYS; otherwise it returns the error from the process-specific routine.
2537 */
2538
2539 static int
2540 coredump(struct thread *td)
2541 {
2542 struct proc *p = td->td_proc;
2543 register struct vnode *vp;
2544 register struct ucred *cred = td->td_ucred;
2545 struct flock lf;
2546 struct nameidata nd;
2547 struct vattr vattr;
2548 int error, error1, flags, locked;
2549 struct mount *mp;
2550 char *name; /* name of corefile */
2551 off_t limit;
2552
2553 PROC_LOCK_ASSERT(p, MA_OWNED);
2554 MPASS((p->p_flag & P_HADTHREADS) == 0 || p->p_singlethread == td);
2555 _STOPEVENT(p, S_CORE, 0);
2556
2557 if (((sugid_coredump == 0) && p->p_flag & P_SUGID) || do_coredump == 0) {
2558 PROC_UNLOCK(p);
2559 return (EFAULT);
2560 }
2561
2562 /*
2563 * Note that the bulk of limit checking is done after
2564 * the corefile is created. The exception is if the limit
2565 * for corefiles is 0, in which case we don't bother
2566 * creating the corefile at all. This layout means that
2567 * a corefile is truncated instead of not being created,
2568 * if it is larger than the limit.
2569 */
2570 limit = (off_t)lim_cur(p, RLIMIT_CORE);
2571 PROC_UNLOCK(p);
2572 if (limit == 0)
2573 return (EFBIG);
2574
2575 mtx_lock(&Giant);
2576 restart:
2577 name = expand_name(p->p_comm, td->td_ucred->cr_uid, p->p_pid);
2578 if (name == NULL) {
2579 mtx_unlock(&Giant);
2580 return (EINVAL);
2581 }
2582 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td); /* XXXKSE */
2583 flags = O_CREAT | FWRITE | O_NOFOLLOW;
2584 error = vn_open(&nd, &flags, S_IRUSR | S_IWUSR, -1);
2585 free(name, M_TEMP);
2586 if (error) {
2587 mtx_unlock(&Giant);
2588 return (error);
2589 }
2590 NDFREE(&nd, NDF_ONLY_PNBUF);
2591 vp = nd.ni_vp;
2592
2593 /* Don't dump to non-regular files or files with links. */
2594 if (vp->v_type != VREG ||
2595 VOP_GETATTR(vp, &vattr, cred, td) || vattr.va_nlink != 1) {
2596 VOP_UNLOCK(vp, 0, td);
2597 error = EFAULT;
2598 goto out;
2599 }
2600
2601 VOP_UNLOCK(vp, 0, td);
2602 lf.l_whence = SEEK_SET;
2603 lf.l_start = 0;
2604 lf.l_len = 0;
2605 lf.l_type = F_WRLCK;
2606 locked = (VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK) == 0);
2607
2608 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
2609 lf.l_type = F_UNLCK;
2610 if (locked)
2611 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
2612 if ((error = vn_close(vp, FWRITE, cred, td)) != 0)
2613 return (error);
2614 if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
2615 return (error);
2616 goto restart;
2617 }
2618
2619 VATTR_NULL(&vattr);
2620 vattr.va_size = 0;
2621 if (set_core_nodump_flag)
2622 vattr.va_flags = UF_NODUMP;
2623 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2624 VOP_LEASE(vp, td, cred, LEASE_WRITE);
2625 VOP_SETATTR(vp, &vattr, cred, td);
2626 VOP_UNLOCK(vp, 0, td);
2627 PROC_LOCK(p);
2628 p->p_acflag |= ACORE;
2629 PROC_UNLOCK(p);
2630
2631 error = p->p_sysent->sv_coredump ?
2632 p->p_sysent->sv_coredump(td, vp, limit) :
2633 ENOSYS;
2634
2635 if (locked) {
2636 lf.l_type = F_UNLCK;
2637 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
2638 }
2639 vn_finished_write(mp);
2640 out:
2641 error1 = vn_close(vp, FWRITE, cred, td);
2642 mtx_unlock(&Giant);
2643 if (error == 0)
2644 error = error1;
2645 return (error);
2646 }
2647
2648 /*
2649 * Nonexistent system call-- signal process (may want to handle it).
2650 * Flag error in case process won't see signal immediately (blocked or ignored).
2651 */
2652 #ifndef _SYS_SYSPROTO_H_
2653 struct nosys_args {
2654 int dummy;
2655 };
2656 #endif
2657 /*
2658 * MPSAFE
2659 */
2660 /* ARGSUSED */
2661 int
2662 nosys(td, args)
2663 struct thread *td;
2664 struct nosys_args *args;
2665 {
2666 struct proc *p = td->td_proc;
2667
2668 PROC_LOCK(p);
2669 psignal(p, SIGSYS);
2670 PROC_UNLOCK(p);
2671 return (ENOSYS);
2672 }
2673
2674 /*
2675 * Send a SIGIO or SIGURG signal to a process or process group using
2676 * stored credentials rather than those of the current process.
2677 */
2678 void
2679 pgsigio(sigiop, sig, checkctty)
2680 struct sigio **sigiop;
2681 int sig, checkctty;
2682 {
2683 struct sigio *sigio;
2684
2685 SIGIO_LOCK();
2686 sigio = *sigiop;
2687 if (sigio == NULL) {
2688 SIGIO_UNLOCK();
2689 return;
2690 }
2691 if (sigio->sio_pgid > 0) {
2692 PROC_LOCK(sigio->sio_proc);
2693 if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
2694 psignal(sigio->sio_proc, sig);
2695 PROC_UNLOCK(sigio->sio_proc);
2696 } else if (sigio->sio_pgid < 0) {
2697 struct proc *p;
2698
2699 PGRP_LOCK(sigio->sio_pgrp);
2700 LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
2701 PROC_LOCK(p);
2702 if (CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
2703 (checkctty == 0 || (p->p_flag & P_CONTROLT)))
2704 psignal(p, sig);
2705 PROC_UNLOCK(p);
2706 }
2707 PGRP_UNLOCK(sigio->sio_pgrp);
2708 }
2709 SIGIO_UNLOCK();
2710 }
2711
2712 static int
2713 filt_sigattach(struct knote *kn)
2714 {
2715 struct proc *p = curproc;
2716
2717 kn->kn_ptr.p_proc = p;
2718 kn->kn_flags |= EV_CLEAR; /* automatically set */
2719
2720 knlist_add(&p->p_klist, kn, 0);
2721
2722 return (0);
2723 }
2724
2725 static void
2726 filt_sigdetach(struct knote *kn)
2727 {
2728 struct proc *p = kn->kn_ptr.p_proc;
2729
2730 knlist_remove(&p->p_klist, kn, 0);
2731 }
2732
2733 /*
2734 * signal knotes are shared with proc knotes, so we apply a mask to
2735 * the hint in order to differentiate them from process hints. This
2736 * could be avoided by using a signal-specific knote list, but probably
2737 * isn't worth the trouble.
2738 */
2739 static int
2740 filt_signal(struct knote *kn, long hint)
2741 {
2742
2743 if (hint & NOTE_SIGNAL) {
2744 hint &= ~NOTE_SIGNAL;
2745
2746 if (kn->kn_id == hint)
2747 kn->kn_data++;
2748 }
2749 return (kn->kn_data != 0);
2750 }
2751
2752 struct sigacts *
2753 sigacts_alloc(void)
2754 {
2755 struct sigacts *ps;
2756
2757 ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO);
2758 ps->ps_refcnt = 1;
2759 mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF);
2760 return (ps);
2761 }
2762
2763 void
2764 sigacts_free(struct sigacts *ps)
2765 {
2766
2767 mtx_lock(&ps->ps_mtx);
2768 ps->ps_refcnt--;
2769 if (ps->ps_refcnt == 0) {
2770 mtx_destroy(&ps->ps_mtx);
2771 free(ps, M_SUBPROC);
2772 } else
2773 mtx_unlock(&ps->ps_mtx);
2774 }
2775
2776 struct sigacts *
2777 sigacts_hold(struct sigacts *ps)
2778 {
2779 mtx_lock(&ps->ps_mtx);
2780 ps->ps_refcnt++;
2781 mtx_unlock(&ps->ps_mtx);
2782 return (ps);
2783 }
2784
2785 void
2786 sigacts_copy(struct sigacts *dest, struct sigacts *src)
2787 {
2788
2789 KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest"));
2790 mtx_lock(&src->ps_mtx);
2791 bcopy(src, dest, offsetof(struct sigacts, ps_refcnt));
2792 mtx_unlock(&src->ps_mtx);
2793 }
2794
2795 int
2796 sigacts_shared(struct sigacts *ps)
2797 {
2798 int shared;
2799
2800 mtx_lock(&ps->ps_mtx);
2801 shared = ps->ps_refcnt > 1;
2802 mtx_unlock(&ps->ps_mtx);
2803 return (shared);
2804 }
Cache object: e9204ef680a3cc8d24c55eceb38f3e7d
|