FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_sig.c
1 /*
2 * Copyright (c) 1982, 1986, 1989, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94
39 * $FreeBSD: releng/5.1/sys/kern/kern_sig.c 118752 2003-08-10 23:17:49Z nectar $
40 */
41
42 #include "opt_compat.h"
43 #include "opt_ktrace.h"
44
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/signalvar.h>
48 #include <sys/vnode.h>
49 #include <sys/acct.h>
50 #include <sys/condvar.h>
51 #include <sys/event.h>
52 #include <sys/fcntl.h>
53 #include <sys/kernel.h>
54 #include <sys/ktr.h>
55 #include <sys/ktrace.h>
56 #include <sys/lock.h>
57 #include <sys/malloc.h>
58 #include <sys/mutex.h>
59 #include <sys/namei.h>
60 #include <sys/proc.h>
61 #include <sys/pioctl.h>
62 #include <sys/resourcevar.h>
63 #include <sys/smp.h>
64 #include <sys/stat.h>
65 #include <sys/sx.h>
66 #include <sys/syscallsubr.h>
67 #include <sys/sysctl.h>
68 #include <sys/sysent.h>
69 #include <sys/syslog.h>
70 #include <sys/sysproto.h>
71 #include <sys/unistd.h>
72 #include <sys/wait.h>
73
74 #include <machine/cpu.h>
75
76 #if defined (__alpha__) && !defined(COMPAT_43)
77 #error "You *really* need COMPAT_43 on the alpha for longjmp(3)"
78 #endif
79
#define	ONSIG	32		/* NSIG for osig* syscalls.  XXX. */

/* Forward declarations for file-local helpers defined later in this file. */
static int	coredump(struct thread *);
static char	*expand_name(const char *, uid_t, pid_t);
static int	killpg1(struct thread *td, int sig, int pgid, int all);
static int	issignal(struct thread *p);
static int	sigprop(int sig);
static void	stop(struct proc *);
static void	tdsigwakeup(struct thread *td, int sig, sig_t action);
static int	filt_sigattach(struct knote *kn);
static void	filt_sigdetach(struct knote *kn);
static int	filt_signal(struct knote *kn, long hint);
static struct thread *sigtd(struct proc *p, int sig, int prop);
static int	kern_sigtimedwait(struct thread *td, sigset_t set,
			siginfo_t *info, struct timespec *timeout);

/* kqueue(2) filter ops backing EVFILT_SIGNAL. */
struct filterops sig_filtops =
	{ 0, filt_sigattach, filt_sigdetach, filt_signal };

/* Non-zero: log processes killed by abnormal signals via syslog(3). */
static int	kern_logsigexit = 1;
SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
    &kern_logsigexit, 0,
    "Log processes quitting on abnormal signals to syslog(3)");

/*
 * Policy -- Can ucred cr1 send SIGIO to process cr2?
 * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
 * in the right situations.
 */
#define CANSIGIO(cr1, cr2) \
	((cr1)->cr_uid == 0 || \
	    (cr1)->cr_ruid == (cr2)->cr_ruid || \
	    (cr1)->cr_uid == (cr2)->cr_ruid || \
	    (cr1)->cr_ruid == (cr2)->cr_uid || \
	    (cr1)->cr_uid == (cr2)->cr_uid)

/* Non-zero: allow set-id processes to dump core (off by default elsewhere). */
int sugid_coredump;
SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RW,
    &sugid_coredump, 0, "Enable coredumping set user/group ID processes");

/* Global switch for coredump generation. */
static int	do_coredump = 1;
SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
	&do_coredump, 0, "Enable/Disable coredumps");

/*
 * Signal properties and actions.
 * The array below categorizes the signals and their default actions
 * according to the following properties:
 */
#define	SA_KILL		0x01		/* terminates process by default */
#define	SA_CORE		0x02		/* ditto and coredumps */
#define	SA_STOP		0x04		/* suspend process */
#define	SA_TTYSTOP	0x08		/* ditto, from tty */
#define	SA_IGNORE	0x10		/* ignore by default */
#define	SA_CONT		0x20		/* continue if suspended */
#define	SA_CANTMASK	0x40		/* non-maskable, catchable */
#define	SA_PROC		0x80		/* deliverable to any thread */

/* Indexed by _SIG_IDX(sig), i.e. sig - 1; see sigprop() below. */
static int sigproptbl[NSIG] = {
        SA_KILL|SA_PROC,		/* SIGHUP */
        SA_KILL|SA_PROC,		/* SIGINT */
        SA_KILL|SA_CORE|SA_PROC,	/* SIGQUIT */
        SA_KILL|SA_CORE,		/* SIGILL */
        SA_KILL|SA_CORE,		/* SIGTRAP */
        SA_KILL|SA_CORE,		/* SIGABRT */
        SA_KILL|SA_CORE|SA_PROC,	/* SIGEMT */
        SA_KILL|SA_CORE,		/* SIGFPE */
        SA_KILL|SA_PROC,		/* SIGKILL */
        SA_KILL|SA_CORE,		/* SIGBUS */
        SA_KILL|SA_CORE,		/* SIGSEGV */
        SA_KILL|SA_CORE,		/* SIGSYS */
        SA_KILL|SA_PROC,		/* SIGPIPE */
        SA_KILL|SA_PROC,		/* SIGALRM */
        SA_KILL|SA_PROC,		/* SIGTERM */
        SA_IGNORE|SA_PROC,		/* SIGURG */
        SA_STOP|SA_PROC,		/* SIGSTOP */
        SA_STOP|SA_TTYSTOP|SA_PROC,	/* SIGTSTP */
        SA_IGNORE|SA_CONT|SA_PROC,	/* SIGCONT */
        SA_IGNORE|SA_PROC,		/* SIGCHLD */
        SA_STOP|SA_TTYSTOP|SA_PROC,	/* SIGTTIN */
        SA_STOP|SA_TTYSTOP|SA_PROC,	/* SIGTTOU */
        SA_IGNORE|SA_PROC,		/* SIGIO */
        SA_KILL,			/* SIGXCPU */
        SA_KILL,			/* SIGXFSZ */
        SA_KILL|SA_PROC,		/* SIGVTALRM */
        SA_KILL|SA_PROC,		/* SIGPROF */
        SA_IGNORE|SA_PROC,		/* SIGWINCH  */
        SA_IGNORE|SA_PROC,		/* SIGINFO */
        SA_KILL|SA_PROC,		/* SIGUSR1 */
        SA_KILL|SA_PROC,		/* SIGUSR2 */
};
171
172 /*
173 * Determine signal that should be delivered to process p, the current
174 * process, 0 if none. If there is a pending stop signal with default
175 * action, the process stops in issignal().
176 * XXXKSE the check for a pending stop is not done under KSE
177 *
178 * MP SAFE.
179 */
int
cursig(struct thread *td)
{
	/* Caller must hold the proc lock and ps_mtx, but NOT sched_lock. */
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
	mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED);
	mtx_assert(&sched_lock, MA_NOTOWNED);
	/* Only pay for the full issignal() scan when something is pending. */
	return (SIGPENDING(td) ? issignal(td) : 0);
}
188
189 /*
190 * Arrange for ast() to handle unmasked pending signals on return to user
191 * mode. This must be called whenever a signal is added to td_siglist or
192 * unmasked in td_sigmask.
193 */
void
signotify(struct thread *td)
{
	struct proc *p;
	sigset_t set;

	p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * If our mask changed we may have to move signal that were
	 * previously masked by all threads to our siglist.
	 */
	set = p->p_siglist;
	/* set := proc-wide pending signals that this thread does not mask. */
	SIGSETNAND(set, td->td_sigmask);
	/* Migrate them from the proc list onto this thread's list. */
	SIGSETNAND(p->p_siglist, set);
	SIGSETOR(td->td_siglist, set);

	if (SIGPENDING(td)) {
		/* Ask ast() to run a signal check on return to user mode. */
		mtx_lock_spin(&sched_lock);
		td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING;
		mtx_unlock_spin(&sched_lock);
	}
}
219
/*
 * Return non-zero if stack pointer `sp' lies on the process's alternate
 * signal stack; 0 if no altstack is configured or sp is outside it.
 */
int
sigonstack(size_t sp)
{
	struct proc *p = curthread->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	return ((p->p_flag & P_ALTSTACK) ?
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
	    /* Legacy sigstack(2) sets ss_size == 0; trust SS_ONSTACK then. */
	    ((p->p_sigstk.ss_size == 0) ? (p->p_sigstk.ss_flags & SS_ONSTACK) :
		((sp - (size_t)p->p_sigstk.ss_sp) < p->p_sigstk.ss_size))
#else
	    ((sp - (size_t)p->p_sigstk.ss_sp) < p->p_sigstk.ss_size)
#endif
	    : 0);
}
235
236 static __inline int
237 sigprop(int sig)
238 {
239
240 if (sig > 0 && sig < NSIG)
241 return (sigproptbl[_SIG_IDX(sig)]);
242 return (0);
243 }
244
245 int
246 sig_ffs(sigset_t *set)
247 {
248 int i;
249
250 for (i = 0; i < _SIG_WORDS; i++)
251 if (set->__bits[i])
252 return (ffs(set->__bits[i]) + (i * 32));
253 return (0);
254 }
255
256 /*
257 * kern_sigaction
258 * sigaction
259 * freebsd4_sigaction
260 * osigaction
261 *
262 * MPSAFE
263 */
/*
 * Backend for sigaction(2) and its compat variants: query and/or install
 * the disposition for `sig'.  `flags' carries KSA_FREEBSD4/KSA_OSIGSET so
 * the right compat trampoline bookkeeping is done.  Returns 0 or EINVAL.
 */
int
kern_sigaction(td, sig, act, oact, flags)
	struct thread *td;
	register int sig;
	struct sigaction *act, *oact;
	int flags;
{
	struct sigacts *ps;
	struct thread *td0;
	struct proc *p = td->td_proc;

	if (!_SIG_VALID(sig))
		return (EINVAL);

	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	if (oact) {
		/* Reconstruct the old sa_flags from the per-signal bit sets. */
		oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
		oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
		oact->sa_flags = 0;
		if (SIGISMEMBER(ps->ps_sigonstack, sig))
			oact->sa_flags |= SA_ONSTACK;
		if (!SIGISMEMBER(ps->ps_sigintr, sig))
			oact->sa_flags |= SA_RESTART;
		if (SIGISMEMBER(ps->ps_sigreset, sig))
			oact->sa_flags |= SA_RESETHAND;
		if (SIGISMEMBER(ps->ps_signodefer, sig))
			oact->sa_flags |= SA_NODEFER;
		if (SIGISMEMBER(ps->ps_siginfo, sig))
			oact->sa_flags |= SA_SIGINFO;
		if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP)
			oact->sa_flags |= SA_NOCLDSTOP;
		if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT)
			oact->sa_flags |= SA_NOCLDWAIT;
	}
	if (act) {
		/* SIGKILL and SIGSTOP may never be caught or ignored. */
		if ((sig == SIGKILL || sig == SIGSTOP) &&
		    act->sa_handler != SIG_DFL) {
			mtx_unlock(&ps->ps_mtx);
			PROC_UNLOCK(p);
			return (EINVAL);
		}

		/*
		 * Change setting atomically.
		 */

		ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
		SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
		if (act->sa_flags & SA_SIGINFO) {
			ps->ps_sigact[_SIG_IDX(sig)] =
			    (__sighandler_t *)act->sa_sigaction;
			SIGADDSET(ps->ps_siginfo, sig);
		} else {
			ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
			SIGDELSET(ps->ps_siginfo, sig);
		}
		if (!(act->sa_flags & SA_RESTART))
			SIGADDSET(ps->ps_sigintr, sig);
		else
			SIGDELSET(ps->ps_sigintr, sig);
		if (act->sa_flags & SA_ONSTACK)
			SIGADDSET(ps->ps_sigonstack, sig);
		else
			SIGDELSET(ps->ps_sigonstack, sig);
		if (act->sa_flags & SA_RESETHAND)
			SIGADDSET(ps->ps_sigreset, sig);
		else
			SIGDELSET(ps->ps_sigreset, sig);
		if (act->sa_flags & SA_NODEFER)
			SIGADDSET(ps->ps_signodefer, sig);
		else
			SIGDELSET(ps->ps_signodefer, sig);
#ifdef COMPAT_SUNOS
		if (act->sa_flags & SA_USERTRAMP)
			SIGADDSET(ps->ps_usertramp, sig);
		else
			SIGDELSET(ps->ps_usertramp, sig);
#endif
		if (sig == SIGCHLD) {
			if (act->sa_flags & SA_NOCLDSTOP)
				ps->ps_flag |= PS_NOCLDSTOP;
			else
				ps->ps_flag &= ~PS_NOCLDSTOP;
			if (act->sa_flags & SA_NOCLDWAIT) {
				/*
				 * Paranoia: since SA_NOCLDWAIT is implemented
				 * by reparenting the dying child to PID 1 (and
				 * trust it to reap the zombie), PID 1 itself
				 * is forbidden to set SA_NOCLDWAIT.
				 */
				if (p->p_pid == 1)
					ps->ps_flag &= ~PS_NOCLDWAIT;
				else
					ps->ps_flag |= PS_NOCLDWAIT;
			} else
				ps->ps_flag &= ~PS_NOCLDWAIT;
			if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
				ps->ps_flag |= PS_CLDSIGIGN;
			else
				ps->ps_flag &= ~PS_CLDSIGIGN;
		}
		/*
		 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
		 * and for signals set to SIG_DFL where the default is to
		 * ignore. However, don't put SIGCONT in ps_sigignore, as we
		 * have to restart the process.
		 */
		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
		    (sigprop(sig) & SA_IGNORE &&
		     ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
			/* never to be seen again */
			SIGDELSET(p->p_siglist, sig);
			/* Discard per-thread pending instances as well. */
			FOREACH_THREAD_IN_PROC(p, td0)
				SIGDELSET(td0->td_siglist, sig);
			if (sig != SIGCONT)
				/* easier in psignal */
				SIGADDSET(ps->ps_sigignore, sig);
			SIGDELSET(ps->ps_sigcatch, sig);
		} else {
			SIGDELSET(ps->ps_sigignore, sig);
			if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
				SIGDELSET(ps->ps_sigcatch, sig);
			else
				SIGADDSET(ps->ps_sigcatch, sig);
		}
#ifdef COMPAT_FREEBSD4
		/* Track which caught signals want the FreeBSD 4 trampoline. */
		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
		    (flags & KSA_FREEBSD4) == 0)
			SIGDELSET(ps->ps_freebsd4, sig);
		else
			SIGADDSET(ps->ps_freebsd4, sig);
#endif
#ifdef COMPAT_43
		/* Likewise for the 4.3BSD osigvec()-style trampoline. */
		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
		    (flags & KSA_OSIGSET) == 0)
			SIGDELSET(ps->ps_osigset, sig);
		else
			SIGADDSET(ps->ps_osigset, sig);
#endif
	}
	mtx_unlock(&ps->ps_mtx);
	PROC_UNLOCK(p);
	return (0);
}
412
413 #ifndef _SYS_SYSPROTO_H_
414 struct sigaction_args {
415 int sig;
416 struct sigaction *act;
417 struct sigaction *oact;
418 };
419 #endif
420 /*
421 * MPSAFE
422 */
423 int
424 sigaction(td, uap)
425 struct thread *td;
426 register struct sigaction_args *uap;
427 {
428 struct sigaction act, oact;
429 register struct sigaction *actp, *oactp;
430 int error;
431
432 actp = (uap->act != NULL) ? &act : NULL;
433 oactp = (uap->oact != NULL) ? &oact : NULL;
434 if (actp) {
435 error = copyin(uap->act, actp, sizeof(act));
436 if (error)
437 return (error);
438 }
439 error = kern_sigaction(td, uap->sig, actp, oactp, 0);
440 if (oactp && !error)
441 error = copyout(oactp, uap->oact, sizeof(oact));
442 return (error);
443 }
444
#ifdef COMPAT_FREEBSD4
#ifndef _SYS_SYSPROTO_H_
struct freebsd4_sigaction_args {
	int	sig;
	struct	sigaction *act;
	struct	sigaction *oact;
};
#endif
/*
 * FreeBSD 4 compatible sigaction(2): identical to sigaction() except the
 * KSA_FREEBSD4 flag selects the old signal trampoline.  MPSAFE.
 */
int
freebsd4_sigaction(struct thread *td, struct freebsd4_sigaction_args *uap)
{
	struct sigaction newact, oldact;
	struct sigaction *newp, *oldp;
	int error;

	newp = (uap->act != NULL) ? &newact : NULL;
	oldp = (uap->oact != NULL) ? &oldact : NULL;
	if (newp != NULL) {
		error = copyin(uap->act, newp, sizeof(newact));
		if (error != 0)
			return (error);
	}
	error = kern_sigaction(td, uap->sig, newp, oldp, KSA_FREEBSD4);
	if (oldp != NULL && error == 0)
		error = copyout(oldp, uap->oact, sizeof(oldact));
	return (error);
}
#endif	/* COMPAT_FREEBSD4 */
479
#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
#ifndef _SYS_SYSPROTO_H_
struct osigaction_args {
	int	signum;
	struct	osigaction *nsa;
	struct	osigaction *osa;
};
#endif
/*
 * 4.3BSD-compatible sigaction: translates between struct osigaction
 * (old 32-bit osigset_t mask) and the modern struct sigaction.
 * MPSAFE
 */
int
osigaction(td, uap)
	struct thread *td;
	register struct osigaction_args *uap;
{
	struct osigaction sa;
	struct sigaction nsa, osa;
	register struct sigaction *nsap, *osap;
	int error;

	/* Old interface only knows the first ONSIG signals. */
	if (uap->signum <= 0 || uap->signum >= ONSIG)
		return (EINVAL);

	nsap = (uap->nsa != NULL) ? &nsa : NULL;
	osap = (uap->osa != NULL) ? &osa : NULL;

	if (nsap) {
		error = copyin(uap->nsa, &sa, sizeof(sa));
		if (error)
			return (error);
		nsap->sa_handler = sa.sa_handler;
		nsap->sa_flags = sa.sa_flags;
		/* Widen the old-style mask into a full sigset_t. */
		OSIG2SIG(sa.sa_mask, nsap->sa_mask);
	}
	error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
	if (osap && !error) {
		sa.sa_handler = osap->sa_handler;
		sa.sa_flags = osap->sa_flags;
		SIG2OSIG(osap->sa_mask, sa.sa_mask);
		error = copyout(&sa, uap->osa, sizeof(sa));
	}
	return (error);
}

#if !defined(__i386__) && !defined(__alpha__)
/* Avoid replicating the same stub everywhere */
int
osigreturn(td, uap)
	struct thread *td;
	struct osigreturn_args *uap;
{

	/* Platforms without an MD osigreturn just report ENOSYS. */
	return (nosys(td, (struct nosys_args *)uap));
}
#endif
#endif /* COMPAT_43 */
537
538 /*
539 * Initialize signal state for process 0;
540 * set to ignore signals that are ignored by default.
541 */
542 void
543 siginit(p)
544 struct proc *p;
545 {
546 register int i;
547 struct sigacts *ps;
548
549 PROC_LOCK(p);
550 ps = p->p_sigacts;
551 mtx_lock(&ps->ps_mtx);
552 for (i = 1; i <= NSIG; i++)
553 if (sigprop(i) & SA_IGNORE && i != SIGCONT)
554 SIGADDSET(ps->ps_sigignore, i);
555 mtx_unlock(&ps->ps_mtx);
556 PROC_UNLOCK(p);
557 }
558
559 /*
560 * Reset signals for an exec of the specified process.
561 */
void
execsigs(p)
	register struct proc *p;
{
	register struct sigacts *ps;
	register int sig;

	/*
	 * Reset caught signals.  Held signals remain held
	 * through td_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	PROC_LOCK_ASSERT(p, MA_OWNED);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	while (SIGNOTEMPTY(ps->ps_sigcatch)) {
		sig = sig_ffs(&ps->ps_sigcatch);
		SIGDELSET(ps->ps_sigcatch, sig);
		if (sigprop(sig) & SA_IGNORE) {
			/* Default is ignore: drop any pending instance too. */
			if (sig != SIGCONT)
				SIGADDSET(ps->ps_sigignore, sig);
			SIGDELSET(p->p_siglist, sig);
			/*
			 * There is only one thread at this point.
			 */
			SIGDELSET(FIRST_THREAD_IN_PROC(p)->td_siglist, sig);
		}
		ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
	}
	/*
	 * Clear out the td's sigmask. Normal processes use the proc sigmask.
	 */
	SIGEMPTYSET(FIRST_THREAD_IN_PROC(p)->td_sigmask);
	/*
	 * Reset stack state to the user stack.
	 * Clear set of signals caught on the signal stack.
	 */
	p->p_sigstk.ss_flags = SS_DISABLE;
	p->p_sigstk.ss_size = 0;
	p->p_sigstk.ss_sp = 0;
	p->p_flag &= ~P_ALTSTACK;
	/*
	 * Reset no zombies if child dies flag as Solaris does.
	 */
	ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
	if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
		ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
	mtx_unlock(&ps->ps_mtx);
}
611
612 /*
613 * kern_sigprocmask()
614 *
615 * Manipulate signal mask.
616 */
/*
 * Backend for sigprocmask(2)/osigprocmask(): apply `how' (SIG_BLOCK,
 * SIG_UNBLOCK, SIG_SETMASK) with `set' to td's signal mask, optionally
 * returning the previous mask in *oset.  `old' selects osigset_t
 * semantics: SIG_SETMASK then only replaces the low word of the mask.
 * Returns 0 or EINVAL for an unknown `how'.
 */
int
kern_sigprocmask(td, how, set, oset, old)
	struct thread *td;
	int how;
	sigset_t *set, *oset;
	int old;
{
	int error;

	PROC_LOCK(td->td_proc);
	if (oset != NULL)
		*oset = td->td_sigmask;

	error = 0;
	if (set != NULL) {
		switch (how) {
		case SIG_BLOCK:
			/* Blocking more signals can never unmask anything,
			 * so no signotify() is needed here. */
			SIG_CANTMASK(*set);
			SIGSETOR(td->td_sigmask, *set);
			break;
		case SIG_UNBLOCK:
			SIGSETNAND(td->td_sigmask, *set);
			signotify(td);
			break;
		case SIG_SETMASK:
			SIG_CANTMASK(*set);
			if (old)
				SIGSETLO(td->td_sigmask, *set);
			else
				td->td_sigmask = *set;
			signotify(td);
			break;
		default:
			error = EINVAL;
			break;
		}
	}
	PROC_UNLOCK(td->td_proc);
	return (error);
}
657
658 /*
659 * sigprocmask() - MP SAFE
660 */
661
662 #ifndef _SYS_SYSPROTO_H_
663 struct sigprocmask_args {
664 int how;
665 const sigset_t *set;
666 sigset_t *oset;
667 };
668 #endif
669 int
670 sigprocmask(td, uap)
671 register struct thread *td;
672 struct sigprocmask_args *uap;
673 {
674 sigset_t set, oset;
675 sigset_t *setp, *osetp;
676 int error;
677
678 setp = (uap->set != NULL) ? &set : NULL;
679 osetp = (uap->oset != NULL) ? &oset : NULL;
680 if (setp) {
681 error = copyin(uap->set, setp, sizeof(set));
682 if (error)
683 return (error);
684 }
685 error = kern_sigprocmask(td, uap->how, setp, osetp, 0);
686 if (osetp && !error) {
687 error = copyout(osetp, uap->oset, sizeof(oset));
688 }
689 return (error);
690 }
691
#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
/*
 * osigprocmask() - MP SAFE
 */
#ifndef _SYS_SYSPROTO_H_
struct osigprocmask_args {
	int	how;
	osigset_t mask;
};
#endif
/*
 * 4.3BSD sigprocmask: mask is passed by value (osigset_t) and the old
 * mask is returned in td_retval rather than through a pointer.
 */
int
osigprocmask(td, uap)
	register struct thread *td;
	struct osigprocmask_args *uap;
{
	sigset_t set, oset;
	int error;

	OSIG2SIG(uap->mask, set);
	/* old == 1: SIG_SETMASK only replaces the low 32 bits. */
	error = kern_sigprocmask(td, uap->how, &set, &oset, 1);
	SIG2OSIG(oset, td->td_retval[0]);
	return (error);
}
#endif /* COMPAT_43 */
716
717 #ifndef _SYS_SYSPROTO_H_
718 struct sigpending_args {
719 sigset_t *set;
720 };
721 #endif
722 /*
723 * MPSAFE
724 */
725 int
726 sigwait(struct thread *td, struct sigwait_args *uap)
727 {
728 siginfo_t info;
729 sigset_t set;
730 int error;
731
732 error = copyin(uap->set, &set, sizeof(set));
733 if (error)
734 return (error);
735
736 error = kern_sigtimedwait(td, set, &info, NULL);
737 if (error)
738 return (error);
739
740 error = copyout(&info.si_signo, uap->sig, sizeof(info.si_signo));
741 /* Repost if we got an error. */
742 if (error && info.si_signo) {
743 PROC_LOCK(td->td_proc);
744 tdsignal(td, info.si_signo);
745 PROC_UNLOCK(td->td_proc);
746 }
747 return (error);
748 }
749 /*
750 * MPSAFE
751 */
752 int
753 sigtimedwait(struct thread *td, struct sigtimedwait_args *uap)
754 {
755 struct timespec ts;
756 struct timespec *timeout;
757 sigset_t set;
758 siginfo_t info;
759 int error;
760
761 if (uap->timeout) {
762 error = copyin(uap->timeout, &ts, sizeof(ts));
763 if (error)
764 return (error);
765
766 timeout = &ts;
767 } else
768 timeout = NULL;
769
770 error = copyin(uap->set, &set, sizeof(set));
771 if (error)
772 return (error);
773
774 error = kern_sigtimedwait(td, set, &info, timeout);
775 if (error)
776 return (error);
777
778 error = copyout(&info, uap->info, sizeof(info));
779 /* Repost if we got an error. */
780 if (error && info.si_signo) {
781 PROC_LOCK(td->td_proc);
782 tdsignal(td, info.si_signo);
783 PROC_UNLOCK(td->td_proc);
784 }
785 return (error);
786 }
787
788 /*
789 * MPSAFE
790 */
791 int
792 sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap)
793 {
794 siginfo_t info;
795 sigset_t set;
796 int error;
797
798 error = copyin(uap->set, &set, sizeof(set));
799 if (error)
800 return (error);
801
802 error = kern_sigtimedwait(td, set, &info, NULL);
803 if (error)
804 return (error);
805
806 error = copyout(&info, uap->info, sizeof(info));
807 /* Repost if we got an error. */
808 if (error && info.si_signo) {
809 PROC_LOCK(td->td_proc);
810 tdsignal(td, info.si_signo);
811 PROC_UNLOCK(td->td_proc);
812 }
813 return (error);
814 }
815
816 static int
817 kern_sigtimedwait(struct thread *td, sigset_t set, siginfo_t *info,
818 struct timespec *timeout)
819 {
820 register struct sigacts *ps;
821 sigset_t oldmask;
822 struct proc *p;
823 int error;
824 int sig;
825 int hz;
826
827 p = td->td_proc;
828 error = 0;
829 sig = 0;
830 SIG_CANTMASK(set);
831
832 PROC_LOCK(p);
833 ps = p->p_sigacts;
834 oldmask = td->td_sigmask;
835 td->td_sigmask = set;
836 signotify(td);
837
838 mtx_lock(&ps->ps_mtx);
839 sig = cursig(td);
840 if (sig)
841 goto out;
842
843 /*
844 * POSIX says this must be checked after looking for pending
845 * signals.
846 */
847 if (timeout) {
848 struct timeval tv;
849
850 if (timeout->tv_nsec > 1000000000) {
851 error = EINVAL;
852 goto out;
853 }
854 TIMESPEC_TO_TIMEVAL(&tv, timeout);
855 hz = tvtohz(&tv);
856 } else
857 hz = 0;
858
859 mtx_unlock(&ps->ps_mtx);
860 error = msleep(ps, &p->p_mtx, PPAUSE|PCATCH, "pause", hz);
861 mtx_lock(&ps->ps_mtx);
862 if (error == EINTR)
863 error = 0;
864 else if (error)
865 goto out;
866
867 sig = cursig(td);
868 out:
869 td->td_sigmask = oldmask;
870 if (sig) {
871 sig_t action;
872
873 action = ps->ps_sigact[_SIG_IDX(sig)];
874 mtx_unlock(&ps->ps_mtx);
875 #ifdef KTRACE
876 if (KTRPOINT(td, KTR_PSIG))
877 ktrpsig(sig, action, td->td_flags & TDF_OLDMASK ?
878 &td->td_oldsigmask : &td->td_sigmask, 0);
879 #endif
880 _STOPEVENT(p, S_SIG, sig);
881
882 if (action == SIG_DFL)
883 sigexit(td, sig);
884 /* NOTREACHED */
885
886 SIGDELSET(td->td_siglist, sig);
887 info->si_signo = sig;
888 info->si_code = 0;
889 } else
890 mtx_unlock(&ps->ps_mtx);
891 PROC_UNLOCK(p);
892 return (error);
893 }
894
895 /*
896 * MPSAFE
897 */
898 int
899 sigpending(td, uap)
900 struct thread *td;
901 struct sigpending_args *uap;
902 {
903 struct proc *p = td->td_proc;
904 sigset_t siglist;
905
906 PROC_LOCK(p);
907 siglist = p->p_siglist;
908 SIGSETOR(siglist, td->td_siglist);
909 PROC_UNLOCK(p);
910 return (copyout(&siglist, uap->set, sizeof(sigset_t)));
911 }
912
#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
#ifndef _SYS_SYSPROTO_H_
struct osigpending_args {
	int	dummy;
};
#endif
/*
 * 4.3BSD sigpending: same union of proc and thread pending sets as
 * sigpending(2), but returned as an osigset_t in td_retval.
 * MPSAFE
 */
int
osigpending(td, uap)
	struct thread *td;
	struct osigpending_args *uap;
{
	struct proc *p = td->td_proc;
	sigset_t siglist;

	PROC_LOCK(p);
	siglist = p->p_siglist;
	SIGSETOR(siglist, td->td_siglist);
	PROC_UNLOCK(p);
	/* Truncate to the old 32-bit mask format for the return value. */
	SIG2OSIG(siglist, td->td_retval[0]);
	return (0);
}
#endif /* COMPAT_43 */
938
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
/*
 * Generalized interface signal handler, 4.3-compatible.
 */
#ifndef _SYS_SYSPROTO_H_
struct osigvec_args {
	int	signum;
	struct	sigvec *nsv;
	struct	sigvec *osv;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
osigvec(td, uap)
	struct thread *td;
	register struct osigvec_args *uap;
{
	struct sigvec vec;
	struct sigaction nsa, osa;
	register struct sigaction *nsap, *osap;
	int error;

	/* Old interface only knows the first ONSIG signals. */
	if (uap->signum <= 0 || uap->signum >= ONSIG)
		return (EINVAL);
	nsap = (uap->nsv != NULL) ? &nsa : NULL;
	osap = (uap->osv != NULL) ? &osa : NULL;
	if (nsap) {
		error = copyin(uap->nsv, &vec, sizeof(vec));
		if (error)
			return (error);
		nsap->sa_handler = vec.sv_handler;
		OSIG2SIG(vec.sv_mask, nsap->sa_mask);
		nsap->sa_flags = vec.sv_flags;
		/* SV_INTERRUPT has the opposite sense of SA_RESTART. */
		nsap->sa_flags ^= SA_RESTART;	/* opposite of SV_INTERRUPT */
#ifdef COMPAT_SUNOS
		nsap->sa_flags |= SA_USERTRAMP;
#endif
	}
	error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
	if (osap && !error) {
		vec.sv_handler = osap->sa_handler;
		SIG2OSIG(osap->sa_mask, vec.sv_mask);
		vec.sv_flags = osap->sa_flags;
		/* Strip flags the old interface cannot express. */
		vec.sv_flags &= ~SA_NOCLDWAIT;
		vec.sv_flags ^= SA_RESTART;
#ifdef COMPAT_SUNOS
		vec.sv_flags &= ~SA_NOCLDSTOP;
#endif
		error = copyout(&vec, uap->osv, sizeof(vec));
	}
	return (error);
}
994
995 #ifndef _SYS_SYSPROTO_H_
996 struct osigblock_args {
997 int mask;
998 };
999 #endif
1000 /*
1001 * MPSAFE
1002 */
1003 int
1004 osigblock(td, uap)
1005 register struct thread *td;
1006 struct osigblock_args *uap;
1007 {
1008 struct proc *p = td->td_proc;
1009 sigset_t set;
1010
1011 OSIG2SIG(uap->mask, set);
1012 SIG_CANTMASK(set);
1013 PROC_LOCK(p);
1014 SIG2OSIG(td->td_sigmask, td->td_retval[0]);
1015 SIGSETOR(td->td_sigmask, set);
1016 PROC_UNLOCK(p);
1017 return (0);
1018 }
1019
#ifndef _SYS_SYSPROTO_H_
struct osigsetmask_args {
	int	mask;
};
#endif
/*
 * 4.3BSD sigsetmask: replace the low word of the thread's signal mask
 * with `mask' and return the previous mask in osigset_t format.
 * MPSAFE
 */
int
osigsetmask(td, uap)
	struct thread *td;
	struct osigsetmask_args *uap;
{
	struct proc *p = td->td_proc;
	sigset_t set;

	OSIG2SIG(uap->mask, set);
	SIG_CANTMASK(set);
	PROC_LOCK(p);
	SIG2OSIG(td->td_sigmask, td->td_retval[0]);
	/* Only the low 32 bits are replaced; high words are preserved. */
	SIGSETLO(td->td_sigmask, set);
	/* Mask may have shrunk: recheck for newly deliverable signals. */
	signotify(td);
	PROC_UNLOCK(p);
	return (0);
}
#endif /* COMPAT_43 || COMPAT_SUNOS */
1046
1047 /*
1048 * Suspend process until signal, providing mask to be set
1049 * in the meantime. Note nonstandard calling convention:
1050 * libc stub passes mask, not pointer, to save a copyin.
1051 ***** XXXKSE this doesn't make sense under KSE.
1052 ***** Do we suspend the thread or all threads in the process?
1053 ***** How do we suspend threads running NOW on another processor?
1054 */
1055 #ifndef _SYS_SYSPROTO_H_
1056 struct sigsuspend_args {
1057 const sigset_t *sigmask;
1058 };
1059 #endif
1060 /*
1061 * MPSAFE
1062 */
1063 /* ARGSUSED */
1064 int
1065 sigsuspend(td, uap)
1066 struct thread *td;
1067 struct sigsuspend_args *uap;
1068 {
1069 sigset_t mask;
1070 int error;
1071
1072 error = copyin(uap->sigmask, &mask, sizeof(mask));
1073 if (error)
1074 return (error);
1075 return (kern_sigsuspend(td, mask));
1076 }
1077
/*
 * Backend for sigsuspend(2): install `mask' as a temporary signal mask
 * and sleep until a signal is delivered.  Always returns EINTR; the old
 * mask is restored by the signal-delivery path via TDF_OLDMASK.
 */
int
kern_sigsuspend(struct thread *td, sigset_t mask)
{
	struct proc *p = td->td_proc;

	/*
	 * When returning from sigsuspend, we want
	 * the old mask to be restored after the
	 * signal handler has finished.  Thus, we
	 * save it here and mark the sigacts structure
	 * to indicate this.
	 */
	PROC_LOCK(p);
	td->td_oldsigmask = td->td_sigmask;
	mtx_lock_spin(&sched_lock);
	td->td_flags |= TDF_OLDMASK;
	mtx_unlock_spin(&sched_lock);
	SIG_CANTMASK(mask);
	td->td_sigmask = mask;
	signotify(td);
	/* Sleep until msleep() is interrupted by a signal (PCATCH). */
	while (msleep(p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "pause", 0) == 0)
		/* void */;
	PROC_UNLOCK(p);
	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}
1104
#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
#ifndef _SYS_SYSPROTO_H_
struct osigsuspend_args {
	osigset_t mask;
};
#endif
/*
 * 4.3BSD sigsuspend: mask passed by value as an osigset_t; only the low
 * word of the thread's mask is replaced.  Always returns EINTR.
 * MPSAFE
 */
/* ARGSUSED */
int
osigsuspend(td, uap)
	struct thread *td;
	struct osigsuspend_args *uap;
{
	struct proc *p = td->td_proc;
	sigset_t mask;

	PROC_LOCK(p);
	/* Save the old mask; delivery path restores it via TDF_OLDMASK. */
	td->td_oldsigmask = td->td_sigmask;
	mtx_lock_spin(&sched_lock);
	td->td_flags |= TDF_OLDMASK;
	mtx_unlock_spin(&sched_lock);
	OSIG2SIG(uap->mask, mask);
	SIG_CANTMASK(mask);
	SIGSETLO(td->td_sigmask, mask);
	signotify(td);
	/* Sleep until msleep() is interrupted by a signal (PCATCH). */
	while (msleep(p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "opause", 0) == 0)
		/* void */;
	PROC_UNLOCK(p);
	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}
#endif /* COMPAT_43 */
1139
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
#ifndef _SYS_SYSPROTO_H_
struct osigstack_args {
	struct	sigstack *nss;
	struct	sigstack *oss;
};
#endif
/*
 * 4.3BSD sigstack: predecessor of sigaltstack(2).  The old interface
 * carries no size, so ss_size is set to 0 and sigonstack() falls back
 * to the SS_ONSTACK flag for these stacks.
 * MPSAFE
 */
/* ARGSUSED */
int
osigstack(td, uap)
	struct thread *td;
	register struct osigstack_args *uap;
{
	struct proc *p = td->td_proc;
	struct sigstack nss, oss;
	int error = 0;

	if (uap->nss != NULL) {
		error = copyin(uap->nss, &nss, sizeof(nss));
		if (error)
			return (error);
	}
	PROC_LOCK(p);
	oss.ss_sp = p->p_sigstk.ss_sp;
	oss.ss_onstack = sigonstack(cpu_getstack(td));
	if (uap->nss != NULL) {
		p->p_sigstk.ss_sp = nss.ss_sp;
		/* Size unknown with the old interface; see sigonstack(). */
		p->p_sigstk.ss_size = 0;
		p->p_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK;
		p->p_flag |= P_ALTSTACK;
	}
	PROC_UNLOCK(p);
	if (uap->oss != NULL)
		error = copyout(&oss, uap->oss, sizeof(oss));

	return (error);
}
#endif /* COMPAT_43 || COMPAT_SUNOS */
1181
1182 #ifndef _SYS_SYSPROTO_H_
1183 struct sigaltstack_args {
1184 stack_t *ss;
1185 stack_t *oss;
1186 };
1187 #endif
1188 /*
1189 * MPSAFE
1190 */
1191 /* ARGSUSED */
1192 int
1193 sigaltstack(td, uap)
1194 struct thread *td;
1195 register struct sigaltstack_args *uap;
1196 {
1197 stack_t ss, oss;
1198 int error;
1199
1200 if (uap->ss != NULL) {
1201 error = copyin(uap->ss, &ss, sizeof(ss));
1202 if (error)
1203 return (error);
1204 }
1205 error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
1206 (uap->oss != NULL) ? &oss : NULL);
1207 if (error)
1208 return (error);
1209 if (uap->oss != NULL)
1210 error = copyout(&oss, uap->oss, sizeof(stack_t));
1211 return (error);
1212 }
1213
/*
 * Backend for sigaltstack(2): optionally report the current alternate
 * stack in *oss and install *ss.  Returns EPERM if currently running on
 * the alternate stack, EINVAL for bad flags, ENOMEM for an undersized
 * stack, else 0.
 */
int
kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
{
	struct proc *p = td->td_proc;
	int oonstack;

	PROC_LOCK(p);
	oonstack = sigonstack(cpu_getstack(td));

	if (oss != NULL) {
		*oss = p->p_sigstk;
		oss->ss_flags = (p->p_flag & P_ALTSTACK)
		    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	}

	if (ss != NULL) {
		/* Cannot modify the altstack while executing on it. */
		if (oonstack) {
			PROC_UNLOCK(p);
			return (EPERM);
		}
		/* SS_DISABLE is the only flag a caller may set. */
		if ((ss->ss_flags & ~SS_DISABLE) != 0) {
			PROC_UNLOCK(p);
			return (EINVAL);
		}
		if (!(ss->ss_flags & SS_DISABLE)) {
			if (ss->ss_size < p->p_sysent->sv_minsigstksz) {
				PROC_UNLOCK(p);
				return (ENOMEM);
			}
			p->p_sigstk = *ss;
			p->p_flag |= P_ALTSTACK;
		} else {
			p->p_flag &= ~P_ALTSTACK;
		}
	}
	PROC_UNLOCK(p);
	return (0);
}
1252
/*
 * Common code for kill process group/broadcast kill.
 * td is the calling thread.  If all is non-zero, signal every process
 * the caller may signal (broadcast); otherwise signal process group
 * pgid (0 meaning the caller's own group).  A sig of 0 performs only
 * the permission/existence check.  Returns 0 if at least one process
 * was signalled, ESRCH otherwise.
 */
static int
killpg1(td, sig, pgid, all)
	register struct thread *td;
	int sig, pgid, all;
{
	register struct proc *p;
	struct pgrp *pgrp;
	int nfound = 0;

	if (all) {
		/*
		 * broadcast
		 */
		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list) {
			PROC_LOCK(p);
			/* Skip init, kernel processes, and ourselves. */
			if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
			    p == td->td_proc) {
				PROC_UNLOCK(p);
				continue;
			}
			if (p_cansignal(td, p, sig) == 0) {
				nfound++;
				if (sig)
					psignal(p, sig);
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
	} else {
		sx_slock(&proctree_lock);
		if (pgid == 0) {
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = td->td_proc->p_pgrp;
			PGRP_LOCK(pgrp);
		} else {
			/* pgfind() returns the pgrp locked, or NULL. */
			pgrp = pgfind(pgid);
			if (pgrp == NULL) {
				sx_sunlock(&proctree_lock);
				return (ESRCH);
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			/* Skip init and kernel processes. */
			PROC_LOCK(p);
			if (p->p_pid <= 1 || p->p_flag & P_SYSTEM) {
				PROC_UNLOCK(p);
				continue;
			}
			/* Zombies can't take signals. */
			if (p->p_state == PRS_ZOMBIE) {
				PROC_UNLOCK(p);
				continue;
			}
			if (p_cansignal(td, p, sig) == 0) {
				nfound++;
				if (sig)
					psignal(p, sig);
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pgrp);
	}
	return (nfound ? 0 : ESRCH);
}
1323
1324 #ifndef _SYS_SYSPROTO_H_
1325 struct kill_args {
1326 int pid;
1327 int signum;
1328 };
1329 #endif
1330 /*
1331 * MPSAFE
1332 */
1333 /* ARGSUSED */
1334 int
1335 kill(td, uap)
1336 register struct thread *td;
1337 register struct kill_args *uap;
1338 {
1339 register struct proc *p;
1340 int error;
1341
1342 if ((u_int)uap->signum > _SIG_MAXSIG)
1343 return (EINVAL);
1344
1345 if (uap->pid > 0) {
1346 /* kill single process */
1347 if ((p = pfind(uap->pid)) == NULL)
1348 return (ESRCH);
1349 error = p_cansignal(td, p, uap->signum);
1350 if (error == 0 && uap->signum)
1351 psignal(p, uap->signum);
1352 PROC_UNLOCK(p);
1353 return (error);
1354 }
1355 switch (uap->pid) {
1356 case -1: /* broadcast signal */
1357 return (killpg1(td, uap->signum, 0, 1));
1358 case 0: /* signal own process group */
1359 return (killpg1(td, uap->signum, 0, 0));
1360 default: /* negative explicit process group */
1361 return (killpg1(td, uap->signum, -uap->pid, 0));
1362 }
1363 /* NOTREACHED */
1364 }
1365
1366 #if defined(COMPAT_43) || defined(COMPAT_SUNOS)
1367 #ifndef _SYS_SYSPROTO_H_
1368 struct okillpg_args {
1369 int pgid;
1370 int signum;
1371 };
1372 #endif
1373 /*
1374 * MPSAFE
1375 */
1376 /* ARGSUSED */
1377 int
1378 okillpg(td, uap)
1379 struct thread *td;
1380 register struct okillpg_args *uap;
1381 {
1382
1383 if ((u_int)uap->signum > _SIG_MAXSIG)
1384 return (EINVAL);
1385 return (killpg1(td, uap->signum, uap->pgid, 0));
1386 }
1387 #endif /* COMPAT_43 || COMPAT_SUNOS */
1388
1389 /*
1390 * Send a signal to a process group.
1391 */
1392 void
1393 gsignal(pgid, sig)
1394 int pgid, sig;
1395 {
1396 struct pgrp *pgrp;
1397
1398 if (pgid != 0) {
1399 sx_slock(&proctree_lock);
1400 pgrp = pgfind(pgid);
1401 sx_sunlock(&proctree_lock);
1402 if (pgrp != NULL) {
1403 pgsignal(pgrp, sig, 0);
1404 PGRP_UNLOCK(pgrp);
1405 }
1406 }
1407 }
1408
1409 /*
1410 * Send a signal to a process group. If checktty is 1,
1411 * limit to members which have a controlling terminal.
1412 */
1413 void
1414 pgsignal(pgrp, sig, checkctty)
1415 struct pgrp *pgrp;
1416 int sig, checkctty;
1417 {
1418 register struct proc *p;
1419
1420 if (pgrp) {
1421 PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
1422 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1423 PROC_LOCK(p);
1424 if (checkctty == 0 || p->p_flag & P_CONTROLT)
1425 psignal(p, sig);
1426 PROC_UNLOCK(p);
1427 }
1428 }
1429 }
1430
/*
 * Send a signal caused by a trap to the current thread.
 * If it will be caught immediately, deliver it with correct code.
 * Otherwise, post it normally.
 *
 * MPSAFE
 */
void
trapsignal(struct thread *td, int sig, u_long code)
{
	struct sigacts *ps;
	struct proc *p;

	p = td->td_proc;

	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	/*
	 * Fast path: the signal is caught, not blocked by this thread,
	 * and the process is not traced -- invoke the handler directly.
	 */
	if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
	    !SIGISMEMBER(td->td_sigmask, sig)) {
		p->p_stats->p_ru.ru_nsignals++;
#ifdef KTRACE
		if (KTRPOINT(curthread, KTR_PSIG))
			ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
			    &td->td_sigmask, code);
#endif
		(*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)], sig,
						&td->td_sigmask, code);
		/* Block the handler's catchmask (and sig itself, unless
		 * SA_NODEFER) for the duration of the handler. */
		SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
		if (!SIGISMEMBER(ps->ps_signodefer, sig))
			SIGADDSET(td->td_sigmask, sig);
		if (SIGISMEMBER(ps->ps_sigreset, sig)) {
			/*
			 * SA_RESETHAND: revert to SIG_DFL after delivery.
			 * See kern_sigaction() for origin of this code.
			 */
			SIGDELSET(ps->ps_sigcatch, sig);
			if (sig != SIGCONT &&
			    sigprop(sig) & SA_IGNORE)
				SIGADDSET(ps->ps_sigignore, sig);
			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
		}
		mtx_unlock(&ps->ps_mtx);
	} else {
		/* Slow path: post the signal for normal delivery. */
		mtx_unlock(&ps->ps_mtx);
		p->p_code = code;	/* XXX for core dump/debugger */
		p->p_sig = sig;		/* XXX to verify code */
		tdsignal(td, sig);
	}
	PROC_UNLOCK(p);
}
1481
/*
 * Select the thread in process p that should receive signal sig
 * (whose sigprop() bits are in prop).  Called with the proc locked.
 * Never returns NULL for a live process.
 */
static struct thread *
sigtd(struct proc *p, int sig, int prop)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * If we know the signal is bound for a specific thread then we
	 * assume that we are in that thread's context.  This is the case
	 * for SIGXCPU, SIGILL, etc.  Otherwise someone did a kill() from
	 * userland and the real thread doesn't actually matter.
	 */
	if ((prop & SA_PROC) != 0 && curthread->td_proc == p)
		return (curthread);

	/*
	 * We should search for the first thread that is blocked in
	 * sigsuspend with this signal unmasked.
	 */

	/* XXX */

	/*
	 * Find the first thread in the proc that doesn't have this signal
	 * masked.
	 */
	FOREACH_THREAD_IN_PROC(p, td)
		if (!SIGISMEMBER(td->td_sigmask, sig))
			return (td);

	/* Every thread has it masked; fall back to the first thread. */
	return (FIRST_THREAD_IN_PROC(p));
}
1515
1516 /*
1517 * Send the signal to the process. If the signal has an action, the action
1518 * is usually performed by the target process rather than the caller; we add
1519 * the signal to the set of pending signals for the process.
1520 *
1521 * Exceptions:
1522 * o When a stop signal is sent to a sleeping process that takes the
1523 * default action, the process is stopped without awakening it.
1524 * o SIGCONT restarts stopped processes (or puts them back to sleep)
1525 * regardless of the signal action (eg, blocked or ignored).
1526 *
1527 * Other ignored signals are discarded immediately.
1528 *
1529 * MPSAFE
1530 */
1531 void
1532 psignal(struct proc *p, int sig)
1533 {
1534 struct thread *td;
1535 int prop;
1536
1537 if (!_SIG_VALID(sig))
1538 panic("psignal(): invalid signal");
1539
1540 PROC_LOCK_ASSERT(p, MA_OWNED);
1541 prop = sigprop(sig);
1542
1543 /*
1544 * Find a thread to deliver the signal to.
1545 */
1546 td = sigtd(p, sig, prop);
1547
1548 tdsignal(td, sig);
1549 }
1550
1551 /*
1552 * MPSAFE
1553 */
1554 void
1555 tdsignal(struct thread *td, int sig)
1556 {
1557 struct proc *p;
1558 register sig_t action;
1559 sigset_t *siglist;
1560 struct thread *td0;
1561 register int prop;
1562 struct sigacts *ps;
1563
1564 if (!_SIG_VALID(sig))
1565 panic("do_tdsignal(): invalid signal");
1566
1567 p = td->td_proc;
1568 ps = p->p_sigacts;
1569
1570 PROC_LOCK_ASSERT(p, MA_OWNED);
1571 KNOTE(&p->p_klist, NOTE_SIGNAL | sig);
1572
1573 prop = sigprop(sig);
1574
1575 /*
1576 * If this thread is blocking this signal then we'll leave it in the
1577 * proc so that we can find it in the first thread that unblocks it.
1578 */
1579 if (SIGISMEMBER(td->td_sigmask, sig))
1580 siglist = &p->p_siglist;
1581 else
1582 siglist = &td->td_siglist;
1583
1584 /*
1585 * If proc is traced, always give parent a chance;
1586 * if signal event is tracked by procfs, give *that*
1587 * a chance, as well.
1588 */
1589 if ((p->p_flag & P_TRACED) || (p->p_stops & S_SIG)) {
1590 action = SIG_DFL;
1591 } else {
1592 /*
1593 * If the signal is being ignored,
1594 * then we forget about it immediately.
1595 * (Note: we don't set SIGCONT in ps_sigignore,
1596 * and if it is set to SIG_IGN,
1597 * action will be SIG_DFL here.)
1598 */
1599 mtx_lock(&ps->ps_mtx);
1600 if (SIGISMEMBER(ps->ps_sigignore, sig) ||
1601 (p->p_flag & P_WEXIT)) {
1602 mtx_unlock(&ps->ps_mtx);
1603 return;
1604 }
1605 if (SIGISMEMBER(td->td_sigmask, sig))
1606 action = SIG_HOLD;
1607 else if (SIGISMEMBER(ps->ps_sigcatch, sig))
1608 action = SIG_CATCH;
1609 else
1610 action = SIG_DFL;
1611 mtx_unlock(&ps->ps_mtx);
1612 }
1613
1614 if (prop & SA_CONT) {
1615 SIG_STOPSIGMASK(p->p_siglist);
1616 /*
1617 * XXX Should investigate leaving STOP and CONT sigs only in
1618 * the proc's siglist.
1619 */
1620 FOREACH_THREAD_IN_PROC(p, td0)
1621 SIG_STOPSIGMASK(td0->td_siglist);
1622 }
1623
1624 if (prop & SA_STOP) {
1625 /*
1626 * If sending a tty stop signal to a member of an orphaned
1627 * process group, discard the signal here if the action
1628 * is default; don't stop the process below if sleeping,
1629 * and don't clear any pending SIGCONT.
1630 */
1631 if ((prop & SA_TTYSTOP) &&
1632 (p->p_pgrp->pg_jobc == 0) &&
1633 (action == SIG_DFL))
1634 return;
1635 SIG_CONTSIGMASK(p->p_siglist);
1636 FOREACH_THREAD_IN_PROC(p, td0)
1637 SIG_CONTSIGMASK(td0->td_siglist);
1638 p->p_flag &= ~P_CONTINUED;
1639 }
1640 SIGADDSET(*siglist, sig);
1641 signotify(td); /* uses schedlock */
1642 /*
1643 * Defer further processing for signals which are held,
1644 * except that stopped processes must be continued by SIGCONT.
1645 */
1646 if (action == SIG_HOLD &&
1647 !((prop & SA_CONT) && (p->p_flag & P_STOPPED_SIG)))
1648 return;
1649 /*
1650 * Some signals have a process-wide effect and a per-thread
1651 * component. Most processing occurs when the process next
1652 * tries to cross the user boundary, however there are some
1653 * times when processing needs to be done immediatly, such as
1654 * waking up threads so that they can cross the user boundary.
1655 * We try do the per-process part here.
1656 */
1657 if (P_SHOULDSTOP(p)) {
1658 /*
1659 * The process is in stopped mode. All the threads should be
1660 * either winding down or already on the suspended queue.
1661 */
1662 if (p->p_flag & P_TRACED) {
1663 /*
1664 * The traced process is already stopped,
1665 * so no further action is necessary.
1666 * No signal can restart us.
1667 */
1668 goto out;
1669 }
1670
1671 if (sig == SIGKILL) {
1672 /*
1673 * SIGKILL sets process running.
1674 * It will die elsewhere.
1675 * All threads must be restarted.
1676 */
1677 p->p_flag &= ~P_STOPPED;
1678 goto runfast;
1679 }
1680
1681 if (prop & SA_CONT) {
1682 /*
1683 * If SIGCONT is default (or ignored), we continue the
1684 * process but don't leave the signal in siglist as
1685 * it has no further action. If SIGCONT is held, we
1686 * continue the process and leave the signal in
1687 * siglist. If the process catches SIGCONT, let it
1688 * handle the signal itself. If it isn't waiting on
1689 * an event, it goes back to run state.
1690 * Otherwise, process goes back to sleep state.
1691 */
1692 p->p_flag &= ~P_STOPPED_SIG;
1693 p->p_flag |= P_CONTINUED;
1694 if (action == SIG_DFL) {
1695 SIGDELSET(*siglist, sig);
1696 } else if (action == SIG_CATCH) {
1697 /*
1698 * The process wants to catch it so it needs
1699 * to run at least one thread, but which one?
1700 * It would seem that the answer would be to
1701 * run an upcall in the next KSE to run, and
1702 * deliver the signal that way. In a NON KSE
1703 * process, we need to make sure that the
1704 * single thread is runnable asap.
1705 * XXXKSE for now however, make them all run.
1706 */
1707 goto runfast;
1708 }
1709 /*
1710 * The signal is not ignored or caught.
1711 */
1712 mtx_lock_spin(&sched_lock);
1713 thread_unsuspend(p);
1714 mtx_unlock_spin(&sched_lock);
1715 goto out;
1716 }
1717
1718 if (prop & SA_STOP) {
1719 /*
1720 * Already stopped, don't need to stop again
1721 * (If we did the shell could get confused).
1722 * Just make sure the signal STOP bit set.
1723 */
1724 p->p_flag |= P_STOPPED_SIG;
1725 SIGDELSET(*siglist, sig);
1726 goto out;
1727 }
1728
1729 /*
1730 * All other kinds of signals:
1731 * If a thread is sleeping interruptibly, simulate a
1732 * wakeup so that when it is continued it will be made
1733 * runnable and can look at the signal. However, don't make
1734 * the PROCESS runnable, leave it stopped.
1735 * It may run a bit until it hits a thread_suspend_check().
1736 */
1737 mtx_lock_spin(&sched_lock);
1738 if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR)) {
1739 if (td->td_flags & TDF_CVWAITQ)
1740 cv_abort(td);
1741 else
1742 abortsleep(td);
1743 }
1744 mtx_unlock_spin(&sched_lock);
1745 goto out;
1746 /*
1747 * XXXKSE What about threads that are waiting on mutexes?
1748 * Shouldn't they abort too?
1749 * No, hopefully mutexes are short lived.. They'll
1750 * eventually hit thread_suspend_check().
1751 */
1752 } else if (p->p_state == PRS_NORMAL) {
1753 if ((p->p_flag & P_TRACED) || (action != SIG_DFL) ||
1754 !(prop & SA_STOP)) {
1755 mtx_lock_spin(&sched_lock);
1756 tdsigwakeup(td, sig, action);
1757 mtx_unlock_spin(&sched_lock);
1758 goto out;
1759 }
1760 if (prop & SA_STOP) {
1761 if (p->p_flag & P_PPWAIT)
1762 goto out;
1763 p->p_flag |= P_STOPPED_SIG;
1764 p->p_xstat = sig;
1765 mtx_lock_spin(&sched_lock);
1766 FOREACH_THREAD_IN_PROC(p, td0) {
1767 if (TD_IS_SLEEPING(td0) &&
1768 (td->td_flags & TDF_SINTR))
1769 thread_suspend_one(td0);
1770 }
1771 thread_stopped(p);
1772 if (p->p_numthreads == p->p_suspcount) {
1773 SIGDELSET(p->p_siglist, p->p_xstat);
1774 FOREACH_THREAD_IN_PROC(p, td0)
1775 SIGDELSET(td0->td_siglist, p->p_xstat);
1776 }
1777 mtx_unlock_spin(&sched_lock);
1778 goto out;
1779 }
1780 else
1781 goto runfast;
1782 /* NOTREACHED */
1783 } else {
1784 /* Not in "NORMAL" state. discard the signal. */
1785 SIGDELSET(*siglist, sig);
1786 goto out;
1787 }
1788
1789 /*
1790 * The process is not stopped so we need to apply the signal to all the
1791 * running threads.
1792 */
1793
1794 runfast:
1795 mtx_lock_spin(&sched_lock);
1796 tdsigwakeup(td, sig, action);
1797 thread_unsuspend(p);
1798 mtx_unlock_spin(&sched_lock);
1799 out:
1800 /* If we jump here, sched_lock should not be owned. */
1801 mtx_assert(&sched_lock, MA_NOTOWNED);
1802 }
1803
/*
 * The force of a signal has been directed against a single
 * thread.  We need to see what we can do about knocking it
 * out of any sleep it may be in etc.
 *
 * Called with the proc lock and sched_lock held.
 */
static void
tdsigwakeup(struct thread *td, int sig, sig_t action)
{
	struct proc *p = td->td_proc;
	register int prop;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	prop = sigprop(sig);
	/*
	 * Bring the priority of a thread up if we want it to get
	 * killed in this lifetime.
	 */
	if ((action == SIG_DFL) && (prop & SA_KILL)) {
		if (td->td_priority > PUSER) {
			td->td_priority = PUSER;
		}
	}
	if (TD_IS_SLEEPING(td)) {
		/*
		 * If thread is sleeping uninterruptibly
		 * we can't interrupt the sleep... the signal will
		 * be noticed when the process returns through
		 * trap() or syscall().
		 */
		if ((td->td_flags & TDF_SINTR) == 0) {
			return;
		}
		/*
		 * Process is sleeping and traced.  Make it runnable
		 * so it can discover the signal in issignal() and stop
		 * for its parent.
		 */
		if (p->p_flag & P_TRACED) {
			p->p_flag &= ~P_STOPPED_TRACE;
		} else {

			/*
			 * If SIGCONT is default (or ignored) and process is
			 * asleep, we are finished; the process should not
			 * be awakened.
			 */
			if ((prop & SA_CONT) && action == SIG_DFL) {
				SIGDELSET(p->p_siglist, sig);
				/*
				 * It may be on either list in this state.
				 * Remove from both for now.
				 */
				SIGDELSET(td->td_siglist, sig);
				return;
			}

			/*
			 * Raise priority to at least PUSER.
			 */
			if (td->td_priority > PUSER) {
				td->td_priority = PUSER;
			}
		}
		/* Abort the interruptible sleep so the signal is seen. */
		if (td->td_flags & TDF_CVWAITQ)
			cv_abort(td);
		else
			abortsleep(td);
	}
#ifdef SMP
	  else {
		/*
		 * Other states do nothing with the signal immediatly,
		 * other than kicking ourselves if we are running.
		 * It will either never be noticed, or noticed very soon.
		 */
		if (TD_IS_RUNNING(td) && td != curthread) {
			forward_signal(td);
		}
	  }
#endif
}
1886
/*
 * If the current process has received a signal (should be caught or cause
 * termination, should interrupt current syscall), return the signal number.
 * Stop signals with default action are processed immediately, then cleared;
 * they aren't returned.  This is checked after each entry to the system for
 * a syscall or trap (though this can usually be done without calling issignal
 * by checking the pending signal masks in cursig.) The normal call
 * sequence is
 *
 *	while (sig = cursig(curthread))
 *		postsig(sig);
 *
 * Called with the proc lock and ps_mtx held; both may be dropped and
 * reacquired while the thread stops for a debugger or for job control.
 */
static int
issignal(td)
	struct thread *td;
{
	struct proc *p;
	struct sigacts *ps;
	sigset_t sigpending;
	register int sig, prop;

	p = td->td_proc;
	ps = p->p_sigacts;
	mtx_assert(&ps->ps_mtx, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	for (;;) {
		int traced = (p->p_flag & P_TRACED) || (p->p_stops & S_SIG);

		/* Deliverable set = pending and not masked by the thread. */
		sigpending = td->td_siglist;
		SIGSETNAND(sigpending, td->td_sigmask);

		if (p->p_flag & P_PPWAIT)
			SIG_STOPSIGMASK(sigpending);
		if (SIGISEMPTY(sigpending))	/* no signal to send */
			return (0);
		sig = sig_ffs(&sigpending);

		_STOPEVENT(p, S_SIG, sig);

		/*
		 * We should see pending but ignored signals
		 * only if P_TRACED was on when they were posted.
		 */
		if (SIGISMEMBER(ps->ps_sigignore, sig) && (traced == 0)) {
			SIGDELSET(td->td_siglist, sig);
			continue;
		}
		if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
			/*
			 * If traced, always stop.
			 */
			mtx_unlock(&ps->ps_mtx);
			WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
			    &p->p_mtx.mtx_object, "Stopping for traced signal");
			p->p_xstat = sig;
			/* Tell the debugger (our parent), then suspend. */
			PROC_LOCK(p->p_pptr);
			psignal(p->p_pptr, SIGCHLD);
			PROC_UNLOCK(p->p_pptr);
			mtx_lock_spin(&sched_lock);
			stop(p);	/* uses schedlock too eventually */
			thread_suspend_one(td);
			PROC_UNLOCK(p);
			DROP_GIANT();
			p->p_stats->p_ru.ru_nivcsw++;
			mi_switch();
			mtx_unlock_spin(&sched_lock);
			PICKUP_GIANT();
			PROC_LOCK(p);
			mtx_lock(&ps->ps_mtx);

			/*
			 * If parent wants us to take the signal,
			 * then it will leave it in p->p_xstat;
			 * otherwise we just look for signals again.
			 */
			SIGDELSET(td->td_siglist, sig);	/* clear old signal */
			sig = p->p_xstat;
			if (sig == 0)
				continue;

			/*
			 * If the traced bit got turned off, go back up
			 * to the top to rescan signals.  This ensures
			 * that p_sig* and p_sigact are consistent.
			 */
			if ((p->p_flag & P_TRACED) == 0)
				continue;

			/*
			 * Put the new signal into td_siglist.  If the
			 * signal is being masked, look for other signals.
			 */
			SIGADDSET(td->td_siglist, sig);
			if (SIGISMEMBER(td->td_sigmask, sig))
				continue;
			signotify(td);
		}

		prop = sigprop(sig);

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */
		switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {

		case (intptr_t)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (p->p_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				printf("Process (pid %lu) got signal %d\n",
					(u_long)p->p_pid, sig);
#endif
				break;		/* == ignore */
			}
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal.  However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				if (p->p_flag & P_TRACED ||
				    (p->p_pgrp->pg_jobc == 0 &&
				     prop & SA_TTYSTOP))
					break;	/* == ignore */
				mtx_unlock(&ps->ps_mtx);
				WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
				    &p->p_mtx.mtx_object, "Catching SIGSTOP");
				p->p_flag |= P_STOPPED_SIG;
				p->p_xstat = sig;
				mtx_lock_spin(&sched_lock);
				thread_stopped(p);
				thread_suspend_one(td);
				PROC_UNLOCK(p);
				DROP_GIANT();
				p->p_stats->p_ru.ru_nivcsw++;
				mi_switch();
				mtx_unlock_spin(&sched_lock);
				PICKUP_GIANT();
				PROC_LOCK(p);
				mtx_lock(&ps->ps_mtx);
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else
				return (sig);
			/*NOTREACHED*/

		case (intptr_t)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
			if ((prop & SA_CONT) == 0 &&
			    (p->p_flag & P_TRACED) == 0)
				printf("issignal\n");
			break;		/* == ignore */

		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			return (sig);
		}
		SIGDELSET(td->td_siglist, sig);		/* take the signal! */
	}
	/* NOTREACHED */
}
2070
/*
 * Put the argument process into the stopped state and notify the parent
 * via wakeup.  Signals are handled elsewhere.  The process must not be
 * on the run queue.  Must be called with the proc p locked and the scheduler
 * lock held.
 */
static void
stop(struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag |= P_STOPPED_SIG;
	/* Clear P_WAITED so wait(2) will report this stop. */
	p->p_flag &= ~P_WAITED;
	wakeup(p->p_pptr);
}
2086
/*
 * Called when a thread of p is suspending for a stop signal.  Once
 * every thread is accounted for (counting the caller if it belongs to
 * p), mark the process stopped and post SIGCHLD to the parent unless
 * it asked for SA_NOCLDSTOP.  Called with the proc lock and
 * sched_lock held; sched_lock is dropped and reacquired internally.
 *
 * MPSAFE
 */
void
thread_stopped(struct proc *p)
{
	struct proc *p1 = curthread->td_proc;
	struct sigacts *ps;
	int n;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	n = p->p_suspcount;
	/* The current thread is about to suspend but isn't counted yet. */
	if (p == p1)
		n++;
	if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) {
		mtx_unlock_spin(&sched_lock);
		stop(p);
		PROC_LOCK(p->p_pptr);
		ps = p->p_pptr->p_sigacts;
		mtx_lock(&ps->ps_mtx);
		if ((ps->ps_flag & PS_NOCLDSTOP) == 0) {
			mtx_unlock(&ps->ps_mtx);
			psignal(p->p_pptr, SIGCHLD);
		} else
			mtx_unlock(&ps->ps_mtx);
		PROC_UNLOCK(p->p_pptr);
		mtx_lock_spin(&sched_lock);
	}
}
2117
/*
 * Take the action for the specified signal
 * from the current set of pending signals.
 *
 * Called with the proc lock and ps_mtx held; ps_mtx is released on the
 * SIG_DFL (sigexit) path and remains held otherwise.
 */
void
postsig(sig)
	register int sig;
{
	struct thread *td = curthread;
	register struct proc *p = td->td_proc;
	struct sigacts *ps;
	sig_t action;
	sigset_t returnmask;
	int code;

	KASSERT(sig != 0, ("postsig"));

	PROC_LOCK_ASSERT(p, MA_OWNED);
	ps = p->p_sigacts;
	mtx_assert(&ps->ps_mtx, MA_OWNED);
	SIGDELSET(td->td_siglist, sig);
	action = ps->ps_sigact[_SIG_IDX(sig)];
#ifdef KTRACE
	if (KTRPOINT(td, KTR_PSIG))
		ktrpsig(sig, action, td->td_flags & TDF_OLDMASK ?
		    &td->td_oldsigmask : &td->td_sigmask, 0);
#endif
	_STOPEVENT(p, S_SIG, sig);

	if (action == SIG_DFL) {
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		mtx_unlock(&ps->ps_mtx);
		sigexit(td, sig);
		/* NOTREACHED */
	} else {
		/*
		 * If we get here, the signal must be caught.
		 */
		KASSERT(action != SIG_IGN && !SIGISMEMBER(td->td_sigmask, sig),
		    ("postsig action"));
		/*
		 * Set the new mask value and also defer further
		 * occurrences of this signal.
		 *
		 * Special case: user has done a sigsuspend.  Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigsuspend is what we want
		 * restored after the signal processing is completed.
		 */
		if (td->td_flags & TDF_OLDMASK) {
			returnmask = td->td_oldsigmask;
			mtx_lock_spin(&sched_lock);
			td->td_flags &= ~TDF_OLDMASK;
			mtx_unlock_spin(&sched_lock);
		} else
			returnmask = td->td_sigmask;

		SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
		if (!SIGISMEMBER(ps->ps_signodefer, sig))
			SIGADDSET(td->td_sigmask, sig);

		if (SIGISMEMBER(ps->ps_sigreset, sig)) {
			/*
			 * SA_RESETHAND: revert to default disposition.
			 * See kern_sigaction() for origin of this code.
			 */
			SIGDELSET(ps->ps_sigcatch, sig);
			if (sig != SIGCONT &&
			    sigprop(sig) & SA_IGNORE)
				SIGADDSET(ps->ps_sigignore, sig);
			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
		}
		p->p_stats->p_ru.ru_nsignals++;
		/* Pass p_code along only if it belongs to this signal. */
		if (p->p_sig != sig) {
			code = 0;
		} else {
			code = p->p_code;
			p->p_code = 0;
			p->p_sig = 0;
		}
		if (p->p_flag & P_THREADED)
			thread_signal_add(curthread, sig);
		else
			(*p->p_sysent->sv_sendsig)(action, sig,
			    &returnmask, code);
	}
}
2207
2208 /*
2209 * Kill the current process for stated reason.
2210 */
2211 void
2212 killproc(p, why)
2213 struct proc *p;
2214 char *why;
2215 {
2216
2217 PROC_LOCK_ASSERT(p, MA_OWNED);
2218 CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)",
2219 p, p->p_pid, p->p_comm);
2220 log(LOG_ERR, "pid %d (%s), uid %d, was killed: %s\n", p->p_pid, p->p_comm,
2221 p->p_ucred ? p->p_ucred->cr_uid : -1, why);
2222 psignal(p, SIGKILL);
2223 }
2224
/*
 * Force the current process to exit with the specified signal, dumping core
 * if appropriate.  We bypass the normal tests for masked and caught signals,
 * allowing unrecoverable failures to terminate the process without changing
 * signal state.  Mark the accounting record with the signal termination.
 * If dumping core, save the signal number for the debugger.  Calls exit and
 * does not return.  Called with the proc lock held; it is released before
 * exit1().
 *
 * MPSAFE
 */
void
sigexit(td, sig)
	struct thread *td;
	int sig;
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_acflag |= AXSIG;
	if (sigprop(sig) & SA_CORE) {
		p->p_sig = sig;
		/*
		 * Log signals which would cause core dumps
		 * (Log as LOG_INFO to appease those who don't want
		 * these messages.)
		 * XXX : Todo, as well as euid, write out ruid too
		 */
		PROC_UNLOCK(p);
		if (!mtx_owned(&Giant))
			mtx_lock(&Giant);
		if (coredump(td) == 0)
			sig |= WCOREFLAG;
		if (kern_logsigexit)
			log(LOG_INFO,
			    "pid %d (%s), uid %d: exited on signal %d%s\n",
			    p->p_pid, p->p_comm,
			    td->td_ucred ? td->td_ucred->cr_uid : -1,
			    sig &~ WCOREFLAG,
			    sig & WCOREFLAG ? " (core dumped)" : "");
	} else {
		PROC_UNLOCK(p);
		if (!mtx_owned(&Giant))
			mtx_lock(&Giant);
	}
	exit1(td, W_EXITCODE(0, sig));
	/* NOTREACHED */
}
2272
2273 static char corefilename[MAXPATHLEN+1] = {"%N.core"};
2274 SYSCTL_STRING(_kern, OID_AUTO, corefile, CTLFLAG_RW, corefilename,
2275 sizeof(corefilename), "process corefile name format string");
2276
2277 /*
2278 * expand_name(name, uid, pid)
2279 * Expand the name described in corefilename, using name, uid, and pid.
2280 * corefilename is a printf-like string, with three format specifiers:
2281 * %N name of process ("name")
2282 * %P process id (pid)
2283 * %U user id (uid)
2284 * For example, "%N.core" is the default; they can be disabled completely
2285 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
2286 * This is controlled by the sysctl variable kern.corefile (see above).
2287 */
2288
2289 static char *
2290 expand_name(name, uid, pid)
2291 const char *name;
2292 uid_t uid;
2293 pid_t pid;
2294 {
2295 const char *format, *appendstr;
2296 char *temp;
2297 char buf[11]; /* Buffer for pid/uid -- max 4B */
2298 size_t i, l, n;
2299
2300 format = corefilename;
2301 temp = malloc(MAXPATHLEN, M_TEMP, M_NOWAIT | M_ZERO);
2302 if (temp == NULL)
2303 return (NULL);
2304 for (i = 0, n = 0; n < MAXPATHLEN && format[i]; i++) {
2305 switch (format[i]) {
2306 case '%': /* Format character */
2307 i++;
2308 switch (format[i]) {
2309 case '%':
2310 appendstr = "%";
2311 break;
2312 case 'N': /* process name */
2313 appendstr = name;
2314 break;
2315 case 'P': /* process id */
2316 sprintf(buf, "%u", pid);
2317 appendstr = buf;
2318 break;
2319 case 'U': /* user id */
2320 sprintf(buf, "%u", uid);
2321 appendstr = buf;
2322 break;
2323 default:
2324 appendstr = "";
2325 log(LOG_ERR,
2326 "Unknown format character %c in `%s'\n",
2327 format[i], format);
2328 }
2329 l = strlen(appendstr);
2330 if ((n + l) >= MAXPATHLEN)
2331 goto toolong;
2332 memcpy(temp + n, appendstr, l);
2333 n += l;
2334 break;
2335 default:
2336 temp[n++] = format[i];
2337 }
2338 }
2339 if (format[i] != '\0')
2340 goto toolong;
2341 return (temp);
2342 toolong:
2343 log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too long\n",
2344 (long)pid, name, (u_long)uid);
2345 free(temp, M_TEMP);
2346 return (NULL);
2347 }
2348
/*
 * Dump a process' core.  The main routine does some
 * policy checking, and creates the name of the coredump;
 * then it passes on a vnode and a size limit to the process-specific
 * coredump routine if there is one; if there _is not_ one, it returns
 * ENOSYS; otherwise it returns the error from the process-specific routine.
 */
static int
coredump(struct thread *td)
{
	struct proc *p = td->td_proc;
	register struct vnode *vp;
	register struct ucred *cred = td->td_ucred;
	struct flock lf;
	struct nameidata nd;
	struct vattr vattr;
	int error, error1, flags;
	struct mount *mp;
	char *name;			/* name of corefile */
	off_t limit;

	PROC_LOCK(p);
	_STOPEVENT(p, S_CORE, 0);

	/* Respect the sugid_coredump and do_coredump policy knobs. */
	if (((sugid_coredump == 0) && p->p_flag & P_SUGID) || do_coredump == 0) {
		PROC_UNLOCK(p);
		return (EFAULT);
	}

	/*
	 * Note that the bulk of limit checking is done after
	 * the corefile is created.  The exception is if the limit
	 * for corefiles is 0, in which case we don't bother
	 * creating the corefile at all.  This layout means that
	 * a corefile is truncated instead of not being created,
	 * if it is larger than the limit.
	 */
	limit = p->p_rlimit[RLIMIT_CORE].rlim_cur;
	if (limit == 0) {
		PROC_UNLOCK(p);
		return 0;
	}
	PROC_UNLOCK(p);

restart:
	name = expand_name(p->p_comm, td->td_ucred->cr_uid, p->p_pid);
	if (name == NULL)
		return (EINVAL);
	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td); /* XXXKSE */
	flags = O_CREAT | FWRITE | O_NOFOLLOW;
	error = vn_open(&nd, &flags, S_IRUSR | S_IWUSR);
	free(name, M_TEMP);
	if (error)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vp = nd.ni_vp;

	/* Don't dump to non-regular files or files with links. */
	if (vp->v_type != VREG ||
	    VOP_GETATTR(vp, &vattr, cred, td) || vattr.va_nlink != 1) {
		VOP_UNLOCK(vp, 0, td);
		error = EFAULT;
		goto out2;
	}

	VOP_UNLOCK(vp, 0, td);
	/* Take a whole-file write lock on the corefile. */
	lf.l_whence = SEEK_SET;
	lf.l_start = 0;
	lf.l_len = 0;
	lf.l_type = F_WRLCK;
	error = VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK);
	if (error)
		goto out2;

	/*
	 * If a writer cannot start now (e.g. filesystem suspension),
	 * drop everything, wait for write access, and retry from scratch.
	 */
	if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
		lf.l_type = F_UNLCK;
		VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
		if ((error = vn_close(vp, FWRITE, cred, td)) != 0)
			return (error);
		if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
			return (error);
		goto restart;
	}

	/* Truncate any existing corefile before writing. */
	VATTR_NULL(&vattr);
	vattr.va_size = 0;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	VOP_LEASE(vp, td, cred, LEASE_WRITE);
	VOP_SETATTR(vp, &vattr, cred, td);
	VOP_UNLOCK(vp, 0, td);
	PROC_LOCK(p);
	p->p_acflag |= ACORE;
	PROC_UNLOCK(p);

	/* Hand off to the ABI-specific core dumper, if any. */
	error = p->p_sysent->sv_coredump ?
	  p->p_sysent->sv_coredump(td, vp, limit) :
	  ENOSYS;

	lf.l_type = F_UNLCK;
	VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
	vn_finished_write(mp);
out2:
	error1 = vn_close(vp, FWRITE, cred, td);
	if (error == 0)
		error = error1;
	return (error);
}
2457
2458 /*
2459 * Nonexistent system call-- signal process (may want to handle it).
2460 * Flag error in case process won't see signal immediately (blocked or ignored).
2461 */
2462 #ifndef _SYS_SYSPROTO_H_
2463 struct nosys_args {
2464 int dummy;
2465 };
2466 #endif
2467 /*
2468 * MPSAFE
2469 */
2470 /* ARGSUSED */
2471 int
2472 nosys(td, args)
2473 struct thread *td;
2474 struct nosys_args *args;
2475 {
2476 struct proc *p = td->td_proc;
2477
2478 PROC_LOCK(p);
2479 psignal(p, SIGSYS);
2480 PROC_UNLOCK(p);
2481 return (ENOSYS);
2482 }
2483
2484 /*
2485 * Send a SIGIO or SIGURG signal to a process or process group using
2486 * stored credentials rather than those of the current process.
2487 */
2488 void
2489 pgsigio(sigiop, sig, checkctty)
2490 struct sigio **sigiop;
2491 int sig, checkctty;
2492 {
2493 struct sigio *sigio;
2494
2495 SIGIO_LOCK();
2496 sigio = *sigiop;
2497 if (sigio == NULL) {
2498 SIGIO_UNLOCK();
2499 return;
2500 }
2501 if (sigio->sio_pgid > 0) {
2502 PROC_LOCK(sigio->sio_proc);
2503 if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
2504 psignal(sigio->sio_proc, sig);
2505 PROC_UNLOCK(sigio->sio_proc);
2506 } else if (sigio->sio_pgid < 0) {
2507 struct proc *p;
2508
2509 PGRP_LOCK(sigio->sio_pgrp);
2510 LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
2511 PROC_LOCK(p);
2512 if (CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
2513 (checkctty == 0 || (p->p_flag & P_CONTROLT)))
2514 psignal(p, sig);
2515 PROC_UNLOCK(p);
2516 }
2517 PGRP_UNLOCK(sigio->sio_pgrp);
2518 }
2519 SIGIO_UNLOCK();
2520 }
2521
2522 static int
2523 filt_sigattach(struct knote *kn)
2524 {
2525 struct proc *p = curproc;
2526
2527 kn->kn_ptr.p_proc = p;
2528 kn->kn_flags |= EV_CLEAR; /* automatically set */
2529
2530 PROC_LOCK(p);
2531 SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
2532 PROC_UNLOCK(p);
2533
2534 return (0);
2535 }
2536
2537 static void
2538 filt_sigdetach(struct knote *kn)
2539 {
2540 struct proc *p = kn->kn_ptr.p_proc;
2541
2542 PROC_LOCK(p);
2543 SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
2544 PROC_UNLOCK(p);
2545 }
2546
2547 /*
2548 * signal knotes are shared with proc knotes, so we apply a mask to
2549 * the hint in order to differentiate them from process hints. This
2550 * could be avoided by using a signal-specific knote list, but probably
2551 * isn't worth the trouble.
2552 */
2553 static int
2554 filt_signal(struct knote *kn, long hint)
2555 {
2556
2557 if (hint & NOTE_SIGNAL) {
2558 hint &= ~NOTE_SIGNAL;
2559
2560 if (kn->kn_id == hint)
2561 kn->kn_data++;
2562 }
2563 return (kn->kn_data != 0);
2564 }
2565
2566 struct sigacts *
2567 sigacts_alloc(void)
2568 {
2569 struct sigacts *ps;
2570
2571 ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO);
2572 ps->ps_refcnt = 1;
2573 mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF);
2574 return (ps);
2575 }
2576
2577 void
2578 sigacts_free(struct sigacts *ps)
2579 {
2580
2581 mtx_lock(&ps->ps_mtx);
2582 ps->ps_refcnt--;
2583 if (ps->ps_refcnt == 0) {
2584 mtx_destroy(&ps->ps_mtx);
2585 free(ps, M_SUBPROC);
2586 } else
2587 mtx_unlock(&ps->ps_mtx);
2588 }
2589
2590 struct sigacts *
2591 sigacts_hold(struct sigacts *ps)
2592 {
2593 mtx_lock(&ps->ps_mtx);
2594 ps->ps_refcnt++;
2595 mtx_unlock(&ps->ps_mtx);
2596 return (ps);
2597 }
2598
2599 void
2600 sigacts_copy(struct sigacts *dest, struct sigacts *src)
2601 {
2602
2603 KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest"));
2604 mtx_lock(&src->ps_mtx);
2605 bcopy(src, dest, offsetof(struct sigacts, ps_refcnt));
2606 mtx_unlock(&src->ps_mtx);
2607 }
2608
2609 int
2610 sigacts_shared(struct sigacts *ps)
2611 {
2612 int shared;
2613
2614 mtx_lock(&ps->ps_mtx);
2615 shared = ps->ps_refcnt > 1;
2616 mtx_unlock(&ps->ps_mtx);
2617 return (shared);
2618 }
Cache object: a851d9faee6aa2383afa18b8f6081b5f
|