FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_sig.c
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1982, 1986, 1989, 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94
37 */
38
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD: releng/12.0/sys/kern/kern_sig.c 337330 2018-08-04 20:45:43Z kib $");
41
42 #include "opt_ktrace.h"
43
44 #include <sys/param.h>
45 #include <sys/ctype.h>
46 #include <sys/systm.h>
47 #include <sys/signalvar.h>
48 #include <sys/vnode.h>
49 #include <sys/acct.h>
50 #include <sys/bus.h>
51 #include <sys/capsicum.h>
52 #include <sys/compressor.h>
53 #include <sys/condvar.h>
54 #include <sys/event.h>
55 #include <sys/fcntl.h>
56 #include <sys/imgact.h>
57 #include <sys/kernel.h>
58 #include <sys/ktr.h>
59 #include <sys/ktrace.h>
60 #include <sys/lock.h>
61 #include <sys/malloc.h>
62 #include <sys/mutex.h>
63 #include <sys/refcount.h>
64 #include <sys/namei.h>
65 #include <sys/proc.h>
66 #include <sys/procdesc.h>
67 #include <sys/posix4.h>
68 #include <sys/pioctl.h>
69 #include <sys/racct.h>
70 #include <sys/resourcevar.h>
71 #include <sys/sdt.h>
72 #include <sys/sbuf.h>
73 #include <sys/sleepqueue.h>
74 #include <sys/smp.h>
75 #include <sys/stat.h>
76 #include <sys/sx.h>
77 #include <sys/syscallsubr.h>
78 #include <sys/sysctl.h>
79 #include <sys/sysent.h>
80 #include <sys/syslog.h>
81 #include <sys/sysproto.h>
82 #include <sys/timers.h>
83 #include <sys/unistd.h>
84 #include <sys/wait.h>
85 #include <vm/vm.h>
86 #include <vm/vm_extern.h>
87 #include <vm/uma.h>
88
89 #include <sys/jail.h>
90
91 #include <machine/cpu.h>
92
93 #include <security/audit/audit.h>
94
95 #define ONSIG 32 /* NSIG for osig* syscalls. XXX. */
96
97 SDT_PROVIDER_DECLARE(proc);
98 SDT_PROBE_DEFINE3(proc, , , signal__send,
99 "struct thread *", "struct proc *", "int");
100 SDT_PROBE_DEFINE2(proc, , , signal__clear,
101 "int", "ksiginfo_t *");
102 SDT_PROBE_DEFINE3(proc, , , signal__discard,
103 "struct thread *", "struct proc *", "int");
104
105 static int coredump(struct thread *);
106 static int killpg1(struct thread *td, int sig, int pgid, int all,
107 ksiginfo_t *ksi);
108 static int issignal(struct thread *td);
109 static int sigprop(int sig);
110 static void tdsigwakeup(struct thread *, int, sig_t, int);
111 static int sig_suspend_threads(struct thread *, struct proc *, int);
112 static int filt_sigattach(struct knote *kn);
113 static void filt_sigdetach(struct knote *kn);
114 static int filt_signal(struct knote *kn, long hint);
115 static struct thread *sigtd(struct proc *p, int sig, int prop);
116 static void sigqueue_start(void);
117
118 static uma_zone_t ksiginfo_zone = NULL;
119 struct filterops sig_filtops = {
120 .f_isfd = 0,
121 .f_attach = filt_sigattach,
122 .f_detach = filt_sigdetach,
123 .f_event = filt_signal,
124 };
125
126 static int kern_logsigexit = 1;
127 SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
128 &kern_logsigexit, 0,
129 "Log processes quitting on abnormal signals to syslog(3)");
130
131 static int kern_forcesigexit = 1;
132 SYSCTL_INT(_kern, OID_AUTO, forcesigexit, CTLFLAG_RW,
133 &kern_forcesigexit, 0, "Force trap signal to be handled");
134
135 static SYSCTL_NODE(_kern, OID_AUTO, sigqueue, CTLFLAG_RW, 0,
136 "POSIX real time signal");
137
138 static int max_pending_per_proc = 128;
139 SYSCTL_INT(_kern_sigqueue, OID_AUTO, max_pending_per_proc, CTLFLAG_RW,
140 &max_pending_per_proc, 0, "Max pending signals per proc");
141
142 static int preallocate_siginfo = 1024;
143 SYSCTL_INT(_kern_sigqueue, OID_AUTO, preallocate, CTLFLAG_RDTUN,
144 &preallocate_siginfo, 0, "Preallocated signal memory size");
145
146 static int signal_overflow = 0;
147 SYSCTL_INT(_kern_sigqueue, OID_AUTO, overflow, CTLFLAG_RD,
148     &signal_overflow, 0, "Number of signals overflowed");
149
150 static int signal_alloc_fail = 0;
151 SYSCTL_INT(_kern_sigqueue, OID_AUTO, alloc_fail, CTLFLAG_RD,
152 &signal_alloc_fail, 0, "signals failed to be allocated");
153
154 static int kern_lognosys = 0;
155 SYSCTL_INT(_kern, OID_AUTO, lognosys, CTLFLAG_RWTUN, &kern_lognosys, 0,
156 "Log invalid syscalls");
157
158 SYSINIT(signal, SI_SUB_P1003_1B, SI_ORDER_FIRST+3, sigqueue_start, NULL);
159
160 /*
161  * Policy -- Can a process with ucred cr1 send SIGIO to a process with ucred cr2?
162 * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
163 * in the right situations.
164 */
165 #define CANSIGIO(cr1, cr2) \
166 ((cr1)->cr_uid == 0 || \
167 (cr1)->cr_ruid == (cr2)->cr_ruid || \
168 (cr1)->cr_uid == (cr2)->cr_ruid || \
169 (cr1)->cr_ruid == (cr2)->cr_uid || \
170 (cr1)->cr_uid == (cr2)->cr_uid)
171
172 static int sugid_coredump;
173 SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RWTUN,
174 &sugid_coredump, 0, "Allow setuid and setgid processes to dump core");
175
176 static int capmode_coredump;
177 SYSCTL_INT(_kern, OID_AUTO, capmode_coredump, CTLFLAG_RWTUN,
178 &capmode_coredump, 0, "Allow processes in capability mode to dump core");
179
180 static int do_coredump = 1;
181 SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
182 &do_coredump, 0, "Enable/Disable coredumps");
183
184 static int set_core_nodump_flag = 0;
185 SYSCTL_INT(_kern, OID_AUTO, nodump_coredump, CTLFLAG_RW, &set_core_nodump_flag,
186 0, "Enable setting the NODUMP flag on coredump files");
187
188 static int coredump_devctl = 0;
189 SYSCTL_INT(_kern, OID_AUTO, coredump_devctl, CTLFLAG_RW, &coredump_devctl,
190 0, "Generate a devctl notification when processes coredump");
191
192 /*
193 * Signal properties and actions.
194 * The array below categorizes the signals and their default actions
195 * according to the following properties:
196 */
197 #define SIGPROP_KILL 0x01 /* terminates process by default */
198 #define SIGPROP_CORE 0x02 /* ditto and coredumps */
199 #define SIGPROP_STOP 0x04 /* suspend process */
200 #define SIGPROP_TTYSTOP 0x08 /* ditto, from tty */
201 #define SIGPROP_IGNORE 0x10 /* ignore by default */
202 #define SIGPROP_CONT 0x20 /* continue if suspended */
203 #define SIGPROP_CANTMASK 0x40 /* non-maskable, catchable */
204
205 static int sigproptbl[NSIG] = {
206 [SIGHUP] = SIGPROP_KILL,
207 [SIGINT] = SIGPROP_KILL,
208 [SIGQUIT] = SIGPROP_KILL | SIGPROP_CORE,
209 [SIGILL] = SIGPROP_KILL | SIGPROP_CORE,
210 [SIGTRAP] = SIGPROP_KILL | SIGPROP_CORE,
211 [SIGABRT] = SIGPROP_KILL | SIGPROP_CORE,
212 [SIGEMT] = SIGPROP_KILL | SIGPROP_CORE,
213 [SIGFPE] = SIGPROP_KILL | SIGPROP_CORE,
214 [SIGKILL] = SIGPROP_KILL,
215 [SIGBUS] = SIGPROP_KILL | SIGPROP_CORE,
216 [SIGSEGV] = SIGPROP_KILL | SIGPROP_CORE,
217 [SIGSYS] = SIGPROP_KILL | SIGPROP_CORE,
218 [SIGPIPE] = SIGPROP_KILL,
219 [SIGALRM] = SIGPROP_KILL,
220 [SIGTERM] = SIGPROP_KILL,
221 [SIGURG] = SIGPROP_IGNORE,
222 [SIGSTOP] = SIGPROP_STOP,
223 [SIGTSTP] = SIGPROP_STOP | SIGPROP_TTYSTOP,
224 [SIGCONT] = SIGPROP_IGNORE | SIGPROP_CONT,
225 [SIGCHLD] = SIGPROP_IGNORE,
226 [SIGTTIN] = SIGPROP_STOP | SIGPROP_TTYSTOP,
227 [SIGTTOU] = SIGPROP_STOP | SIGPROP_TTYSTOP,
228 [SIGIO] = SIGPROP_IGNORE,
229 [SIGXCPU] = SIGPROP_KILL,
230 [SIGXFSZ] = SIGPROP_KILL,
231 [SIGVTALRM] = SIGPROP_KILL,
232 [SIGPROF] = SIGPROP_KILL,
233 [SIGWINCH] = SIGPROP_IGNORE,
234 [SIGINFO] = SIGPROP_IGNORE,
235 [SIGUSR1] = SIGPROP_KILL,
236 [SIGUSR2] = SIGPROP_KILL,
237 };
238
239 static void reschedule_signals(struct proc *p, sigset_t block, int flags);
240
241 static void
242 sigqueue_start(void)
243 {
244 ksiginfo_zone = uma_zcreate("ksiginfo", sizeof(ksiginfo_t),
245 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
246 uma_prealloc(ksiginfo_zone, preallocate_siginfo);
247 p31b_setcfg(CTL_P1003_1B_REALTIME_SIGNALS, _POSIX_REALTIME_SIGNALS);
248 p31b_setcfg(CTL_P1003_1B_RTSIG_MAX, SIGRTMAX - SIGRTMIN + 1);
249 p31b_setcfg(CTL_P1003_1B_SIGQUEUE_MAX, max_pending_per_proc);
250 }
251
252 ksiginfo_t *
253 ksiginfo_alloc(int wait)
254 {
255 int flags;
256
257 flags = M_ZERO;
258 if (! wait)
259 flags |= M_NOWAIT;
260 if (ksiginfo_zone != NULL)
261 return ((ksiginfo_t *)uma_zalloc(ksiginfo_zone, flags));
262 return (NULL);
263 }
264
265 void
266 ksiginfo_free(ksiginfo_t *ksi)
267 {
268 uma_zfree(ksiginfo_zone, ksi);
269 }
270
271 static __inline int
272 ksiginfo_tryfree(ksiginfo_t *ksi)
273 {
274 if (!(ksi->ksi_flags & KSI_EXT)) {
275 uma_zfree(ksiginfo_zone, ksi);
276 return (1);
277 }
278 return (0);
279 }
280
281 void
282 sigqueue_init(sigqueue_t *list, struct proc *p)
283 {
284 SIGEMPTYSET(list->sq_signals);
285 SIGEMPTYSET(list->sq_kill);
286 SIGEMPTYSET(list->sq_ptrace);
287 TAILQ_INIT(&list->sq_list);
288 list->sq_proc = p;
289 list->sq_flags = SQ_INIT;
290 }
291
292 /*
293 * Get a signal's ksiginfo.
294 * Return:
295 * 0 - signal not found
296 * others - signal number
297 */
298 static int
299 sigqueue_get(sigqueue_t *sq, int signo, ksiginfo_t *si)
300 {
301 struct proc *p = sq->sq_proc;
302 struct ksiginfo *ksi, *next;
303 int count = 0;
304
305 KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
306
307 if (!SIGISMEMBER(sq->sq_signals, signo))
308 return (0);
309
310 if (SIGISMEMBER(sq->sq_ptrace, signo)) {
311 count++;
312 SIGDELSET(sq->sq_ptrace, signo);
313 si->ksi_flags |= KSI_PTRACE;
314 }
315 if (SIGISMEMBER(sq->sq_kill, signo)) {
316 count++;
317 if (count == 1)
318 SIGDELSET(sq->sq_kill, signo);
319 }
320
321 TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
322 if (ksi->ksi_signo == signo) {
323 if (count == 0) {
324 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
325 ksi->ksi_sigq = NULL;
326 ksiginfo_copy(ksi, si);
327 if (ksiginfo_tryfree(ksi) && p != NULL)
328 p->p_pendingcnt--;
329 }
330 if (++count > 1)
331 break;
332 }
333 }
334
335 if (count <= 1)
336 SIGDELSET(sq->sq_signals, signo);
337 si->ksi_signo = signo;
338 return (signo);
339 }
340
341 void
342 sigqueue_take(ksiginfo_t *ksi)
343 {
344 struct ksiginfo *kp;
345 struct proc *p;
346 sigqueue_t *sq;
347
348 if (ksi == NULL || (sq = ksi->ksi_sigq) == NULL)
349 return;
350
351 p = sq->sq_proc;
352 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
353 ksi->ksi_sigq = NULL;
354 if (!(ksi->ksi_flags & KSI_EXT) && p != NULL)
355 p->p_pendingcnt--;
356
357 for (kp = TAILQ_FIRST(&sq->sq_list); kp != NULL;
358 kp = TAILQ_NEXT(kp, ksi_link)) {
359 if (kp->ksi_signo == ksi->ksi_signo)
360 break;
361 }
362 if (kp == NULL && !SIGISMEMBER(sq->sq_kill, ksi->ksi_signo) &&
363 !SIGISMEMBER(sq->sq_ptrace, ksi->ksi_signo))
364 SIGDELSET(sq->sq_signals, ksi->ksi_signo);
365 }
366
367 static int
368 sigqueue_add(sigqueue_t *sq, int signo, ksiginfo_t *si)
369 {
370 struct proc *p = sq->sq_proc;
371 struct ksiginfo *ksi;
372 int ret = 0;
373
374 KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
375
376 /*
377 * SIGKILL/SIGSTOP cannot be caught or masked, so take the fast path
378 * for these signals.
379 */
380 if (signo == SIGKILL || signo == SIGSTOP || si == NULL) {
381 SIGADDSET(sq->sq_kill, signo);
382 goto out_set_bit;
383 }
384
385 /* directly insert the ksi, don't copy it */
386 if (si->ksi_flags & KSI_INS) {
387 if (si->ksi_flags & KSI_HEAD)
388 TAILQ_INSERT_HEAD(&sq->sq_list, si, ksi_link);
389 else
390 TAILQ_INSERT_TAIL(&sq->sq_list, si, ksi_link);
391 si->ksi_sigq = sq;
392 goto out_set_bit;
393 }
394
395 if (__predict_false(ksiginfo_zone == NULL)) {
396 SIGADDSET(sq->sq_kill, signo);
397 goto out_set_bit;
398 }
399
400 if (p != NULL && p->p_pendingcnt >= max_pending_per_proc) {
401 signal_overflow++;
402 ret = EAGAIN;
403 } else if ((ksi = ksiginfo_alloc(0)) == NULL) {
404 signal_alloc_fail++;
405 ret = EAGAIN;
406 } else {
407 if (p != NULL)
408 p->p_pendingcnt++;
409 ksiginfo_copy(si, ksi);
410 ksi->ksi_signo = signo;
411 if (si->ksi_flags & KSI_HEAD)
412 TAILQ_INSERT_HEAD(&sq->sq_list, ksi, ksi_link);
413 else
414 TAILQ_INSERT_TAIL(&sq->sq_list, ksi, ksi_link);
415 ksi->ksi_sigq = sq;
416 }
417
418 if (ret != 0) {
419 if ((si->ksi_flags & KSI_PTRACE) != 0) {
420 SIGADDSET(sq->sq_ptrace, signo);
421 ret = 0;
422 goto out_set_bit;
423 } else if ((si->ksi_flags & KSI_TRAP) != 0 ||
424 (si->ksi_flags & KSI_SIGQ) == 0) {
425 SIGADDSET(sq->sq_kill, signo);
426 ret = 0;
427 goto out_set_bit;
428 }
429 return (ret);
430 }
431
432 out_set_bit:
433 SIGADDSET(sq->sq_signals, signo);
434 return (ret);
435 }
436
437 void
438 sigqueue_flush(sigqueue_t *sq)
439 {
440 struct proc *p = sq->sq_proc;
441 ksiginfo_t *ksi;
442
443 KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
444
445 if (p != NULL)
446 PROC_LOCK_ASSERT(p, MA_OWNED);
447
448 while ((ksi = TAILQ_FIRST(&sq->sq_list)) != NULL) {
449 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
450 ksi->ksi_sigq = NULL;
451 if (ksiginfo_tryfree(ksi) && p != NULL)
452 p->p_pendingcnt--;
453 }
454
455 SIGEMPTYSET(sq->sq_signals);
456 SIGEMPTYSET(sq->sq_kill);
457 SIGEMPTYSET(sq->sq_ptrace);
458 }
459
460 static void
461 sigqueue_move_set(sigqueue_t *src, sigqueue_t *dst, const sigset_t *set)
462 {
463 sigset_t tmp;
464 struct proc *p1, *p2;
465 ksiginfo_t *ksi, *next;
466
467 KASSERT(src->sq_flags & SQ_INIT, ("src sigqueue not inited"));
468 KASSERT(dst->sq_flags & SQ_INIT, ("dst sigqueue not inited"));
469 p1 = src->sq_proc;
470 p2 = dst->sq_proc;
471 /* Move siginfo to target list */
472 TAILQ_FOREACH_SAFE(ksi, &src->sq_list, ksi_link, next) {
473 if (SIGISMEMBER(*set, ksi->ksi_signo)) {
474 TAILQ_REMOVE(&src->sq_list, ksi, ksi_link);
475 if (p1 != NULL)
476 p1->p_pendingcnt--;
477 TAILQ_INSERT_TAIL(&dst->sq_list, ksi, ksi_link);
478 ksi->ksi_sigq = dst;
479 if (p2 != NULL)
480 p2->p_pendingcnt++;
481 }
482 }
483
484 /* Move pending bits to target list */
485 tmp = src->sq_kill;
486 SIGSETAND(tmp, *set);
487 SIGSETOR(dst->sq_kill, tmp);
488 SIGSETNAND(src->sq_kill, tmp);
489
490 tmp = src->sq_ptrace;
491 SIGSETAND(tmp, *set);
492 SIGSETOR(dst->sq_ptrace, tmp);
493 SIGSETNAND(src->sq_ptrace, tmp);
494
495 tmp = src->sq_signals;
496 SIGSETAND(tmp, *set);
497 SIGSETOR(dst->sq_signals, tmp);
498 SIGSETNAND(src->sq_signals, tmp);
499 }
500
501 #if 0
502 static void
503 sigqueue_move(sigqueue_t *src, sigqueue_t *dst, int signo)
504 {
505 sigset_t set;
506
507 SIGEMPTYSET(set);
508 SIGADDSET(set, signo);
509 sigqueue_move_set(src, dst, &set);
510 }
511 #endif
512
513 static void
514 sigqueue_delete_set(sigqueue_t *sq, const sigset_t *set)
515 {
516 struct proc *p = sq->sq_proc;
517 ksiginfo_t *ksi, *next;
518
519 KASSERT(sq->sq_flags & SQ_INIT, ("src sigqueue not inited"));
520
521 /* Remove siginfo queue */
522 TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
523 if (SIGISMEMBER(*set, ksi->ksi_signo)) {
524 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
525 ksi->ksi_sigq = NULL;
526 if (ksiginfo_tryfree(ksi) && p != NULL)
527 p->p_pendingcnt--;
528 }
529 }
530 SIGSETNAND(sq->sq_kill, *set);
531 SIGSETNAND(sq->sq_ptrace, *set);
532 SIGSETNAND(sq->sq_signals, *set);
533 }
534
535 void
536 sigqueue_delete(sigqueue_t *sq, int signo)
537 {
538 sigset_t set;
539
540 SIGEMPTYSET(set);
541 SIGADDSET(set, signo);
542 sigqueue_delete_set(sq, &set);
543 }
544
545 /* Remove a set of signals for a process */
546 static void
547 sigqueue_delete_set_proc(struct proc *p, const sigset_t *set)
548 {
549 sigqueue_t worklist;
550 struct thread *td0;
551
552 PROC_LOCK_ASSERT(p, MA_OWNED);
553
554 sigqueue_init(&worklist, NULL);
555 sigqueue_move_set(&p->p_sigqueue, &worklist, set);
556
557 FOREACH_THREAD_IN_PROC(p, td0)
558 sigqueue_move_set(&td0->td_sigqueue, &worklist, set);
559
560 sigqueue_flush(&worklist);
561 }
562
563 void
564 sigqueue_delete_proc(struct proc *p, int signo)
565 {
566 sigset_t set;
567
568 SIGEMPTYSET(set);
569 SIGADDSET(set, signo);
570 sigqueue_delete_set_proc(p, &set);
571 }
572
573 static void
574 sigqueue_delete_stopmask_proc(struct proc *p)
575 {
576 sigset_t set;
577
578 SIGEMPTYSET(set);
579 SIGADDSET(set, SIGSTOP);
580 SIGADDSET(set, SIGTSTP);
581 SIGADDSET(set, SIGTTIN);
582 SIGADDSET(set, SIGTTOU);
583 sigqueue_delete_set_proc(p, &set);
584 }
585
586 /*
587  * Determine the signal that should be delivered to thread td (the
588  * current thread), or 0 if none.  If there is a pending stop signal
589  * with default action, the process stops in issignal().
590 */
591 int
592 cursig(struct thread *td)
593 {
594 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
595 mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED);
596 THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
597 return (SIGPENDING(td) ? issignal(td) : 0);
598 }
599
600 /*
601 * Arrange for ast() to handle unmasked pending signals on return to user
602 * mode. This must be called whenever a signal is added to td_sigqueue or
603 * unmasked in td_sigmask.
604 */
605 void
606 signotify(struct thread *td)
607 {
608
609 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
610
611 if (SIGPENDING(td)) {
612 thread_lock(td);
613 td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING;
614 thread_unlock(td);
615 }
616 }
617
618 int
619 sigonstack(size_t sp)
620 {
621 struct thread *td = curthread;
622
623 return ((td->td_pflags & TDP_ALTSTACK) ?
624 #if defined(COMPAT_43)
625 ((td->td_sigstk.ss_size == 0) ?
626 (td->td_sigstk.ss_flags & SS_ONSTACK) :
627 ((sp - (size_t)td->td_sigstk.ss_sp) < td->td_sigstk.ss_size))
628 #else
629 ((sp - (size_t)td->td_sigstk.ss_sp) < td->td_sigstk.ss_size)
630 #endif
631 : 0);
632 }
633
634 static __inline int
635 sigprop(int sig)
636 {
637
638 if (sig > 0 && sig < nitems(sigproptbl))
639 return (sigproptbl[sig]);
640 return (0);
641 }
642
643 int
644 sig_ffs(sigset_t *set)
645 {
646 int i;
647
648 for (i = 0; i < _SIG_WORDS; i++)
649 if (set->__bits[i])
650 return (ffs(set->__bits[i]) + (i * 32));
651 return (0);
652 }
653
654 static bool
655 sigact_flag_test(const struct sigaction *act, int flag)
656 {
657
658 /*
659 * SA_SIGINFO is reset when signal disposition is set to
660 * ignore or default. Other flags are kept according to user
661 * settings.
662 */
663 return ((act->sa_flags & flag) != 0 && (flag != SA_SIGINFO ||
664 ((__sighandler_t *)act->sa_sigaction != SIG_IGN &&
665 (__sighandler_t *)act->sa_sigaction != SIG_DFL)));
666 }
667
668 /*
669 * kern_sigaction
670 * sigaction
671 * freebsd4_sigaction
672 * osigaction
673 */
674 int
675 kern_sigaction(struct thread *td, int sig, const struct sigaction *act,
676 struct sigaction *oact, int flags)
677 {
678 struct sigacts *ps;
679 struct proc *p = td->td_proc;
680
681 if (!_SIG_VALID(sig))
682 return (EINVAL);
683 if (act != NULL && act->sa_handler != SIG_DFL &&
684 act->sa_handler != SIG_IGN && (act->sa_flags & ~(SA_ONSTACK |
685 SA_RESTART | SA_RESETHAND | SA_NOCLDSTOP | SA_NODEFER |
686 SA_NOCLDWAIT | SA_SIGINFO)) != 0)
687 return (EINVAL);
688
689 PROC_LOCK(p);
690 ps = p->p_sigacts;
691 mtx_lock(&ps->ps_mtx);
692 if (oact) {
693 memset(oact, 0, sizeof(*oact));
694 oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
695 if (SIGISMEMBER(ps->ps_sigonstack, sig))
696 oact->sa_flags |= SA_ONSTACK;
697 if (!SIGISMEMBER(ps->ps_sigintr, sig))
698 oact->sa_flags |= SA_RESTART;
699 if (SIGISMEMBER(ps->ps_sigreset, sig))
700 oact->sa_flags |= SA_RESETHAND;
701 if (SIGISMEMBER(ps->ps_signodefer, sig))
702 oact->sa_flags |= SA_NODEFER;
703 if (SIGISMEMBER(ps->ps_siginfo, sig)) {
704 oact->sa_flags |= SA_SIGINFO;
705 oact->sa_sigaction =
706 (__siginfohandler_t *)ps->ps_sigact[_SIG_IDX(sig)];
707 } else
708 oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
709 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP)
710 oact->sa_flags |= SA_NOCLDSTOP;
711 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT)
712 oact->sa_flags |= SA_NOCLDWAIT;
713 }
714 if (act) {
715 if ((sig == SIGKILL || sig == SIGSTOP) &&
716 act->sa_handler != SIG_DFL) {
717 mtx_unlock(&ps->ps_mtx);
718 PROC_UNLOCK(p);
719 return (EINVAL);
720 }
721
722 /*
723 * Change setting atomically.
724 */
725
726 ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
727 SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
728 if (sigact_flag_test(act, SA_SIGINFO)) {
729 ps->ps_sigact[_SIG_IDX(sig)] =
730 (__sighandler_t *)act->sa_sigaction;
731 SIGADDSET(ps->ps_siginfo, sig);
732 } else {
733 ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
734 SIGDELSET(ps->ps_siginfo, sig);
735 }
736 if (!sigact_flag_test(act, SA_RESTART))
737 SIGADDSET(ps->ps_sigintr, sig);
738 else
739 SIGDELSET(ps->ps_sigintr, sig);
740 if (sigact_flag_test(act, SA_ONSTACK))
741 SIGADDSET(ps->ps_sigonstack, sig);
742 else
743 SIGDELSET(ps->ps_sigonstack, sig);
744 if (sigact_flag_test(act, SA_RESETHAND))
745 SIGADDSET(ps->ps_sigreset, sig);
746 else
747 SIGDELSET(ps->ps_sigreset, sig);
748 if (sigact_flag_test(act, SA_NODEFER))
749 SIGADDSET(ps->ps_signodefer, sig);
750 else
751 SIGDELSET(ps->ps_signodefer, sig);
752 if (sig == SIGCHLD) {
753 if (act->sa_flags & SA_NOCLDSTOP)
754 ps->ps_flag |= PS_NOCLDSTOP;
755 else
756 ps->ps_flag &= ~PS_NOCLDSTOP;
757 if (act->sa_flags & SA_NOCLDWAIT) {
758 /*
759 * Paranoia: since SA_NOCLDWAIT is implemented
760 * by reparenting the dying child to PID 1 (and
761 * trust it to reap the zombie), PID 1 itself
762 * is forbidden to set SA_NOCLDWAIT.
763 */
764 if (p->p_pid == 1)
765 ps->ps_flag &= ~PS_NOCLDWAIT;
766 else
767 ps->ps_flag |= PS_NOCLDWAIT;
768 } else
769 ps->ps_flag &= ~PS_NOCLDWAIT;
770 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
771 ps->ps_flag |= PS_CLDSIGIGN;
772 else
773 ps->ps_flag &= ~PS_CLDSIGIGN;
774 }
775 /*
776 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
777 * and for signals set to SIG_DFL where the default is to
778 * ignore. However, don't put SIGCONT in ps_sigignore, as we
779 * have to restart the process.
780 */
781 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
782 (sigprop(sig) & SIGPROP_IGNORE &&
783 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
784 /* never to be seen again */
785 sigqueue_delete_proc(p, sig);
786 if (sig != SIGCONT)
787 /* easier in psignal */
788 SIGADDSET(ps->ps_sigignore, sig);
789 SIGDELSET(ps->ps_sigcatch, sig);
790 } else {
791 SIGDELSET(ps->ps_sigignore, sig);
792 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
793 SIGDELSET(ps->ps_sigcatch, sig);
794 else
795 SIGADDSET(ps->ps_sigcatch, sig);
796 }
797 #ifdef COMPAT_FREEBSD4
798 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
799 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
800 (flags & KSA_FREEBSD4) == 0)
801 SIGDELSET(ps->ps_freebsd4, sig);
802 else
803 SIGADDSET(ps->ps_freebsd4, sig);
804 #endif
805 #ifdef COMPAT_43
806 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
807 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
808 (flags & KSA_OSIGSET) == 0)
809 SIGDELSET(ps->ps_osigset, sig);
810 else
811 SIGADDSET(ps->ps_osigset, sig);
812 #endif
813 }
814 mtx_unlock(&ps->ps_mtx);
815 PROC_UNLOCK(p);
816 return (0);
817 }
818
819 #ifndef _SYS_SYSPROTO_H_
820 struct sigaction_args {
821 int sig;
822 struct sigaction *act;
823 struct sigaction *oact;
824 };
825 #endif
826 int
827 sys_sigaction(struct thread *td, struct sigaction_args *uap)
828 {
829 struct sigaction act, oact;
830 struct sigaction *actp, *oactp;
831 int error;
832
833 actp = (uap->act != NULL) ? &act : NULL;
834 oactp = (uap->oact != NULL) ? &oact : NULL;
835 if (actp) {
836 error = copyin(uap->act, actp, sizeof(act));
837 if (error)
838 return (error);
839 }
840 error = kern_sigaction(td, uap->sig, actp, oactp, 0);
841 if (oactp && !error)
842 error = copyout(oactp, uap->oact, sizeof(oact));
843 return (error);
844 }
845
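As a hedged userland illustration of the sigaction(2) interface implemented by kern_sigaction() above (not part of kern_sig.c; the handler name and the choice of SIGUSR1 are arbitrary), a minimal SA_SIGINFO handler installation looks like this:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Handler name is arbitrary; only async-signal-safe calls belong inside. */
static void
usr1_handler(int signo, siginfo_t *si, void *ctx)
{
	(void)signo; (void)si; (void)ctx;
	write(STDOUT_FILENO, "got SIGUSR1\n", 12);
}

int
main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = usr1_handler;
	sa.sa_flags = SA_SIGINFO | SA_RESTART;	/* flags validated by kern_sigaction() */
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGUSR1, &sa, NULL) == -1) {
		perror("sigaction");
		return (1);
	}
	for (;;)
		pause();		/* wait for SIGUSR1 */
}
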
846 #ifdef COMPAT_FREEBSD4
847 #ifndef _SYS_SYSPROTO_H_
848 struct freebsd4_sigaction_args {
849 int sig;
850 struct sigaction *act;
851 struct sigaction *oact;
852 };
853 #endif
854 int
855 freebsd4_sigaction(struct thread *td, struct freebsd4_sigaction_args *uap)
856 {
857 struct sigaction act, oact;
858 struct sigaction *actp, *oactp;
859 int error;
860
861
862 actp = (uap->act != NULL) ? &act : NULL;
863 oactp = (uap->oact != NULL) ? &oact : NULL;
864 if (actp) {
865 error = copyin(uap->act, actp, sizeof(act));
866 if (error)
867 return (error);
868 }
869 error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4);
870 if (oactp && !error)
871 error = copyout(oactp, uap->oact, sizeof(oact));
872 return (error);
873 }
874 #endif /* COMPAT_FREEBSD4 */
875
876 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
877 #ifndef _SYS_SYSPROTO_H_
878 struct osigaction_args {
879 int signum;
880 struct osigaction *nsa;
881 struct osigaction *osa;
882 };
883 #endif
884 int
885 osigaction(struct thread *td, struct osigaction_args *uap)
886 {
887 struct osigaction sa;
888 struct sigaction nsa, osa;
889 struct sigaction *nsap, *osap;
890 int error;
891
892 if (uap->signum <= 0 || uap->signum >= ONSIG)
893 return (EINVAL);
894
895 nsap = (uap->nsa != NULL) ? &nsa : NULL;
896 osap = (uap->osa != NULL) ? &osa : NULL;
897
898 if (nsap) {
899 error = copyin(uap->nsa, &sa, sizeof(sa));
900 if (error)
901 return (error);
902 nsap->sa_handler = sa.sa_handler;
903 nsap->sa_flags = sa.sa_flags;
904 OSIG2SIG(sa.sa_mask, nsap->sa_mask);
905 }
906 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
907 if (osap && !error) {
908 sa.sa_handler = osap->sa_handler;
909 sa.sa_flags = osap->sa_flags;
910 SIG2OSIG(osap->sa_mask, sa.sa_mask);
911 error = copyout(&sa, uap->osa, sizeof(sa));
912 }
913 return (error);
914 }
915
916 #if !defined(__i386__)
917 /* Avoid replicating the same stub everywhere */
918 int
919 osigreturn(struct thread *td, struct osigreturn_args *uap)
920 {
921
922 return (nosys(td, (struct nosys_args *)uap));
923 }
924 #endif
925 #endif /* COMPAT_43 */
926
927 /*
928 * Initialize signal state for process 0;
929 * set to ignore signals that are ignored by default.
930 */
931 void
932 siginit(struct proc *p)
933 {
934 int i;
935 struct sigacts *ps;
936
937 PROC_LOCK(p);
938 ps = p->p_sigacts;
939 mtx_lock(&ps->ps_mtx);
940 for (i = 1; i <= NSIG; i++) {
941 if (sigprop(i) & SIGPROP_IGNORE && i != SIGCONT) {
942 SIGADDSET(ps->ps_sigignore, i);
943 }
944 }
945 mtx_unlock(&ps->ps_mtx);
946 PROC_UNLOCK(p);
947 }
948
949 /*
950 * Reset specified signal to the default disposition.
951 */
952 static void
953 sigdflt(struct sigacts *ps, int sig)
954 {
955
956 mtx_assert(&ps->ps_mtx, MA_OWNED);
957 SIGDELSET(ps->ps_sigcatch, sig);
958 if ((sigprop(sig) & SIGPROP_IGNORE) != 0 && sig != SIGCONT)
959 SIGADDSET(ps->ps_sigignore, sig);
960 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
961 SIGDELSET(ps->ps_siginfo, sig);
962 }
963
964 /*
965 * Reset signals for an exec of the specified process.
966 */
967 void
968 execsigs(struct proc *p)
969 {
970 sigset_t osigignore;
971 struct sigacts *ps;
972 int sig;
973 struct thread *td;
974
975 /*
976 * Reset caught signals. Held signals remain held
977 * through td_sigmask (unless they were caught,
978 * and are now ignored by default).
979 */
980 PROC_LOCK_ASSERT(p, MA_OWNED);
981 ps = p->p_sigacts;
982 mtx_lock(&ps->ps_mtx);
983 while (SIGNOTEMPTY(ps->ps_sigcatch)) {
984 sig = sig_ffs(&ps->ps_sigcatch);
985 sigdflt(ps, sig);
986 if ((sigprop(sig) & SIGPROP_IGNORE) != 0)
987 sigqueue_delete_proc(p, sig);
988 }
989
990 /*
991 * As CloudABI processes cannot modify signal handlers, fully
992 * reset all signals to their default behavior. Do ignore
993 * SIGPIPE, as it would otherwise be impossible to recover from
994 * writes to broken pipes and sockets.
995 */
996 if (SV_PROC_ABI(p) == SV_ABI_CLOUDABI) {
997 osigignore = ps->ps_sigignore;
998 while (SIGNOTEMPTY(osigignore)) {
999 sig = sig_ffs(&osigignore);
1000 SIGDELSET(osigignore, sig);
1001 if (sig != SIGPIPE)
1002 sigdflt(ps, sig);
1003 }
1004 SIGADDSET(ps->ps_sigignore, SIGPIPE);
1005 }
1006
1007 /*
1008 * Reset stack state to the user stack.
1009 * Clear set of signals caught on the signal stack.
1010 */
1011 td = curthread;
1012 MPASS(td->td_proc == p);
1013 td->td_sigstk.ss_flags = SS_DISABLE;
1014 td->td_sigstk.ss_size = 0;
1015 td->td_sigstk.ss_sp = 0;
1016 td->td_pflags &= ~TDP_ALTSTACK;
1017 /*
1018 	 * Reset the "no zombies if child dies" flag, as Solaris does.
1019 */
1020 ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
1021 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
1022 ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
1023 mtx_unlock(&ps->ps_mtx);
1024 }
1025
1026 /*
1027 * kern_sigprocmask()
1028 *
1029 * Manipulate signal mask.
1030 */
1031 int
1032 kern_sigprocmask(struct thread *td, int how, sigset_t *set, sigset_t *oset,
1033 int flags)
1034 {
1035 sigset_t new_block, oset1;
1036 struct proc *p;
1037 int error;
1038
1039 p = td->td_proc;
1040 if ((flags & SIGPROCMASK_PROC_LOCKED) != 0)
1041 PROC_LOCK_ASSERT(p, MA_OWNED);
1042 else
1043 PROC_LOCK(p);
1044 mtx_assert(&p->p_sigacts->ps_mtx, (flags & SIGPROCMASK_PS_LOCKED) != 0
1045 ? MA_OWNED : MA_NOTOWNED);
1046 if (oset != NULL)
1047 *oset = td->td_sigmask;
1048
1049 error = 0;
1050 if (set != NULL) {
1051 switch (how) {
1052 case SIG_BLOCK:
1053 SIG_CANTMASK(*set);
1054 oset1 = td->td_sigmask;
1055 SIGSETOR(td->td_sigmask, *set);
1056 new_block = td->td_sigmask;
1057 SIGSETNAND(new_block, oset1);
1058 break;
1059 case SIG_UNBLOCK:
1060 SIGSETNAND(td->td_sigmask, *set);
1061 signotify(td);
1062 goto out;
1063 case SIG_SETMASK:
1064 SIG_CANTMASK(*set);
1065 oset1 = td->td_sigmask;
1066 if (flags & SIGPROCMASK_OLD)
1067 SIGSETLO(td->td_sigmask, *set);
1068 else
1069 td->td_sigmask = *set;
1070 new_block = td->td_sigmask;
1071 SIGSETNAND(new_block, oset1);
1072 signotify(td);
1073 break;
1074 default:
1075 error = EINVAL;
1076 goto out;
1077 }
1078
1079 /*
1080 * The new_block set contains signals that were not previously
1081 * blocked, but are blocked now.
1082 *
1083 * In case we block any signal that was not previously blocked
1084 * for td, and process has the signal pending, try to schedule
1085 * signal delivery to some thread that does not block the
1086 * signal, possibly waking it up.
1087 */
1088 if (p->p_numthreads != 1)
1089 reschedule_signals(p, new_block, flags);
1090 }
1091
1092 out:
1093 if (!(flags & SIGPROCMASK_PROC_LOCKED))
1094 PROC_UNLOCK(p);
1095 return (error);
1096 }
1097
1098 #ifndef _SYS_SYSPROTO_H_
1099 struct sigprocmask_args {
1100 int how;
1101 const sigset_t *set;
1102 sigset_t *oset;
1103 };
1104 #endif
1105 int
1106 sys_sigprocmask(struct thread *td, struct sigprocmask_args *uap)
1107 {
1108 sigset_t set, oset;
1109 sigset_t *setp, *osetp;
1110 int error;
1111
1112 setp = (uap->set != NULL) ? &set : NULL;
1113 osetp = (uap->oset != NULL) ? &oset : NULL;
1114 if (setp) {
1115 error = copyin(uap->set, setp, sizeof(set));
1116 if (error)
1117 return (error);
1118 }
1119 error = kern_sigprocmask(td, uap->how, setp, osetp, 0);
1120 if (osetp && !error) {
1121 error = copyout(osetp, uap->oset, sizeof(oset));
1122 }
1123 return (error);
1124 }
1125
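A minimal userland sketch of sigprocmask(2), the interface served by kern_sigprocmask() above (illustrative only, not part of this file), blocking SIGINT around a critical section and then restoring the saved mask:

#include <signal.h>
#include <stdio.h>

int
main(void)
{
	sigset_t block, oldmask;

	sigemptyset(&block);
	sigaddset(&block, SIGINT);

	/* SIG_BLOCK: corresponds to the new_block handling above. */
	if (sigprocmask(SIG_BLOCK, &block, &oldmask) == -1) {
		perror("sigprocmask");
		return (1);
	}

	/* ... critical section: SIGINT stays pending instead of being delivered ... */

	/* Restore the previous mask; any pending SIGINT is delivered now. */
	if (sigprocmask(SIG_SETMASK, &oldmask, NULL) == -1) {
		perror("sigprocmask");
		return (1);
	}
	return (0);
}
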
1126 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
1127 #ifndef _SYS_SYSPROTO_H_
1128 struct osigprocmask_args {
1129 int how;
1130 osigset_t mask;
1131 };
1132 #endif
1133 int
1134 osigprocmask(struct thread *td, struct osigprocmask_args *uap)
1135 {
1136 sigset_t set, oset;
1137 int error;
1138
1139 OSIG2SIG(uap->mask, set);
1140 error = kern_sigprocmask(td, uap->how, &set, &oset, 1);
1141 SIG2OSIG(oset, td->td_retval[0]);
1142 return (error);
1143 }
1144 #endif /* COMPAT_43 */
1145
1146 int
1147 sys_sigwait(struct thread *td, struct sigwait_args *uap)
1148 {
1149 ksiginfo_t ksi;
1150 sigset_t set;
1151 int error;
1152
1153 error = copyin(uap->set, &set, sizeof(set));
1154 if (error) {
1155 td->td_retval[0] = error;
1156 return (0);
1157 }
1158
1159 error = kern_sigtimedwait(td, set, &ksi, NULL);
1160 if (error) {
1161 if (error == EINTR && td->td_proc->p_osrel < P_OSREL_SIGWAIT)
1162 error = ERESTART;
1163 if (error == ERESTART)
1164 return (error);
1165 td->td_retval[0] = error;
1166 return (0);
1167 }
1168
1169 error = copyout(&ksi.ksi_signo, uap->sig, sizeof(ksi.ksi_signo));
1170 td->td_retval[0] = error;
1171 return (0);
1172 }
1173
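For reference, a small userland sketch of sigwait(2) as wrapped by sys_sigwait() above (not part of this file); the signal must be blocked first so it stays queued for sigwait() rather than being delivered:

#include <signal.h>
#include <stdio.h>

int
main(void)
{
	sigset_t set;
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGTERM);
	/* Block SIGTERM so it is left pending for sigwait() to pick up. */
	sigprocmask(SIG_BLOCK, &set, NULL);

	if (sigwait(&set, &sig) == 0)
		printf("received signal %d\n", sig);
	return (0);
}
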
1174 int
1175 sys_sigtimedwait(struct thread *td, struct sigtimedwait_args *uap)
1176 {
1177 struct timespec ts;
1178 struct timespec *timeout;
1179 sigset_t set;
1180 ksiginfo_t ksi;
1181 int error;
1182
1183 if (uap->timeout) {
1184 error = copyin(uap->timeout, &ts, sizeof(ts));
1185 if (error)
1186 return (error);
1187
1188 timeout = &ts;
1189 } else
1190 timeout = NULL;
1191
1192 error = copyin(uap->set, &set, sizeof(set));
1193 if (error)
1194 return (error);
1195
1196 error = kern_sigtimedwait(td, set, &ksi, timeout);
1197 if (error)
1198 return (error);
1199
1200 if (uap->info)
1201 error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));
1202
1203 if (error == 0)
1204 td->td_retval[0] = ksi.ksi_signo;
1205 return (error);
1206 }
1207
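A corresponding hedged sketch for sigtimedwait(2), showing the timeout that kern_sigtimedwait() converts to a tick count above; an expired timeout surfaces as EAGAIN:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>

int
main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec timeout = { .tv_sec = 5, .tv_nsec = 0 };

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* keep SIGUSR1 pending */

	if (sigtimedwait(&set, &info, &timeout) == -1) {
		if (errno == EAGAIN)
			printf("no SIGUSR1 within 5 seconds\n");
		else
			perror("sigtimedwait");
	} else {
		printf("SIGUSR1 from pid %d\n", (int)info.si_pid);
	}
	return (0);
}
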
1208 int
1209 sys_sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap)
1210 {
1211 ksiginfo_t ksi;
1212 sigset_t set;
1213 int error;
1214
1215 error = copyin(uap->set, &set, sizeof(set));
1216 if (error)
1217 return (error);
1218
1219 error = kern_sigtimedwait(td, set, &ksi, NULL);
1220 if (error)
1221 return (error);
1222
1223 if (uap->info)
1224 error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));
1225
1226 if (error == 0)
1227 td->td_retval[0] = ksi.ksi_signo;
1228 return (error);
1229 }
1230
1231 static void
1232 proc_td_siginfo_capture(struct thread *td, siginfo_t *si)
1233 {
1234 struct thread *thr;
1235
1236 FOREACH_THREAD_IN_PROC(td->td_proc, thr) {
1237 if (thr == td)
1238 thr->td_si = *si;
1239 else
1240 thr->td_si.si_signo = 0;
1241 }
1242 }
1243
1244 int
1245 kern_sigtimedwait(struct thread *td, sigset_t waitset, ksiginfo_t *ksi,
1246 struct timespec *timeout)
1247 {
1248 struct sigacts *ps;
1249 sigset_t saved_mask, new_block;
1250 struct proc *p;
1251 int error, sig, timo, timevalid = 0;
1252 struct timespec rts, ets, ts;
1253 struct timeval tv;
1254
1255 p = td->td_proc;
1256 error = 0;
1257 ets.tv_sec = 0;
1258 ets.tv_nsec = 0;
1259
1260 if (timeout != NULL) {
1261 if (timeout->tv_nsec >= 0 && timeout->tv_nsec < 1000000000) {
1262 timevalid = 1;
1263 getnanouptime(&rts);
1264 timespecadd(&rts, timeout, &ets);
1265 }
1266 }
1267 ksiginfo_init(ksi);
1268 /* Some signals can not be waited for. */
1269 SIG_CANTMASK(waitset);
1270 ps = p->p_sigacts;
1271 PROC_LOCK(p);
1272 saved_mask = td->td_sigmask;
1273 SIGSETNAND(td->td_sigmask, waitset);
1274 for (;;) {
1275 mtx_lock(&ps->ps_mtx);
1276 sig = cursig(td);
1277 mtx_unlock(&ps->ps_mtx);
1278 KASSERT(sig >= 0, ("sig %d", sig));
1279 if (sig != 0 && SIGISMEMBER(waitset, sig)) {
1280 if (sigqueue_get(&td->td_sigqueue, sig, ksi) != 0 ||
1281 sigqueue_get(&p->p_sigqueue, sig, ksi) != 0) {
1282 error = 0;
1283 break;
1284 }
1285 }
1286
1287 if (error != 0)
1288 break;
1289
1290 /*
1291 * POSIX says this must be checked after looking for pending
1292 * signals.
1293 */
1294 if (timeout != NULL) {
1295 if (!timevalid) {
1296 error = EINVAL;
1297 break;
1298 }
1299 getnanouptime(&rts);
1300 if (timespeccmp(&rts, &ets, >=)) {
1301 error = EAGAIN;
1302 break;
1303 }
1304 timespecsub(&ets, &rts, &ts);
1305 TIMESPEC_TO_TIMEVAL(&tv, &ts);
1306 timo = tvtohz(&tv);
1307 } else {
1308 timo = 0;
1309 }
1310
1311 error = msleep(ps, &p->p_mtx, PPAUSE|PCATCH, "sigwait", timo);
1312
1313 if (timeout != NULL) {
1314 if (error == ERESTART) {
1315 /* Timeout can not be restarted. */
1316 error = EINTR;
1317 } else if (error == EAGAIN) {
1318 				/* We will calculate the timeout ourselves. */
1319 error = 0;
1320 }
1321 }
1322 }
1323
1324 new_block = saved_mask;
1325 SIGSETNAND(new_block, td->td_sigmask);
1326 td->td_sigmask = saved_mask;
1327 /*
1328 * Fewer signals can be delivered to us, reschedule signal
1329 * notification.
1330 */
1331 if (p->p_numthreads != 1)
1332 reschedule_signals(p, new_block, 0);
1333
1334 if (error == 0) {
1335 SDT_PROBE2(proc, , , signal__clear, sig, ksi);
1336
1337 if (ksi->ksi_code == SI_TIMER)
1338 itimer_accept(p, ksi->ksi_timerid, ksi);
1339
1340 #ifdef KTRACE
1341 if (KTRPOINT(td, KTR_PSIG)) {
1342 sig_t action;
1343
1344 mtx_lock(&ps->ps_mtx);
1345 action = ps->ps_sigact[_SIG_IDX(sig)];
1346 mtx_unlock(&ps->ps_mtx);
1347 ktrpsig(sig, action, &td->td_sigmask, ksi->ksi_code);
1348 }
1349 #endif
1350 if (sig == SIGKILL) {
1351 proc_td_siginfo_capture(td, &ksi->ksi_info);
1352 sigexit(td, sig);
1353 }
1354 }
1355 PROC_UNLOCK(p);
1356 return (error);
1357 }
1358
1359 #ifndef _SYS_SYSPROTO_H_
1360 struct sigpending_args {
1361 sigset_t *set;
1362 };
1363 #endif
1364 int
1365 sys_sigpending(struct thread *td, struct sigpending_args *uap)
1366 {
1367 struct proc *p = td->td_proc;
1368 sigset_t pending;
1369
1370 PROC_LOCK(p);
1371 pending = p->p_sigqueue.sq_signals;
1372 SIGSETOR(pending, td->td_sigqueue.sq_signals);
1373 PROC_UNLOCK(p);
1374 return (copyout(&pending, uap->set, sizeof(sigset_t)));
1375 }
1376
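A small userland counterpart of sys_sigpending() above (illustrative only): with SIGINT blocked, a raised SIGINT shows up in the pending set rather than being delivered:

#include <signal.h>
#include <stdio.h>

int
main(void)
{
	sigset_t block, pending;

	sigemptyset(&block);
	sigaddset(&block, SIGINT);
	sigprocmask(SIG_BLOCK, &block, NULL);

	raise(SIGINT);			/* queued, not delivered, while blocked */

	sigpending(&pending);		/* union of the process and thread queues above */
	if (sigismember(&pending, SIGINT))
		printf("SIGINT is pending\n");
	return (0);
}
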
1377 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
1378 #ifndef _SYS_SYSPROTO_H_
1379 struct osigpending_args {
1380 int dummy;
1381 };
1382 #endif
1383 int
1384 osigpending(struct thread *td, struct osigpending_args *uap)
1385 {
1386 struct proc *p = td->td_proc;
1387 sigset_t pending;
1388
1389 PROC_LOCK(p);
1390 pending = p->p_sigqueue.sq_signals;
1391 SIGSETOR(pending, td->td_sigqueue.sq_signals);
1392 PROC_UNLOCK(p);
1393 SIG2OSIG(pending, td->td_retval[0]);
1394 return (0);
1395 }
1396 #endif /* COMPAT_43 */
1397
1398 #if defined(COMPAT_43)
1399 /*
1400 * Generalized interface signal handler, 4.3-compatible.
1401 */
1402 #ifndef _SYS_SYSPROTO_H_
1403 struct osigvec_args {
1404 int signum;
1405 struct sigvec *nsv;
1406 struct sigvec *osv;
1407 };
1408 #endif
1409 /* ARGSUSED */
1410 int
1411 osigvec(struct thread *td, struct osigvec_args *uap)
1412 {
1413 struct sigvec vec;
1414 struct sigaction nsa, osa;
1415 struct sigaction *nsap, *osap;
1416 int error;
1417
1418 if (uap->signum <= 0 || uap->signum >= ONSIG)
1419 return (EINVAL);
1420 nsap = (uap->nsv != NULL) ? &nsa : NULL;
1421 osap = (uap->osv != NULL) ? &osa : NULL;
1422 if (nsap) {
1423 error = copyin(uap->nsv, &vec, sizeof(vec));
1424 if (error)
1425 return (error);
1426 nsap->sa_handler = vec.sv_handler;
1427 OSIG2SIG(vec.sv_mask, nsap->sa_mask);
1428 nsap->sa_flags = vec.sv_flags;
1429 nsap->sa_flags ^= SA_RESTART; /* opposite of SV_INTERRUPT */
1430 }
1431 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
1432 if (osap && !error) {
1433 vec.sv_handler = osap->sa_handler;
1434 SIG2OSIG(osap->sa_mask, vec.sv_mask);
1435 vec.sv_flags = osap->sa_flags;
1436 vec.sv_flags &= ~SA_NOCLDWAIT;
1437 vec.sv_flags ^= SA_RESTART;
1438 error = copyout(&vec, uap->osv, sizeof(vec));
1439 }
1440 return (error);
1441 }
1442
1443 #ifndef _SYS_SYSPROTO_H_
1444 struct osigblock_args {
1445 int mask;
1446 };
1447 #endif
1448 int
1449 osigblock(struct thread *td, struct osigblock_args *uap)
1450 {
1451 sigset_t set, oset;
1452
1453 OSIG2SIG(uap->mask, set);
1454 kern_sigprocmask(td, SIG_BLOCK, &set, &oset, 0);
1455 SIG2OSIG(oset, td->td_retval[0]);
1456 return (0);
1457 }
1458
1459 #ifndef _SYS_SYSPROTO_H_
1460 struct osigsetmask_args {
1461 int mask;
1462 };
1463 #endif
1464 int
1465 osigsetmask(struct thread *td, struct osigsetmask_args *uap)
1466 {
1467 sigset_t set, oset;
1468
1469 OSIG2SIG(uap->mask, set);
1470 kern_sigprocmask(td, SIG_SETMASK, &set, &oset, 0);
1471 SIG2OSIG(oset, td->td_retval[0]);
1472 return (0);
1473 }
1474 #endif /* COMPAT_43 */
1475
1476 /*
1477 * Suspend calling thread until signal, providing mask to be set in the
1478 * meantime.
1479 */
1480 #ifndef _SYS_SYSPROTO_H_
1481 struct sigsuspend_args {
1482 const sigset_t *sigmask;
1483 };
1484 #endif
1485 /* ARGSUSED */
1486 int
1487 sys_sigsuspend(struct thread *td, struct sigsuspend_args *uap)
1488 {
1489 sigset_t mask;
1490 int error;
1491
1492 error = copyin(uap->sigmask, &mask, sizeof(mask));
1493 if (error)
1494 return (error);
1495 return (kern_sigsuspend(td, mask));
1496 }
1497
1498 int
1499 kern_sigsuspend(struct thread *td, sigset_t mask)
1500 {
1501 struct proc *p = td->td_proc;
1502 int has_sig, sig;
1503
1504 /*
1505 * When returning from sigsuspend, we want
1506 * the old mask to be restored after the
1507 * signal handler has finished. Thus, we
1508 * save it here and mark the sigacts structure
1509 * to indicate this.
1510 */
1511 PROC_LOCK(p);
1512 kern_sigprocmask(td, SIG_SETMASK, &mask, &td->td_oldsigmask,
1513 SIGPROCMASK_PROC_LOCKED);
1514 td->td_pflags |= TDP_OLDMASK;
1515
1516 /*
1517 	 * Process signals now.  Otherwise, we can get a spurious wakeup
1518 	 * due to a signal that entered the process queue but was delivered
1519 	 * to another thread.  But sigsuspend should return only on signal
1520 	 * delivery.
1521 */
1522 (p->p_sysent->sv_set_syscall_retval)(td, EINTR);
1523 for (has_sig = 0; !has_sig;) {
1524 while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "pause",
1525 0) == 0)
1526 /* void */;
1527 thread_suspend_check(0);
1528 mtx_lock(&p->p_sigacts->ps_mtx);
1529 while ((sig = cursig(td)) != 0) {
1530 KASSERT(sig >= 0, ("sig %d", sig));
1531 has_sig += postsig(sig);
1532 }
1533 mtx_unlock(&p->p_sigacts->ps_mtx);
1534 }
1535 PROC_UNLOCK(p);
1536 td->td_errno = EINTR;
1537 td->td_pflags |= TDP_NERRNO;
1538 return (EJUSTRETURN);
1539 }
1540
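The usual userland pattern for sigsuspend(2), matching the TDP_OLDMASK handling in kern_sigsuspend() above; a hedged sketch that closes the race between testing a flag and sleeping:

#include <signal.h>
#include <unistd.h>

static volatile sig_atomic_t got_usr1;

static void
on_usr1(int sig)
{
	(void)sig;
	got_usr1 = 1;
}

int
main(void)
{
	sigset_t block, oldmask;

	signal(SIGUSR1, on_usr1);

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &oldmask);

	/*
	 * Atomically unblock SIGUSR1 and sleep; the old mask is restored
	 * by the kernel when sigsuspend() returns (always with EINTR).
	 */
	while (!got_usr1)
		sigsuspend(&oldmask);
	return (0);
}
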
1541 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
1542 /*
1543 * Compatibility sigsuspend call for old binaries. Note nonstandard calling
1544 * convention: libc stub passes mask, not pointer, to save a copyin.
1545 */
1546 #ifndef _SYS_SYSPROTO_H_
1547 struct osigsuspend_args {
1548 osigset_t mask;
1549 };
1550 #endif
1551 /* ARGSUSED */
1552 int
1553 osigsuspend(struct thread *td, struct osigsuspend_args *uap)
1554 {
1555 sigset_t mask;
1556
1557 OSIG2SIG(uap->mask, mask);
1558 return (kern_sigsuspend(td, mask));
1559 }
1560 #endif /* COMPAT_43 */
1561
1562 #if defined(COMPAT_43)
1563 #ifndef _SYS_SYSPROTO_H_
1564 struct osigstack_args {
1565 struct sigstack *nss;
1566 struct sigstack *oss;
1567 };
1568 #endif
1569 /* ARGSUSED */
1570 int
1571 osigstack(struct thread *td, struct osigstack_args *uap)
1572 {
1573 struct sigstack nss, oss;
1574 int error = 0;
1575
1576 if (uap->nss != NULL) {
1577 error = copyin(uap->nss, &nss, sizeof(nss));
1578 if (error)
1579 return (error);
1580 }
1581 oss.ss_sp = td->td_sigstk.ss_sp;
1582 oss.ss_onstack = sigonstack(cpu_getstack(td));
1583 if (uap->nss != NULL) {
1584 td->td_sigstk.ss_sp = nss.ss_sp;
1585 td->td_sigstk.ss_size = 0;
1586 td->td_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK;
1587 td->td_pflags |= TDP_ALTSTACK;
1588 }
1589 if (uap->oss != NULL)
1590 error = copyout(&oss, uap->oss, sizeof(oss));
1591
1592 return (error);
1593 }
1594 #endif /* COMPAT_43 */
1595
1596 #ifndef _SYS_SYSPROTO_H_
1597 struct sigaltstack_args {
1598 stack_t *ss;
1599 stack_t *oss;
1600 };
1601 #endif
1602 /* ARGSUSED */
1603 int
1604 sys_sigaltstack(struct thread *td, struct sigaltstack_args *uap)
1605 {
1606 stack_t ss, oss;
1607 int error;
1608
1609 if (uap->ss != NULL) {
1610 error = copyin(uap->ss, &ss, sizeof(ss));
1611 if (error)
1612 return (error);
1613 }
1614 error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
1615 (uap->oss != NULL) ? &oss : NULL);
1616 if (error)
1617 return (error);
1618 if (uap->oss != NULL)
1619 error = copyout(&oss, uap->oss, sizeof(stack_t));
1620 return (error);
1621 }
1622
1623 int
1624 kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
1625 {
1626 struct proc *p = td->td_proc;
1627 int oonstack;
1628
1629 oonstack = sigonstack(cpu_getstack(td));
1630
1631 if (oss != NULL) {
1632 *oss = td->td_sigstk;
1633 oss->ss_flags = (td->td_pflags & TDP_ALTSTACK)
1634 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
1635 }
1636
1637 if (ss != NULL) {
1638 if (oonstack)
1639 return (EPERM);
1640 if ((ss->ss_flags & ~SS_DISABLE) != 0)
1641 return (EINVAL);
1642 if (!(ss->ss_flags & SS_DISABLE)) {
1643 if (ss->ss_size < p->p_sysent->sv_minsigstksz)
1644 return (ENOMEM);
1645
1646 td->td_sigstk = *ss;
1647 td->td_pflags |= TDP_ALTSTACK;
1648 } else {
1649 td->td_pflags &= ~TDP_ALTSTACK;
1650 }
1651 }
1652 return (0);
1653 }
1654
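A minimal userland sketch of sigaltstack(2), whose argument checks (SS_DISABLE, sv_minsigstksz) are performed by kern_sigaltstack() above; the handler installed here is only a placeholder:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	stack_t ss;
	struct sigaction sa;

	ss.ss_sp = malloc(SIGSTKSZ);
	if (ss.ss_sp == NULL)
		return (1);
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	if (sigaltstack(&ss, NULL) == -1) {	/* sets TDP_ALTSTACK for the thread */
		perror("sigaltstack");
		return (1);
	}

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = SIG_DFL;		/* a real handler would go here */
	sa.sa_flags = SA_ONSTACK;		/* deliver SIGSEGV on the alternate stack */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);
	return (0);
}
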
1655 /*
1656 * Common code for kill process group/broadcast kill.
1657 * cp is calling process.
1658 */
1659 static int
1660 killpg1(struct thread *td, int sig, int pgid, int all, ksiginfo_t *ksi)
1661 {
1662 struct proc *p;
1663 struct pgrp *pgrp;
1664 int err;
1665 int ret;
1666
1667 ret = ESRCH;
1668 if (all) {
1669 /*
1670 * broadcast
1671 */
1672 sx_slock(&allproc_lock);
1673 FOREACH_PROC_IN_SYSTEM(p) {
1674 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
1675 p == td->td_proc || p->p_state == PRS_NEW) {
1676 continue;
1677 }
1678 PROC_LOCK(p);
1679 err = p_cansignal(td, p, sig);
1680 if (err == 0) {
1681 if (sig)
1682 pksignal(p, sig, ksi);
1683 ret = err;
1684 }
1685 else if (ret == ESRCH)
1686 ret = err;
1687 PROC_UNLOCK(p);
1688 }
1689 sx_sunlock(&allproc_lock);
1690 } else {
1691 sx_slock(&proctree_lock);
1692 if (pgid == 0) {
1693 /*
1694 * zero pgid means send to my process group.
1695 */
1696 pgrp = td->td_proc->p_pgrp;
1697 PGRP_LOCK(pgrp);
1698 } else {
1699 pgrp = pgfind(pgid);
1700 if (pgrp == NULL) {
1701 sx_sunlock(&proctree_lock);
1702 return (ESRCH);
1703 }
1704 }
1705 sx_sunlock(&proctree_lock);
1706 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1707 PROC_LOCK(p);
1708 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
1709 p->p_state == PRS_NEW) {
1710 PROC_UNLOCK(p);
1711 continue;
1712 }
1713 err = p_cansignal(td, p, sig);
1714 if (err == 0) {
1715 if (sig)
1716 pksignal(p, sig, ksi);
1717 ret = err;
1718 }
1719 else if (ret == ESRCH)
1720 ret = err;
1721 PROC_UNLOCK(p);
1722 }
1723 PGRP_UNLOCK(pgrp);
1724 }
1725 return (ret);
1726 }
1727
1728 #ifndef _SYS_SYSPROTO_H_
1729 struct kill_args {
1730 int pid;
1731 int signum;
1732 };
1733 #endif
1734 /* ARGSUSED */
1735 int
1736 sys_kill(struct thread *td, struct kill_args *uap)
1737 {
1738 ksiginfo_t ksi;
1739 struct proc *p;
1740 int error;
1741
1742 /*
1743 	 * A process in capability mode can send signals only to itself.
1744 * The main rationale behind this is that abort(3) is implemented as
1745 * kill(getpid(), SIGABRT).
1746 */
1747 if (IN_CAPABILITY_MODE(td) && uap->pid != td->td_proc->p_pid)
1748 return (ECAPMODE);
1749
1750 AUDIT_ARG_SIGNUM(uap->signum);
1751 AUDIT_ARG_PID(uap->pid);
1752 if ((u_int)uap->signum > _SIG_MAXSIG)
1753 return (EINVAL);
1754
1755 ksiginfo_init(&ksi);
1756 ksi.ksi_signo = uap->signum;
1757 ksi.ksi_code = SI_USER;
1758 ksi.ksi_pid = td->td_proc->p_pid;
1759 ksi.ksi_uid = td->td_ucred->cr_ruid;
1760
1761 if (uap->pid > 0) {
1762 /* kill single process */
1763 if ((p = pfind_any(uap->pid)) == NULL)
1764 return (ESRCH);
1765 AUDIT_ARG_PROCESS(p);
1766 error = p_cansignal(td, p, uap->signum);
1767 if (error == 0 && uap->signum)
1768 pksignal(p, uap->signum, &ksi);
1769 PROC_UNLOCK(p);
1770 return (error);
1771 }
1772 switch (uap->pid) {
1773 case -1: /* broadcast signal */
1774 return (killpg1(td, uap->signum, 0, 1, &ksi));
1775 case 0: /* signal own process group */
1776 return (killpg1(td, uap->signum, 0, 0, &ksi));
1777 default: /* negative explicit process group */
1778 return (killpg1(td, uap->signum, -uap->pid, 0, &ksi));
1779 }
1780 /* NOTREACHED */
1781 }
1782
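The pid conventions handled by the switch in sys_kill() above map directly onto the userland kill(2) call; a hedged sketch (the pid value is a placeholder):

#include <signal.h>
#include <stdio.h>

int
main(void)
{
	pid_t pid = 12345;		/* placeholder pid */

	/*
	 * pid > 0   -> signal that single process
	 * pid == 0  -> signal the caller's own process group
	 * pid == -1 -> broadcast to every process the caller may signal
	 * pid < -1  -> signal the process group with id -pid
	 */
	if (kill(pid, SIGTERM) == -1)
		perror("kill");

	/* Signal 0 performs only the p_cansignal()/existence check. */
	if (kill(pid, 0) == -1)
		perror("kill (existence check)");
	return (0);
}
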
1783 int
1784 sys_pdkill(struct thread *td, struct pdkill_args *uap)
1785 {
1786 struct proc *p;
1787 int error;
1788
1789 AUDIT_ARG_SIGNUM(uap->signum);
1790 AUDIT_ARG_FD(uap->fd);
1791 if ((u_int)uap->signum > _SIG_MAXSIG)
1792 return (EINVAL);
1793
1794 error = procdesc_find(td, uap->fd, &cap_pdkill_rights, &p);
1795 if (error)
1796 return (error);
1797 AUDIT_ARG_PROCESS(p);
1798 error = p_cansignal(td, p, uap->signum);
1799 if (error == 0 && uap->signum)
1800 kern_psignal(p, uap->signum);
1801 PROC_UNLOCK(p);
1802 return (error);
1803 }
1804
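sys_pdkill() is the process-descriptor variant of kill; a hedged userland sketch using the FreeBSD-specific pdfork(2)/pdkill(2) pair, signalling the child through its descriptor rather than its pid:

#include <sys/procdesc.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int pfd;
	pid_t pid;

	pid = pdfork(&pfd, 0);
	if (pid == -1) {
		perror("pdfork");
		return (1);
	}
	if (pid == 0) {			/* child */
		pause();
		_exit(0);
	}
	/* Parent: deliver SIGTERM via the process descriptor. */
	if (pdkill(pfd, SIGTERM) == -1)
		perror("pdkill");
	close(pfd);			/* closing the descriptor also terminates the child */
	return (0);
}
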
1805 #if defined(COMPAT_43)
1806 #ifndef _SYS_SYSPROTO_H_
1807 struct okillpg_args {
1808 int pgid;
1809 int signum;
1810 };
1811 #endif
1812 /* ARGSUSED */
1813 int
1814 okillpg(struct thread *td, struct okillpg_args *uap)
1815 {
1816 ksiginfo_t ksi;
1817
1818 AUDIT_ARG_SIGNUM(uap->signum);
1819 AUDIT_ARG_PID(uap->pgid);
1820 if ((u_int)uap->signum > _SIG_MAXSIG)
1821 return (EINVAL);
1822
1823 ksiginfo_init(&ksi);
1824 ksi.ksi_signo = uap->signum;
1825 ksi.ksi_code = SI_USER;
1826 ksi.ksi_pid = td->td_proc->p_pid;
1827 ksi.ksi_uid = td->td_ucred->cr_ruid;
1828 return (killpg1(td, uap->signum, uap->pgid, 0, &ksi));
1829 }
1830 #endif /* COMPAT_43 */
1831
1832 #ifndef _SYS_SYSPROTO_H_
1833 struct sigqueue_args {
1834 pid_t pid;
1835 int signum;
1836 /* union sigval */ void *value;
1837 };
1838 #endif
1839 int
1840 sys_sigqueue(struct thread *td, struct sigqueue_args *uap)
1841 {
1842 union sigval sv;
1843
1844 sv.sival_ptr = uap->value;
1845
1846 return (kern_sigqueue(td, uap->pid, uap->signum, &sv));
1847 }
1848
1849 int
1850 kern_sigqueue(struct thread *td, pid_t pid, int signum, union sigval *value)
1851 {
1852 ksiginfo_t ksi;
1853 struct proc *p;
1854 int error;
1855
1856 if ((u_int)signum > _SIG_MAXSIG)
1857 return (EINVAL);
1858
1859 /*
1860 	 * The specification says sigqueue can only send a signal to a
1861 	 * single process.
1862 */
1863 if (pid <= 0)
1864 return (EINVAL);
1865
1866 if ((p = pfind_any(pid)) == NULL)
1867 return (ESRCH);
1868 error = p_cansignal(td, p, signum);
1869 if (error == 0 && signum != 0) {
1870 ksiginfo_init(&ksi);
1871 ksi.ksi_flags = KSI_SIGQ;
1872 ksi.ksi_signo = signum;
1873 ksi.ksi_code = SI_QUEUE;
1874 ksi.ksi_pid = td->td_proc->p_pid;
1875 ksi.ksi_uid = td->td_ucred->cr_ruid;
1876 ksi.ksi_value = *value;
1877 error = pksignal(p, ksi.ksi_signo, &ksi);
1878 }
1879 PROC_UNLOCK(p);
1880 return (error);
1881 }
1882
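A userland sketch of sigqueue(2) as implemented by kern_sigqueue() above (illustrative; the target pid is a placeholder): the attached sigval reaches the receiver as si_value, with si_code set to SI_QUEUE:

#include <signal.h>
#include <stdio.h>

int
main(void)
{
	union sigval sv;
	pid_t pid = 12345;		/* placeholder target pid */

	sv.sival_int = 42;		/* payload carried in ksi_value above */
	if (sigqueue(pid, SIGUSR1, sv) == -1) {
		perror("sigqueue");
		return (1);
	}
	return (0);
}
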
1883 /*
1884 * Send a signal to a process group.
1885 */
1886 void
1887 gsignal(int pgid, int sig, ksiginfo_t *ksi)
1888 {
1889 struct pgrp *pgrp;
1890
1891 if (pgid != 0) {
1892 sx_slock(&proctree_lock);
1893 pgrp = pgfind(pgid);
1894 sx_sunlock(&proctree_lock);
1895 if (pgrp != NULL) {
1896 pgsignal(pgrp, sig, 0, ksi);
1897 PGRP_UNLOCK(pgrp);
1898 }
1899 }
1900 }
1901
1902 /*
1903  * Send a signal to a process group.  If checkctty is 1,
1904 * limit to members which have a controlling terminal.
1905 */
1906 void
1907 pgsignal(struct pgrp *pgrp, int sig, int checkctty, ksiginfo_t *ksi)
1908 {
1909 struct proc *p;
1910
1911 if (pgrp) {
1912 PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
1913 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1914 PROC_LOCK(p);
1915 if (p->p_state == PRS_NORMAL &&
1916 (checkctty == 0 || p->p_flag & P_CONTROLT))
1917 pksignal(p, sig, ksi);
1918 PROC_UNLOCK(p);
1919 }
1920 }
1921 }
1922
1923
1924 /*
1925 * Recalculate the signal mask and reset the signal disposition after
1926 * usermode frame for delivery is formed. Should be called after
1927  * machine-specific routine, because sysent->sv_sendsig() needs correct
1928 * ps_siginfo and signal mask.
1929 */
1930 static void
1931 postsig_done(int sig, struct thread *td, struct sigacts *ps)
1932 {
1933 sigset_t mask;
1934
1935 mtx_assert(&ps->ps_mtx, MA_OWNED);
1936 td->td_ru.ru_nsignals++;
1937 mask = ps->ps_catchmask[_SIG_IDX(sig)];
1938 if (!SIGISMEMBER(ps->ps_signodefer, sig))
1939 SIGADDSET(mask, sig);
1940 kern_sigprocmask(td, SIG_BLOCK, &mask, NULL,
1941 SIGPROCMASK_PROC_LOCKED | SIGPROCMASK_PS_LOCKED);
1942 if (SIGISMEMBER(ps->ps_sigreset, sig))
1943 sigdflt(ps, sig);
1944 }
1945
1946
1947 /*
1948 * Send a signal caused by a trap to the current thread. If it will be
1949 * caught immediately, deliver it with correct code. Otherwise, post it
1950 * normally.
1951 */
1952 void
1953 trapsignal(struct thread *td, ksiginfo_t *ksi)
1954 {
1955 struct sigacts *ps;
1956 struct proc *p;
1957 int sig;
1958 int code;
1959
1960 p = td->td_proc;
1961 sig = ksi->ksi_signo;
1962 code = ksi->ksi_code;
1963 KASSERT(_SIG_VALID(sig), ("invalid signal"));
1964
1965 PROC_LOCK(p);
1966 ps = p->p_sigacts;
1967 mtx_lock(&ps->ps_mtx);
1968 if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
1969 !SIGISMEMBER(td->td_sigmask, sig)) {
1970 #ifdef KTRACE
1971 if (KTRPOINT(curthread, KTR_PSIG))
1972 ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
1973 &td->td_sigmask, code);
1974 #endif
1975 (*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)],
1976 ksi, &td->td_sigmask);
1977 postsig_done(sig, td, ps);
1978 mtx_unlock(&ps->ps_mtx);
1979 } else {
1980 /*
1981 * Avoid a possible infinite loop if the thread
1982 * masking the signal or process is ignoring the
1983 * signal.
1984 */
1985 if (kern_forcesigexit &&
1986 (SIGISMEMBER(td->td_sigmask, sig) ||
1987 ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN)) {
1988 SIGDELSET(td->td_sigmask, sig);
1989 SIGDELSET(ps->ps_sigcatch, sig);
1990 SIGDELSET(ps->ps_sigignore, sig);
1991 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
1992 }
1993 mtx_unlock(&ps->ps_mtx);
1994 p->p_code = code; /* XXX for core dump/debugger */
1995 p->p_sig = sig; /* XXX to verify code */
1996 tdsendsignal(p, td, sig, ksi);
1997 }
1998 PROC_UNLOCK(p);
1999 }
2000
2001 static struct thread *
2002 sigtd(struct proc *p, int sig, int prop)
2003 {
2004 struct thread *td, *signal_td;
2005
2006 PROC_LOCK_ASSERT(p, MA_OWNED);
2007
2008 /*
2009 * Check if current thread can handle the signal without
2010 * switching context to another thread.
2011 */
2012 if (curproc == p && !SIGISMEMBER(curthread->td_sigmask, sig))
2013 return (curthread);
2014 signal_td = NULL;
2015 FOREACH_THREAD_IN_PROC(p, td) {
2016 if (!SIGISMEMBER(td->td_sigmask, sig)) {
2017 signal_td = td;
2018 break;
2019 }
2020 }
2021 if (signal_td == NULL)
2022 signal_td = FIRST_THREAD_IN_PROC(p);
2023 return (signal_td);
2024 }
2025
2026 /*
2027 * Send the signal to the process. If the signal has an action, the action
2028 * is usually performed by the target process rather than the caller; we add
2029 * the signal to the set of pending signals for the process.
2030 *
2031 * Exceptions:
2032 * o When a stop signal is sent to a sleeping process that takes the
2033 * default action, the process is stopped without awakening it.
2034 * o SIGCONT restarts stopped processes (or puts them back to sleep)
2035 * regardless of the signal action (eg, blocked or ignored).
2036 *
2037 * Other ignored signals are discarded immediately.
2038 *
2039 * NB: This function may be entered from the debugger via the "kill" DDB
2040 * command. There is little that can be done to mitigate the possibly messy
2041 * side effects of this unwise possibility.
2042 */
2043 void
2044 kern_psignal(struct proc *p, int sig)
2045 {
2046 ksiginfo_t ksi;
2047
2048 ksiginfo_init(&ksi);
2049 ksi.ksi_signo = sig;
2050 ksi.ksi_code = SI_KERNEL;
2051 (void) tdsendsignal(p, NULL, sig, &ksi);
2052 }
2053
2054 int
2055 pksignal(struct proc *p, int sig, ksiginfo_t *ksi)
2056 {
2057
2058 return (tdsendsignal(p, NULL, sig, ksi));
2059 }
2060
2061 /* Utility function for finding a thread to send signal event to. */
2062 int
2063 sigev_findtd(struct proc *p ,struct sigevent *sigev, struct thread **ttd)
2064 {
2065 struct thread *td;
2066
2067 if (sigev->sigev_notify == SIGEV_THREAD_ID) {
2068 td = tdfind(sigev->sigev_notify_thread_id, p->p_pid);
2069 if (td == NULL)
2070 return (ESRCH);
2071 *ttd = td;
2072 } else {
2073 *ttd = NULL;
2074 PROC_LOCK(p);
2075 }
2076 return (0);
2077 }
2078
2079 void
2080 tdsignal(struct thread *td, int sig)
2081 {
2082 ksiginfo_t ksi;
2083
2084 ksiginfo_init(&ksi);
2085 ksi.ksi_signo = sig;
2086 ksi.ksi_code = SI_KERNEL;
2087 (void) tdsendsignal(td->td_proc, td, sig, &ksi);
2088 }
2089
2090 void
2091 tdksignal(struct thread *td, int sig, ksiginfo_t *ksi)
2092 {
2093
2094 (void) tdsendsignal(td->td_proc, td, sig, ksi);
2095 }
2096
2097 int
2098 tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
2099 {
2100 sig_t action;
2101 sigqueue_t *sigqueue;
2102 int prop;
2103 struct sigacts *ps;
2104 int intrval;
2105 int ret = 0;
2106 int wakeup_swapper;
2107
2108 MPASS(td == NULL || p == td->td_proc);
2109 PROC_LOCK_ASSERT(p, MA_OWNED);
2110
2111 if (!_SIG_VALID(sig))
2112 panic("%s(): invalid signal %d", __func__, sig);
2113
2114 KASSERT(ksi == NULL || !KSI_ONQ(ksi), ("%s: ksi on queue", __func__));
2115
2116 /*
2117 * IEEE Std 1003.1-2001: return success when killing a zombie.
2118 */
2119 if (p->p_state == PRS_ZOMBIE) {
2120 if (ksi && (ksi->ksi_flags & KSI_INS))
2121 ksiginfo_tryfree(ksi);
2122 return (ret);
2123 }
2124
2125 ps = p->p_sigacts;
2126 KNOTE_LOCKED(p->p_klist, NOTE_SIGNAL | sig);
2127 prop = sigprop(sig);
2128
2129 if (td == NULL) {
2130 td = sigtd(p, sig, prop);
2131 sigqueue = &p->p_sigqueue;
2132 } else
2133 sigqueue = &td->td_sigqueue;
2134
2135 SDT_PROBE3(proc, , , signal__send, td, p, sig);
2136
2137 /*
2138 * If the signal is being ignored,
2139 * then we forget about it immediately.
2140 * (Note: we don't set SIGCONT in ps_sigignore,
2141 * and if it is set to SIG_IGN,
2142 * action will be SIG_DFL here.)
2143 */
2144 mtx_lock(&ps->ps_mtx);
2145 if (SIGISMEMBER(ps->ps_sigignore, sig)) {
2146 SDT_PROBE3(proc, , , signal__discard, td, p, sig);
2147
2148 mtx_unlock(&ps->ps_mtx);
2149 if (ksi && (ksi->ksi_flags & KSI_INS))
2150 ksiginfo_tryfree(ksi);
2151 return (ret);
2152 }
2153 if (SIGISMEMBER(td->td_sigmask, sig))
2154 action = SIG_HOLD;
2155 else if (SIGISMEMBER(ps->ps_sigcatch, sig))
2156 action = SIG_CATCH;
2157 else
2158 action = SIG_DFL;
2159 if (SIGISMEMBER(ps->ps_sigintr, sig))
2160 intrval = EINTR;
2161 else
2162 intrval = ERESTART;
2163 mtx_unlock(&ps->ps_mtx);
2164
2165 if (prop & SIGPROP_CONT)
2166 sigqueue_delete_stopmask_proc(p);
2167 else if (prop & SIGPROP_STOP) {
2168 /*
2169 * If sending a tty stop signal to a member of an orphaned
2170 * process group, discard the signal here if the action
2171 * is default; don't stop the process below if sleeping,
2172 * and don't clear any pending SIGCONT.
2173 */
2174 if ((prop & SIGPROP_TTYSTOP) &&
2175 (p->p_pgrp->pg_jobc == 0) &&
2176 (action == SIG_DFL)) {
2177 if (ksi && (ksi->ksi_flags & KSI_INS))
2178 ksiginfo_tryfree(ksi);
2179 return (ret);
2180 }
2181 sigqueue_delete_proc(p, SIGCONT);
2182 if (p->p_flag & P_CONTINUED) {
2183 p->p_flag &= ~P_CONTINUED;
2184 PROC_LOCK(p->p_pptr);
2185 sigqueue_take(p->p_ksi);
2186 PROC_UNLOCK(p->p_pptr);
2187 }
2188 }
2189
2190 ret = sigqueue_add(sigqueue, sig, ksi);
2191 if (ret != 0)
2192 return (ret);
2193 signotify(td);
2194 /*
2195 * Defer further processing for signals which are held,
2196 * except that stopped processes must be continued by SIGCONT.
2197 */
2198 if (action == SIG_HOLD &&
2199 !((prop & SIGPROP_CONT) && (p->p_flag & P_STOPPED_SIG)))
2200 return (ret);
2201
2202 /* SIGKILL: Remove procfs STOPEVENTs. */
2203 if (sig == SIGKILL) {
2204 /* from procfs_ioctl.c: PIOCBIC */
2205 p->p_stops = 0;
2206 /* from procfs_ioctl.c: PIOCCONT */
2207 p->p_step = 0;
2208 wakeup(&p->p_step);
2209 }
2210 /*
2211 * Some signals have a process-wide effect and a per-thread
2212 * component. Most processing occurs when the process next
2213 * tries to cross the user boundary; however, there are some
2214 * times when processing needs to be done immediately, such as
2215 * waking up threads so that they can cross the user boundary.
2216 * We try to do the per-process part here.
2217 */
2218 if (P_SHOULDSTOP(p)) {
2219 KASSERT(!(p->p_flag & P_WEXIT),
2220 ("signal to stopped but exiting process"));
2221 if (sig == SIGKILL) {
2222 /*
2223 * If traced process is already stopped,
2224 * then no further action is necessary.
2225 */
2226 if (p->p_flag & P_TRACED)
2227 goto out;
2228 /*
2229 * SIGKILL sets process running.
2230 * It will die elsewhere.
2231 * All threads must be restarted.
2232 */
2233 p->p_flag &= ~P_STOPPED_SIG;
2234 goto runfast;
2235 }
2236
2237 if (prop & SIGPROP_CONT) {
2238 /*
2239 * If traced process is already stopped,
2240 * then no further action is necessary.
2241 */
2242 if (p->p_flag & P_TRACED)
2243 goto out;
2244 /*
2245 * If SIGCONT is default (or ignored), we continue the
2246 * process but don't leave the signal in sigqueue as
2247 * it has no further action. If SIGCONT is held, we
2248 * continue the process and leave the signal in
2249 * sigqueue. If the process catches SIGCONT, let it
2250 * handle the signal itself. If it isn't waiting on
2251 * an event, it goes back to run state.
2252 * Otherwise, the process goes back to the sleep state.
2253 */
2254 p->p_flag &= ~P_STOPPED_SIG;
2255 PROC_SLOCK(p);
2256 if (p->p_numthreads == p->p_suspcount) {
2257 PROC_SUNLOCK(p);
2258 p->p_flag |= P_CONTINUED;
2259 p->p_xsig = SIGCONT;
2260 PROC_LOCK(p->p_pptr);
2261 childproc_continued(p);
2262 PROC_UNLOCK(p->p_pptr);
2263 PROC_SLOCK(p);
2264 }
2265 if (action == SIG_DFL) {
2266 thread_unsuspend(p);
2267 PROC_SUNLOCK(p);
2268 sigqueue_delete(sigqueue, sig);
2269 goto out;
2270 }
2271 if (action == SIG_CATCH) {
2272 /*
2273 * The process wants to catch it so it needs
2274 * to run at least one thread, but which one?
2275 */
2276 PROC_SUNLOCK(p);
2277 goto runfast;
2278 }
2279 /*
2280 * The signal is not ignored or caught.
2281 */
2282 thread_unsuspend(p);
2283 PROC_SUNLOCK(p);
2284 goto out;
2285 }
2286
2287 if (prop & SIGPROP_STOP) {
2288 /*
2289 * If traced process is already stopped,
2290 * then no further action is necessary.
2291 */
2292 if (p->p_flag & P_TRACED)
2293 goto out;
2294 /*
2295 * Already stopped, don't need to stop again.
2296 * (If we did, the shell could get confused.)
2297 * Just make sure the signal STOP bit is set.
2298 */
2299 p->p_flag |= P_STOPPED_SIG;
2300 sigqueue_delete(sigqueue, sig);
2301 goto out;
2302 }
2303
2304 /*
2305 * All other kinds of signals:
2306 * If a thread is sleeping interruptibly, simulate a
2307 * wakeup so that when it is continued it will be made
2308 * runnable and can look at the signal. However, don't make
2309 * the PROCESS runnable, leave it stopped.
2310 * It may run a bit until it hits a thread_suspend_check().
2311 */
2312 wakeup_swapper = 0;
2313 PROC_SLOCK(p);
2314 thread_lock(td);
2315 if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR))
2316 wakeup_swapper = sleepq_abort(td, intrval);
2317 thread_unlock(td);
2318 PROC_SUNLOCK(p);
2319 if (wakeup_swapper)
2320 kick_proc0();
2321 goto out;
2322 /*
2323 * Mutexes are short lived. Threads waiting on them will
2324 * hit thread_suspend_check() soon.
2325 */
2326 } else if (p->p_state == PRS_NORMAL) {
2327 if (p->p_flag & P_TRACED || action == SIG_CATCH) {
2328 tdsigwakeup(td, sig, action, intrval);
2329 goto out;
2330 }
2331
2332 MPASS(action == SIG_DFL);
2333
2334 if (prop & SIGPROP_STOP) {
2335 if (p->p_flag & (P_PPWAIT|P_WEXIT))
2336 goto out;
2337 p->p_flag |= P_STOPPED_SIG;
2338 p->p_xsig = sig;
2339 PROC_SLOCK(p);
2340 wakeup_swapper = sig_suspend_threads(td, p, 1);
2341 if (p->p_numthreads == p->p_suspcount) {
2342 /*
2343 * Only a thread sending a signal to another
2344 * process can reach here.  When a thread sends a
2345 * signal to its own process it does not suspend
2346 * itself here, so p_numthreads should never be
2347 * equal to p_suspcount.
2348 */
2349 thread_stopped(p);
2350 PROC_SUNLOCK(p);
2351 sigqueue_delete_proc(p, p->p_xsig);
2352 } else
2353 PROC_SUNLOCK(p);
2354 if (wakeup_swapper)
2355 kick_proc0();
2356 goto out;
2357 }
2358 } else {
2359 /* Not in "NORMAL" state. Discard the signal. */
2360 sigqueue_delete(sigqueue, sig);
2361 goto out;
2362 }
2363
2364 /*
2365 * The process is not stopped so we need to apply the signal to all the
2366 * running threads.
2367 */
2368 runfast:
2369 tdsigwakeup(td, sig, action, intrval);
2370 PROC_SLOCK(p);
2371 thread_unsuspend(p);
2372 PROC_SUNLOCK(p);
2373 out:
2374 /* If we jump here, proc slock should not be owned. */
2375 PROC_SLOCK_ASSERT(p, MA_NOTOWNED);
2376 return (ret);
2377 }
2378
2379 /*
2380 * The force of a signal has been directed against a single
2381 * thread. We need to see what we can do about knocking it
2382 * out of any sleep it may be in etc.
2383 */
2384 static void
2385 tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
2386 {
2387 struct proc *p = td->td_proc;
2388 int prop;
2389 int wakeup_swapper;
2390
2391 wakeup_swapper = 0;
2392 PROC_LOCK_ASSERT(p, MA_OWNED);
2393 prop = sigprop(sig);
2394
2395 PROC_SLOCK(p);
2396 thread_lock(td);
2397 /*
2398 * Bring the priority of a thread up if we want it to get
2399 * killed in this lifetime. Be careful to avoid bumping the
2400 * priority of the idle thread, since we still allow signaling
2401 * kernel processes.
2402 */
2403 if (action == SIG_DFL && (prop & SIGPROP_KILL) != 0 &&
2404 td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
2405 sched_prio(td, PUSER);
2406 if (TD_ON_SLEEPQ(td)) {
2407 /*
2408 * If thread is sleeping uninterruptibly
2409 * we can't interrupt the sleep... the signal will
2410 * be noticed when the process returns through
2411 * trap() or syscall().
2412 */
2413 if ((td->td_flags & TDF_SINTR) == 0)
2414 goto out;
2415 /*
2416 * If SIGCONT is default (or ignored) and process is
2417 * asleep, we are finished; the process should not
2418 * be awakened.
2419 */
2420 if ((prop & SIGPROP_CONT) && action == SIG_DFL) {
2421 thread_unlock(td);
2422 PROC_SUNLOCK(p);
2423 sigqueue_delete(&p->p_sigqueue, sig);
2424 /*
2425 * It may be on either list in this state.
2426 * Remove from both for now.
2427 */
2428 sigqueue_delete(&td->td_sigqueue, sig);
2429 return;
2430 }
2431
2432 /*
2433 * Don't awaken a sleeping thread for SIGSTOP if the
2434 * STOP signal is deferred.
2435 */
2436 if ((prop & SIGPROP_STOP) != 0 && (td->td_flags & (TDF_SBDRY |
2437 TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY)
2438 goto out;
2439
2440 /*
2441 * Give low priority threads a better chance to run.
2442 */
2443 if (td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
2444 sched_prio(td, PUSER);
2445
2446 wakeup_swapper = sleepq_abort(td, intrval);
2447 } else {
2448 /*
2449 * Other states do nothing with the signal immediately,
2450 * other than kicking ourselves if we are running.
2451 * It will either never be noticed, or noticed very soon.
2452 */
2453 #ifdef SMP
2454 if (TD_IS_RUNNING(td) && td != curthread)
2455 forward_signal(td);
2456 #endif
2457 }
2458 out:
2459 PROC_SUNLOCK(p);
2460 thread_unlock(td);
2461 if (wakeup_swapper)
2462 kick_proc0();
2463 }
2464
2465 static int
2466 sig_suspend_threads(struct thread *td, struct proc *p, int sending)
2467 {
2468 struct thread *td2;
2469 int wakeup_swapper;
2470
2471 PROC_LOCK_ASSERT(p, MA_OWNED);
2472 PROC_SLOCK_ASSERT(p, MA_OWNED);
2473 MPASS(sending || td == curthread);
2474
2475 wakeup_swapper = 0;
2476 FOREACH_THREAD_IN_PROC(p, td2) {
2477 thread_lock(td2);
2478 td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
2479 if ((TD_IS_SLEEPING(td2) || TD_IS_SWAPPED(td2)) &&
2480 (td2->td_flags & TDF_SINTR)) {
2481 if (td2->td_flags & TDF_SBDRY) {
2482 /*
2483 * Once a thread is asleep with
2484 * TDF_SBDRY and without TDF_SERESTART
2485 * or TDF_SEINTR set, it should never
2486 * become suspended due to this check.
2487 */
2488 KASSERT(!TD_IS_SUSPENDED(td2),
2489 ("thread with deferred stops suspended"));
2490 if (TD_SBDRY_INTR(td2))
2491 wakeup_swapper |= sleepq_abort(td2,
2492 TD_SBDRY_ERRNO(td2));
2493 } else if (!TD_IS_SUSPENDED(td2)) {
2494 thread_suspend_one(td2);
2495 }
2496 } else if (!TD_IS_SUSPENDED(td2)) {
2497 if (sending || td != td2)
2498 td2->td_flags |= TDF_ASTPENDING;
2499 #ifdef SMP
2500 if (TD_IS_RUNNING(td2) && td2 != td)
2501 forward_signal(td2);
2502 #endif
2503 }
2504 thread_unlock(td2);
2505 }
2506 return (wakeup_swapper);
2507 }
2508
2509 /*
2510 * Stop the process for an event deemed interesting to the debugger. If si is
2511 * non-NULL, this is a signal exchange; the new signal requested by the
2512 * debugger will be returned for handling. If si is NULL, this is some other
2513 * type of interesting event. The debugger may request a signal be delivered in
2514 * that case as well, however it will be deferred until it can be handled.
2515 */
2516 int
2517 ptracestop(struct thread *td, int sig, ksiginfo_t *si)
2518 {
2519 struct proc *p = td->td_proc;
2520 struct thread *td2;
2521 ksiginfo_t ksi;
2522 int prop;
2523
2524 PROC_LOCK_ASSERT(p, MA_OWNED);
2525 KASSERT(!(p->p_flag & P_WEXIT), ("Stopping exiting process"));
2526 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
2527 &p->p_mtx.lock_object, "Stopping for traced signal");
2528
2529 td->td_xsig = sig;
2530
2531 if (si == NULL || (si->ksi_flags & KSI_PTRACE) == 0) {
2532 td->td_dbgflags |= TDB_XSIG;
2533 CTR4(KTR_PTRACE, "ptracestop: tid %d (pid %d) flags %#x sig %d",
2534 td->td_tid, p->p_pid, td->td_dbgflags, sig);
2535 PROC_SLOCK(p);
2536 while ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_XSIG)) {
2537 if (P_KILLED(p)) {
2538 /*
2539 * Ensure that, if we've been PT_KILLed, the
2540 * exit status reflects that. Another thread
2541 * may also be in ptracestop(), having just
2542 * received the SIGKILL, but this thread was
2543 * unsuspended first.
2544 */
2545 td->td_dbgflags &= ~TDB_XSIG;
2546 td->td_xsig = SIGKILL;
2547 p->p_ptevents = 0;
2548 break;
2549 }
2550 if (p->p_flag & P_SINGLE_EXIT &&
2551 !(td->td_dbgflags & TDB_EXIT)) {
2552 /*
2553 * Ignore ptrace stops except for thread exit
2554 * events when the process exits.
2555 */
2556 td->td_dbgflags &= ~TDB_XSIG;
2557 PROC_SUNLOCK(p);
2558 return (0);
2559 }
2560
2561 /*
2562 * Make wait(2) work. Ensure that right after the
2563 * attach, the thread chosen to become the leader
2564 * of the attach gets reported to the waiter.
2565 * Otherwise, just avoid overwriting another thread's
2566 * assignment to p_xthread. If another thread has
2567 * already set p_xthread, the current thread will get
2568 * a chance to report itself upon the next iteration.
2569 */
2570 if ((td->td_dbgflags & TDB_FSTP) != 0 ||
2571 ((p->p_flag2 & P2_PTRACE_FSTP) == 0 &&
2572 p->p_xthread == NULL)) {
2573 p->p_xsig = sig;
2574 p->p_xthread = td;
2575 td->td_dbgflags &= ~TDB_FSTP;
2576 p->p_flag2 &= ~P2_PTRACE_FSTP;
2577 p->p_flag |= P_STOPPED_SIG | P_STOPPED_TRACE;
2578 sig_suspend_threads(td, p, 0);
2579 }
2580 if ((td->td_dbgflags & TDB_STOPATFORK) != 0) {
2581 td->td_dbgflags &= ~TDB_STOPATFORK;
2582 }
2583 stopme:
2584 thread_suspend_switch(td, p);
2585 if (p->p_xthread == td)
2586 p->p_xthread = NULL;
2587 if (!(p->p_flag & P_TRACED))
2588 break;
2589 if (td->td_dbgflags & TDB_SUSPEND) {
2590 if (p->p_flag & P_SINGLE_EXIT)
2591 break;
2592 goto stopme;
2593 }
2594 }
2595 PROC_SUNLOCK(p);
2596 }
2597
2598 if (si != NULL && sig == td->td_xsig) {
2599 /* Parent wants us to take the original signal unchanged. */
2600 si->ksi_flags |= KSI_HEAD;
2601 if (sigqueue_add(&td->td_sigqueue, sig, si) != 0)
2602 si->ksi_signo = 0;
2603 } else if (td->td_xsig != 0) {
2604 /*
2605 * If parent wants us to take a new signal, then it will leave
2606 * it in td->td_xsig; otherwise we just look for signals again.
2607 */
2608 ksiginfo_init(&ksi);
2609 ksi.ksi_signo = td->td_xsig;
2610 ksi.ksi_flags |= KSI_PTRACE;
2611 prop = sigprop(td->td_xsig);
2612 td2 = sigtd(p, td->td_xsig, prop);
2613 tdsendsignal(p, td2, td->td_xsig, &ksi);
2614 if (td != td2)
2615 return (0);
2616 }
2617
2618 return (td->td_xsig);
2619 }
2620
2621 static void
2622 reschedule_signals(struct proc *p, sigset_t block, int flags)
2623 {
2624 struct sigacts *ps;
2625 struct thread *td;
2626 int sig;
2627
2628 PROC_LOCK_ASSERT(p, MA_OWNED);
2629 ps = p->p_sigacts;
2630 mtx_assert(&ps->ps_mtx, (flags & SIGPROCMASK_PS_LOCKED) != 0 ?
2631 MA_OWNED : MA_NOTOWNED);
2632 if (SIGISEMPTY(p->p_siglist))
2633 return;
2634 SIGSETAND(block, p->p_siglist);
2635 while ((sig = sig_ffs(&block)) != 0) {
2636 SIGDELSET(block, sig);
2637 td = sigtd(p, sig, 0);
2638 signotify(td);
2639 if (!(flags & SIGPROCMASK_PS_LOCKED))
2640 mtx_lock(&ps->ps_mtx);
2641 if (p->p_flag & P_TRACED ||
2642 (SIGISMEMBER(ps->ps_sigcatch, sig) &&
2643 !SIGISMEMBER(td->td_sigmask, sig)))
2644 tdsigwakeup(td, sig, SIG_CATCH,
2645 (SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR :
2646 ERESTART));
2647 if (!(flags & SIGPROCMASK_PS_LOCKED))
2648 mtx_unlock(&ps->ps_mtx);
2649 }
2650 }
2651
2652 void
2653 tdsigcleanup(struct thread *td)
2654 {
2655 struct proc *p;
2656 sigset_t unblocked;
2657
2658 p = td->td_proc;
2659 PROC_LOCK_ASSERT(p, MA_OWNED);
2660
2661 sigqueue_flush(&td->td_sigqueue);
2662 if (p->p_numthreads == 1)
2663 return;
2664
2665 /*
2666 * Since we cannot handle signals, notify signal post code
2667 * about this by filling the sigmask.
2668 *
2669 * Also, if needed, wake up thread(s) that do not block the
2670 * same signals as the exiting thread, since the thread might
2671 * have been selected for delivery and woken up.
2672 */
2673 SIGFILLSET(unblocked);
2674 SIGSETNAND(unblocked, td->td_sigmask);
2675 SIGFILLSET(td->td_sigmask);
2676 reschedule_signals(p, unblocked, 0);
2677
2678 }
2679
2680 static int
2681 sigdeferstop_curr_flags(int cflags)
2682 {
2683
2684 MPASS((cflags & (TDF_SEINTR | TDF_SERESTART)) == 0 ||
2685 (cflags & TDF_SBDRY) != 0);
2686 return (cflags & (TDF_SBDRY | TDF_SEINTR | TDF_SERESTART));
2687 }
2688
2689 /*
2690 * Defer the delivery of SIGSTOP for the current thread, according to
2691 * the requested mode. Returns previous flags, which must be restored
2692 * by sigallowstop().
2693 *
2694 * TDF_SBDRY, TDF_SEINTR, and TDF_SERESTART flags are only set and
2695 * cleared by the current thread, which allows the lock-less read-only
2696 * accesses below.
2697 */
2698 int
2699 sigdeferstop_impl(int mode)
2700 {
2701 struct thread *td;
2702 int cflags, nflags;
2703
2704 td = curthread;
2705 cflags = sigdeferstop_curr_flags(td->td_flags);
2706 switch (mode) {
2707 case SIGDEFERSTOP_NOP:
2708 nflags = cflags;
2709 break;
2710 case SIGDEFERSTOP_OFF:
2711 nflags = 0;
2712 break;
2713 case SIGDEFERSTOP_SILENT:
2714 nflags = (cflags | TDF_SBDRY) & ~(TDF_SEINTR | TDF_SERESTART);
2715 break;
2716 case SIGDEFERSTOP_EINTR:
2717 nflags = (cflags | TDF_SBDRY | TDF_SEINTR) & ~TDF_SERESTART;
2718 break;
2719 case SIGDEFERSTOP_ERESTART:
2720 nflags = (cflags | TDF_SBDRY | TDF_SERESTART) & ~TDF_SEINTR;
2721 break;
2722 default:
2723 panic("sigdeferstop: invalid mode %x", mode);
2724 break;
2725 }
2726 if (cflags == nflags)
2727 return (SIGDEFERSTOP_VAL_NCHG);
2728 thread_lock(td);
2729 td->td_flags = (td->td_flags & ~cflags) | nflags;
2730 thread_unlock(td);
2731 return (cflags);
2732 }
2733
2734 /*
2735 * Restores the STOP handling mode, typically permitting the delivery
2736 * of SIGSTOP for the current thread. This does not immediately
2737 * suspend if a stop was posted. Instead, the thread will suspend
2738 * either via ast() or a subsequent interruptible sleep.
2739 */
2740 void
2741 sigallowstop_impl(int prev)
2742 {
2743 struct thread *td;
2744 int cflags;
2745
2746 KASSERT(prev != SIGDEFERSTOP_VAL_NCHG, ("failed sigallowstop"));
2747 KASSERT((prev & ~(TDF_SBDRY | TDF_SEINTR | TDF_SERESTART)) == 0,
2748 ("sigallowstop: incorrect previous mode %x", prev));
2749 td = curthread;
2750 cflags = sigdeferstop_curr_flags(td->td_flags);
2751 if (cflags != prev) {
2752 thread_lock(td);
2753 td->td_flags = (td->td_flags & ~cflags) | prev;
2754 thread_unlock(td);
2755 }
2756 }
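
/*
 * Usage sketch (illustrative): the usual bracket goes through the
 * sigdeferstop()/sigallowstop() wrappers in <sys/signalvar.h>, which are
 * assumed here to call the *_impl functions above only when the flags
 * actually change.  The operation in the middle is a hypothetical stand-in
 * for any sleepable work that must not be interrupted by a stop.
 */
static int
example_nostop_op(struct vnode *vp)
{
	int error, stop_prev;

	stop_prev = sigdeferstop(SIGDEFERSTOP_SILENT);	/* hold off SIGSTOP delivery */
	error = some_sleepable_operation(vp);		/* hypothetical stand-in */
	sigallowstop(stop_prev);			/* restore the previous mode */
	return (error);
}
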
2757
2758 /*
2759 * If the current process has received a signal (should be caught or cause
2760 * termination, should interrupt current syscall), return the signal number.
2761 * Stop signals with default action are processed immediately, then cleared;
2762 * they aren't returned. This is checked after each entry to the system for
2763 * a syscall or trap (though this can usually be done without calling issignal
2764 * by checking the pending signal masks in cursig.) The normal call
2765 * sequence is
2766 *
2767 * while (sig = cursig(curthread))
2768 * postsig(sig);
2769 */
2770 static int
2771 issignal(struct thread *td)
2772 {
2773 struct proc *p;
2774 struct sigacts *ps;
2775 struct sigqueue *queue;
2776 sigset_t sigpending;
2777 ksiginfo_t ksi;
2778 int prop, sig, traced;
2779
2780 p = td->td_proc;
2781 ps = p->p_sigacts;
2782 mtx_assert(&ps->ps_mtx, MA_OWNED);
2783 PROC_LOCK_ASSERT(p, MA_OWNED);
2784 for (;;) {
2785 traced = (p->p_flag & P_TRACED) || (p->p_stops & S_SIG);
2786
2787 sigpending = td->td_sigqueue.sq_signals;
2788 SIGSETOR(sigpending, p->p_sigqueue.sq_signals);
2789 SIGSETNAND(sigpending, td->td_sigmask);
2790
2791 if ((p->p_flag & P_PPWAIT) != 0 || (td->td_flags &
2792 (TDF_SBDRY | TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY)
2793 SIG_STOPSIGMASK(sigpending);
2794 if (SIGISEMPTY(sigpending)) /* no signal to send */
2795 return (0);
2796 if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED &&
2797 (p->p_flag2 & P2_PTRACE_FSTP) != 0 &&
2798 SIGISMEMBER(sigpending, SIGSTOP)) {
2799 /*
2800 * If debugger just attached, always consume
2801 * SIGSTOP from ptrace(PT_ATTACH) first, to
2802 * execute the debugger attach ritual in
2803 * order.
2804 */
2805 sig = SIGSTOP;
2806 td->td_dbgflags |= TDB_FSTP;
2807 } else {
2808 sig = sig_ffs(&sigpending);
2809 }
2810
2811 if (p->p_stops & S_SIG) {
2812 mtx_unlock(&ps->ps_mtx);
2813 stopevent(p, S_SIG, sig);
2814 mtx_lock(&ps->ps_mtx);
2815 }
2816
2817 /*
2818 * We should see pending but ignored signals
2819 * only if P_TRACED was on when they were posted.
2820 */
2821 if (SIGISMEMBER(ps->ps_sigignore, sig) && (traced == 0)) {
2822 sigqueue_delete(&td->td_sigqueue, sig);
2823 sigqueue_delete(&p->p_sigqueue, sig);
2824 continue;
2825 }
2826 if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED) {
2827 /*
2828 * If traced, always stop.
2829 * Remove old signal from queue before the stop.
2830 * XXX shrug off debugger, it causes siginfo to
2831 * be thrown away.
2832 */
2833 queue = &td->td_sigqueue;
2834 ksiginfo_init(&ksi);
2835 if (sigqueue_get(queue, sig, &ksi) == 0) {
2836 queue = &p->p_sigqueue;
2837 sigqueue_get(queue, sig, &ksi);
2838 }
2839 td->td_si = ksi.ksi_info;
2840
2841 mtx_unlock(&ps->ps_mtx);
2842 sig = ptracestop(td, sig, &ksi);
2843 mtx_lock(&ps->ps_mtx);
2844
2845 /*
2846 * Keep looking if the debugger discarded or
2847 * replaced the signal.
2848 */
2849 if (sig == 0)
2850 continue;
2851
2852 /*
2853 * If the signal became masked, re-queue it.
2854 */
2855 if (SIGISMEMBER(td->td_sigmask, sig)) {
2856 ksi.ksi_flags |= KSI_HEAD;
2857 sigqueue_add(&p->p_sigqueue, sig, &ksi);
2858 continue;
2859 }
2860
2861 /*
2862 * If the traced bit got turned off, requeue
2863 * the signal and go back up to the top to
2864 * rescan signals. This ensures that p_sig*
2865 * and p_sigact are consistent.
2866 */
2867 if ((p->p_flag & P_TRACED) == 0) {
2868 ksi.ksi_flags |= KSI_HEAD;
2869 sigqueue_add(queue, sig, &ksi);
2870 continue;
2871 }
2872 }
2873
2874 prop = sigprop(sig);
2875
2876 /*
2877 * Decide whether the signal should be returned.
2878 * Return the signal's number, or fall through
2879 * to clear it from the pending mask.
2880 */
2881 switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {
2882
2883 case (intptr_t)SIG_DFL:
2884 /*
2885 * Don't take default actions on system processes.
2886 */
2887 if (p->p_pid <= 1) {
2888 #ifdef DIAGNOSTIC
2889 /*
2890 * Are you sure you want to ignore SIGSEGV
2891 * in init? XXX
2892 */
2893 printf("Process (pid %lu) got signal %d\n",
2894 (u_long)p->p_pid, sig);
2895 #endif
2896 break; /* == ignore */
2897 }
2898 /*
2899 * If there is a pending stop signal to process with
2900 * default action, stop here, then clear the signal.
2901 * Traced or exiting processes should ignore stops.
2902 * Additionally, a member of an orphaned process group
2903 * should ignore tty stops.
2904 */
2905 if (prop & SIGPROP_STOP) {
2906 if (p->p_flag &
2907 (P_TRACED | P_WEXIT | P_SINGLE_EXIT) ||
2908 (p->p_pgrp->pg_jobc == 0 &&
2909 prop & SIGPROP_TTYSTOP))
2910 break; /* == ignore */
2911 if (TD_SBDRY_INTR(td)) {
2912 KASSERT((td->td_flags & TDF_SBDRY) != 0,
2913 ("lost TDF_SBDRY"));
2914 return (-1);
2915 }
2916 mtx_unlock(&ps->ps_mtx);
2917 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
2918 &p->p_mtx.lock_object, "Catching SIGSTOP");
2919 sigqueue_delete(&td->td_sigqueue, sig);
2920 sigqueue_delete(&p->p_sigqueue, sig);
2921 p->p_flag |= P_STOPPED_SIG;
2922 p->p_xsig = sig;
2923 PROC_SLOCK(p);
2924 sig_suspend_threads(td, p, 0);
2925 thread_suspend_switch(td, p);
2926 PROC_SUNLOCK(p);
2927 mtx_lock(&ps->ps_mtx);
2928 goto next;
2929 } else if (prop & SIGPROP_IGNORE) {
2930 /*
2931 * Except for SIGCONT, shouldn't get here.
2932 * Default action is to ignore; drop it.
2933 */
2934 break; /* == ignore */
2935 } else
2936 return (sig);
2937 /*NOTREACHED*/
2938
2939 case (intptr_t)SIG_IGN:
2940 /*
2941 * Masking above should prevent us ever trying
2942 * to take action on an ignored signal other
2943 * than SIGCONT, unless the process is traced.
2944 */
2945 if ((prop & SIGPROP_CONT) == 0 &&
2946 (p->p_flag & P_TRACED) == 0)
2947 printf("issignal\n");
2948 break; /* == ignore */
2949
2950 default:
2951 /*
2952 * This signal has an action, let
2953 * postsig() process it.
2954 */
2955 return (sig);
2956 }
2957 sigqueue_delete(&td->td_sigqueue, sig); /* take the signal! */
2958 sigqueue_delete(&p->p_sigqueue, sig);
2959 next:;
2960 }
2961 /* NOTREACHED */
2962 }
2963
2964 void
2965 thread_stopped(struct proc *p)
2966 {
2967 int n;
2968
2969 PROC_LOCK_ASSERT(p, MA_OWNED);
2970 PROC_SLOCK_ASSERT(p, MA_OWNED);
2971 n = p->p_suspcount;
2972 if (p == curproc)
2973 n++;
2974 if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) {
2975 PROC_SUNLOCK(p);
2976 p->p_flag &= ~P_WAITED;
2977 PROC_LOCK(p->p_pptr);
2978 childproc_stopped(p, (p->p_flag & P_TRACED) ?
2979 CLD_TRAPPED : CLD_STOPPED);
2980 PROC_UNLOCK(p->p_pptr);
2981 PROC_SLOCK(p);
2982 }
2983 }
2984
2985 /*
2986 * Take the action for the specified signal
2987 * from the current set of pending signals.
2988 */
2989 int
2990 postsig(int sig)
2991 {
2992 struct thread *td;
2993 struct proc *p;
2994 struct sigacts *ps;
2995 sig_t action;
2996 ksiginfo_t ksi;
2997 sigset_t returnmask;
2998
2999 KASSERT(sig != 0, ("postsig"));
3000
3001 td = curthread;
3002 p = td->td_proc;
3003 PROC_LOCK_ASSERT(p, MA_OWNED);
3004 ps = p->p_sigacts;
3005 mtx_assert(&ps->ps_mtx, MA_OWNED);
3006 ksiginfo_init(&ksi);
3007 if (sigqueue_get(&td->td_sigqueue, sig, &ksi) == 0 &&
3008 sigqueue_get(&p->p_sigqueue, sig, &ksi) == 0)
3009 return (0);
3010 ksi.ksi_signo = sig;
3011 if (ksi.ksi_code == SI_TIMER)
3012 itimer_accept(p, ksi.ksi_timerid, &ksi);
3013 action = ps->ps_sigact[_SIG_IDX(sig)];
3014 #ifdef KTRACE
3015 if (KTRPOINT(td, KTR_PSIG))
3016 ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ?
3017 &td->td_oldsigmask : &td->td_sigmask, ksi.ksi_code);
3018 #endif
3019 if ((p->p_stops & S_SIG) != 0) {
3020 mtx_unlock(&ps->ps_mtx);
3021 stopevent(p, S_SIG, sig);
3022 mtx_lock(&ps->ps_mtx);
3023 }
3024
3025 if (action == SIG_DFL) {
3026 /*
3027 * Default action, where the default is to kill
3028 * the process. (Other cases were ignored above.)
3029 */
3030 mtx_unlock(&ps->ps_mtx);
3031 proc_td_siginfo_capture(td, &ksi.ksi_info);
3032 sigexit(td, sig);
3033 /* NOTREACHED */
3034 } else {
3035 /*
3036 * If we get here, the signal must be caught.
3037 */
3038 KASSERT(action != SIG_IGN, ("postsig action %p", action));
3039 KASSERT(!SIGISMEMBER(td->td_sigmask, sig),
3040 ("postsig action: blocked sig %d", sig));
3041
3042 /*
3043 * Set the new mask value and also defer further
3044 * occurrences of this signal.
3045 *
3046 * Special case: user has done a sigsuspend. Here the
3047 * current mask is not of interest, but rather the
3048 * mask from before the sigsuspend is what we want
3049 * restored after the signal processing is completed.
3050 */
3051 if (td->td_pflags & TDP_OLDMASK) {
3052 returnmask = td->td_oldsigmask;
3053 td->td_pflags &= ~TDP_OLDMASK;
3054 } else
3055 returnmask = td->td_sigmask;
3056
3057 if (p->p_sig == sig) {
3058 p->p_code = 0;
3059 p->p_sig = 0;
3060 }
3061 (*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask);
3062 postsig_done(sig, td, ps);
3063 }
3064 return (1);
3065 }
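
/*
 * Context sketch (illustrative): the delivery loop described in the comment
 * above issignal() is assumed to run on the return-to-userspace path with
 * the proc lock and ps_mtx held, matching the lock assertions in both
 * cursig()/issignal() and postsig().  The helper name is hypothetical.
 */
static void
example_deliver_pending(struct thread *td)
{
	struct proc *p = td->td_proc;
	int sig;

	PROC_LOCK(p);
	mtx_lock(&p->p_sigacts->ps_mtx);
	while ((sig = cursig(td)) != 0)
		postsig(sig);
	mtx_unlock(&p->p_sigacts->ps_mtx);
	PROC_UNLOCK(p);
}
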
3066
3067 void
3068 proc_wkilled(struct proc *p)
3069 {
3070
3071 PROC_LOCK_ASSERT(p, MA_OWNED);
3072 if ((p->p_flag & P_WKILLED) == 0) {
3073 p->p_flag |= P_WKILLED;
3074 /*
3075 * Notify swapper that there is a process to swap in.
3076 * The notification is racy; at worst it would take 10
3077 * seconds for the swapper process to notice.
3078 */
3079 if ((p->p_flag & (P_INMEM | P_SWAPPINGIN)) == 0)
3080 wakeup(&proc0);
3081 }
3082 }
3083
3084 /*
3085 * Kill the current process for stated reason.
3086 */
3087 void
3088 killproc(struct proc *p, char *why)
3089 {
3090
3091 PROC_LOCK_ASSERT(p, MA_OWNED);
3092 CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)", p, p->p_pid,
3093 p->p_comm);
3094 log(LOG_ERR, "pid %d (%s), uid %d, was killed: %s\n", p->p_pid,
3095 p->p_comm, p->p_ucred ? p->p_ucred->cr_uid : -1, why);
3096 proc_wkilled(p);
3097 kern_psignal(p, SIGKILL);
3098 }
3099
3100 /*
3101 * Force the current process to exit with the specified signal, dumping core
3102 * if appropriate. We bypass the normal tests for masked and caught signals,
3103 * allowing unrecoverable failures to terminate the process without changing
3104 * signal state. Mark the accounting record with the signal termination.
3105 * If dumping core, save the signal number for the debugger. Calls exit and
3106 * does not return.
3107 */
3108 void
3109 sigexit(struct thread *td, int sig)
3110 {
3111 struct proc *p = td->td_proc;
3112
3113 PROC_LOCK_ASSERT(p, MA_OWNED);
3114 p->p_acflag |= AXSIG;
3115 /*
3116 * We must be single-threading to generate a core dump. This
3117 * ensures that the registers in the core file are up-to-date.
3118 * Also, the ELF dump handler assumes that the thread list doesn't
3119 * change out from under it.
3120 *
3121 * XXX If another thread attempts to single-thread before us
3122 * (e.g. via fork()), we won't get a dump at all.
3123 */
3124 if ((sigprop(sig) & SIGPROP_CORE) &&
3125 thread_single(p, SINGLE_NO_EXIT) == 0) {
3126 p->p_sig = sig;
3127 /*
3128 * Log signals which would cause core dumps
3129 * (Log as LOG_INFO to appease those who don't want
3130 * these messages.)
3131 * XXX : Todo, as well as euid, write out ruid too
3132 * Note that coredump() drops proc lock.
3133 */
3134 if (coredump(td) == 0)
3135 sig |= WCOREFLAG;
3136 if (kern_logsigexit)
3137 log(LOG_INFO,
3138 "pid %d (%s), uid %d: exited on signal %d%s\n",
3139 p->p_pid, p->p_comm,
3140 td->td_ucred ? td->td_ucred->cr_uid : -1,
3141 sig &~ WCOREFLAG,
3142 sig & WCOREFLAG ? " (core dumped)" : "");
3143 } else
3144 PROC_UNLOCK(p);
3145 exit1(td, 0, sig);
3146 /* NOTREACHED */
3147 }
3148
3149 /*
3150 * Send queued SIGCHLD to parent when child process's state
3151 * is changed.
3152 */
3153 static void
3154 sigparent(struct proc *p, int reason, int status)
3155 {
3156 PROC_LOCK_ASSERT(p, MA_OWNED);
3157 PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
3158
3159 if (p->p_ksi != NULL) {
3160 p->p_ksi->ksi_signo = SIGCHLD;
3161 p->p_ksi->ksi_code = reason;
3162 p->p_ksi->ksi_status = status;
3163 p->p_ksi->ksi_pid = p->p_pid;
3164 p->p_ksi->ksi_uid = p->p_ucred->cr_ruid;
3165 if (KSI_ONQ(p->p_ksi))
3166 return;
3167 }
3168 pksignal(p->p_pptr, SIGCHLD, p->p_ksi);
3169 }
3170
3171 static void
3172 childproc_jobstate(struct proc *p, int reason, int sig)
3173 {
3174 struct sigacts *ps;
3175
3176 PROC_LOCK_ASSERT(p, MA_OWNED);
3177 PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
3178
3179 /*
3180 * Wake up the parent sleeping in kern_wait() and also
3181 * send SIGCHLD to it.  SIGCHLD alone does not guarantee
3182 * that the parent will wake up, because the parent may
3183 * have masked the signal.
3184 */
3185 p->p_pptr->p_flag |= P_STATCHILD;
3186 wakeup(p->p_pptr);
3187
3188 ps = p->p_pptr->p_sigacts;
3189 mtx_lock(&ps->ps_mtx);
3190 if ((ps->ps_flag & PS_NOCLDSTOP) == 0) {
3191 mtx_unlock(&ps->ps_mtx);
3192 sigparent(p, reason, sig);
3193 } else
3194 mtx_unlock(&ps->ps_mtx);
3195 }
3196
3197 void
3198 childproc_stopped(struct proc *p, int reason)
3199 {
3200
3201 childproc_jobstate(p, reason, p->p_xsig);
3202 }
3203
3204 void
3205 childproc_continued(struct proc *p)
3206 {
3207 childproc_jobstate(p, CLD_CONTINUED, SIGCONT);
3208 }
3209
3210 void
3211 childproc_exited(struct proc *p)
3212 {
3213 int reason, status;
3214
3215 if (WCOREDUMP(p->p_xsig)) {
3216 reason = CLD_DUMPED;
3217 status = WTERMSIG(p->p_xsig);
3218 } else if (WIFSIGNALED(p->p_xsig)) {
3219 reason = CLD_KILLED;
3220 status = WTERMSIG(p->p_xsig);
3221 } else {
3222 reason = CLD_EXITED;
3223 status = p->p_xexit;
3224 }
3225 /*
3226 * XXX avoid calling wakeup(p->p_pptr), the work is
3227 * done in exit1().
3228 */
3229 sigparent(p, reason, status);
3230 }
3231
3232 #define MAX_NUM_CORE_FILES 100000
3233 #ifndef NUM_CORE_FILES
3234 #define NUM_CORE_FILES 5
3235 #endif
3236 CTASSERT(NUM_CORE_FILES >= 0 && NUM_CORE_FILES <= MAX_NUM_CORE_FILES);
3237 static int num_cores = NUM_CORE_FILES;
3238
3239 static int
3240 sysctl_debug_num_cores_check(SYSCTL_HANDLER_ARGS)
3241 {
3242 int error;
3243 int new_val;
3244
3245 new_val = num_cores;
3246 error = sysctl_handle_int(oidp, &new_val, 0, req);
3247 if (error != 0 || req->newptr == NULL)
3248 return (error);
3249 if (new_val > MAX_NUM_CORE_FILES)
3250 new_val = MAX_NUM_CORE_FILES;
3251 if (new_val < 0)
3252 new_val = 0;
3253 num_cores = new_val;
3254 return (0);
3255 }
3256 SYSCTL_PROC(_debug, OID_AUTO, ncores, CTLTYPE_INT|CTLFLAG_RW,
3257 0, sizeof(int), sysctl_debug_num_cores_check, "I",
3258 "Maximum number of generated process corefiles while using index format");
3259
3260 #define GZIP_SUFFIX ".gz"
3261 #define ZSTD_SUFFIX ".zst"
3262
3263 int compress_user_cores = 0;
3264
3265 static int
3266 sysctl_compress_user_cores(SYSCTL_HANDLER_ARGS)
3267 {
3268 int error, val;
3269
3270 val = compress_user_cores;
3271 error = sysctl_handle_int(oidp, &val, 0, req);
3272 if (error != 0 || req->newptr == NULL)
3273 return (error);
3274 if (val != 0 && !compressor_avail(val))
3275 return (EINVAL);
3276 compress_user_cores = val;
3277 return (error);
3278 }
3279 SYSCTL_PROC(_kern, OID_AUTO, compress_user_cores, CTLTYPE_INT | CTLFLAG_RWTUN,
3280 0, sizeof(int), sysctl_compress_user_cores, "I",
3281 "Enable compression of user corefiles ("
3282 __XSTRING(COMPRESS_GZIP) " = gzip, "
3283 __XSTRING(COMPRESS_ZSTD) " = zstd)");
3284
3285 int compress_user_cores_level = 6;
3286 SYSCTL_INT(_kern, OID_AUTO, compress_user_cores_level, CTLFLAG_RWTUN,
3287 &compress_user_cores_level, 0,
3288 "Corefile compression level");
3289
3290 /*
3291 * Protect the access to corefilename[] by allproc_lock.
3292 */
3293 #define corefilename_lock allproc_lock
3294
3295 static char corefilename[MAXPATHLEN] = {"%N.core"};
3296 TUNABLE_STR("kern.corefile", corefilename, sizeof(corefilename));
3297
3298 static int
3299 sysctl_kern_corefile(SYSCTL_HANDLER_ARGS)
3300 {
3301 int error;
3302
3303 sx_xlock(&corefilename_lock);
3304 error = sysctl_handle_string(oidp, corefilename, sizeof(corefilename),
3305 req);
3306 sx_xunlock(&corefilename_lock);
3307
3308 return (error);
3309 }
3310 SYSCTL_PROC(_kern, OID_AUTO, corefile, CTLTYPE_STRING | CTLFLAG_RW |
3311 CTLFLAG_MPSAFE, 0, 0, sysctl_kern_corefile, "A",
3312 "Process corefile name format string");
3313
3314 static void
3315 vnode_close_locked(struct thread *td, struct vnode *vp)
3316 {
3317
3318 VOP_UNLOCK(vp, 0);
3319 vn_close(vp, FWRITE, td->td_ucred, td);
3320 }
3321
3322 /*
3323 * If the core format has a %I in it, then we need to check
3324 * for existing corefiles before defining a name.
3325 * To do this we iterate over 0..ncores to find a
3326 * non-existing core file name to use. If all core files are
3327 * already used we choose the oldest one.
3328 */
3329 static int
3330 corefile_open_last(struct thread *td, char *name, int indexpos,
3331 int indexlen, int ncores, struct vnode **vpp)
3332 {
3333 struct vnode *oldvp, *nextvp, *vp;
3334 struct vattr vattr;
3335 struct nameidata nd;
3336 int error, i, flags, oflags, cmode;
3337 char ch;
3338 struct timespec lasttime;
3339
3340 nextvp = oldvp = NULL;
3341 cmode = S_IRUSR | S_IWUSR;
3342 oflags = VN_OPEN_NOAUDIT | VN_OPEN_NAMECACHE |
3343 (capmode_coredump ? VN_OPEN_NOCAPCHECK : 0);
3344
3345 for (i = 0; i < ncores; i++) {
3346 flags = O_CREAT | FWRITE | O_NOFOLLOW;
3347
3348 ch = name[indexpos + indexlen];
3349 (void)snprintf(name + indexpos, indexlen + 1, "%.*u", indexlen,
3350 i);
3351 name[indexpos + indexlen] = ch;
3352
3353 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td);
3354 error = vn_open_cred(&nd, &flags, cmode, oflags, td->td_ucred,
3355 NULL);
3356 if (error != 0)
3357 break;
3358
3359 vp = nd.ni_vp;
3360 NDFREE(&nd, NDF_ONLY_PNBUF);
3361 if ((flags & O_CREAT) == O_CREAT) {
3362 nextvp = vp;
3363 break;
3364 }
3365
3366 error = VOP_GETATTR(vp, &vattr, td->td_ucred);
3367 if (error != 0) {
3368 vnode_close_locked(td, vp);
3369 break;
3370 }
3371
3372 if (oldvp == NULL ||
3373 lasttime.tv_sec > vattr.va_mtime.tv_sec ||
3374 (lasttime.tv_sec == vattr.va_mtime.tv_sec &&
3375 lasttime.tv_nsec >= vattr.va_mtime.tv_nsec)) {
3376 if (oldvp != NULL)
3377 vnode_close_locked(td, oldvp);
3378 oldvp = vp;
3379 lasttime = vattr.va_mtime;
3380 } else {
3381 vnode_close_locked(td, vp);
3382 }
3383 }
3384
3385 if (oldvp != NULL) {
3386 if (nextvp == NULL)
3387 nextvp = oldvp;
3388 else
3389 vnode_close_locked(td, oldvp);
3390 }
3391 if (error != 0) {
3392 if (nextvp != NULL)
3393 vnode_close_locked(td, oldvp);
3394 } else {
3395 *vpp = nextvp;
3396 }
3397
3398 return (error);
3399 }
3400
3401 /*
3402 * corefile_open(comm, uid, pid, td, compress, vpp, namep)
3403 * Expand the name described in corefilename, using name, uid, and pid
3404 * and open/create core file.
3405 * corefilename is a printf-like string, with three format specifiers:
3406 * %N name of process ("name")
3407 * %P process id (pid)
3408 * %U user id (uid)
3409 * For example, "%N.core" is the default; they can be disabled completely
3410 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
3411 * This is controlled by the sysctl variable kern.corefile (see above).
3412 */
3413 static int
3414 corefile_open(const char *comm, uid_t uid, pid_t pid, struct thread *td,
3415 int compress, struct vnode **vpp, char **namep)
3416 {
3417 struct sbuf sb;
3418 struct nameidata nd;
3419 const char *format;
3420 char *hostname, *name;
3421 int cmode, error, flags, i, indexpos, indexlen, oflags, ncores;
3422
3423 hostname = NULL;
3424 format = corefilename;
3425 name = malloc(MAXPATHLEN, M_TEMP, M_WAITOK | M_ZERO);
3426 indexlen = 0;
3427 indexpos = -1;
3428 ncores = num_cores;
3429 (void)sbuf_new(&sb, name, MAXPATHLEN, SBUF_FIXEDLEN);
3430 sx_slock(&corefilename_lock);
3431 for (i = 0; format[i] != '\0'; i++) {
3432 switch (format[i]) {
3433 case '%': /* Format character */
3434 i++;
3435 switch (format[i]) {
3436 case '%':
3437 sbuf_putc(&sb, '%');
3438 break;
3439 case 'H': /* hostname */
3440 if (hostname == NULL) {
3441 hostname = malloc(MAXHOSTNAMELEN,
3442 M_TEMP, M_WAITOK);
3443 }
3444 getcredhostname(td->td_ucred, hostname,
3445 MAXHOSTNAMELEN);
3446 sbuf_printf(&sb, "%s", hostname);
3447 break;
3448 case 'I': /* autoincrementing index */
3449 if (indexpos != -1) {
3450 sbuf_printf(&sb, "%%I");
3451 break;
3452 }
3453
3454 indexpos = sbuf_len(&sb);
3455 sbuf_printf(&sb, "%u", ncores - 1);
3456 indexlen = sbuf_len(&sb) - indexpos;
3457 break;
3458 case 'N': /* process name */
3459 sbuf_printf(&sb, "%s", comm);
3460 break;
3461 case 'P': /* process id */
3462 sbuf_printf(&sb, "%u", pid);
3463 break;
3464 case 'U': /* user id */
3465 sbuf_printf(&sb, "%u", uid);
3466 break;
3467 default:
3468 log(LOG_ERR,
3469 "Unknown format character %c in "
3470 "corename `%s'\n", format[i], format);
3471 break;
3472 }
3473 break;
3474 default:
3475 sbuf_putc(&sb, format[i]);
3476 break;
3477 }
3478 }
3479 sx_sunlock(&corefilename_lock);
3480 free(hostname, M_TEMP);
3481 if (compress == COMPRESS_GZIP)
3482 sbuf_printf(&sb, GZIP_SUFFIX);
3483 else if (compress == COMPRESS_ZSTD)
3484 sbuf_printf(&sb, ZSTD_SUFFIX);
3485 if (sbuf_error(&sb) != 0) {
3486 log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too "
3487 "long\n", (long)pid, comm, (u_long)uid);
3488 sbuf_delete(&sb);
3489 free(name, M_TEMP);
3490 return (ENOMEM);
3491 }
3492 sbuf_finish(&sb);
3493 sbuf_delete(&sb);
3494
3495 if (indexpos != -1) {
3496 error = corefile_open_last(td, name, indexpos, indexlen, ncores,
3497 vpp);
3498 if (error != 0) {
3499 log(LOG_ERR,
3500 "pid %d (%s), uid (%u): Path `%s' failed "
3501 "on initial open test, error = %d\n",
3502 pid, comm, uid, name, error);
3503 }
3504 } else {
3505 cmode = S_IRUSR | S_IWUSR;
3506 oflags = VN_OPEN_NOAUDIT | VN_OPEN_NAMECACHE |
3507 (capmode_coredump ? VN_OPEN_NOCAPCHECK : 0);
3508 flags = O_CREAT | FWRITE | O_NOFOLLOW;
3509
3510 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td);
3511 error = vn_open_cred(&nd, &flags, cmode, oflags, td->td_ucred,
3512 NULL);
3513 if (error == 0) {
3514 *vpp = nd.ni_vp;
3515 NDFREE(&nd, NDF_ONLY_PNBUF);
3516 }
3517 }
3518
3519 if (error != 0) {
3520 #ifdef AUDIT
3521 audit_proc_coredump(td, name, error);
3522 #endif
3523 free(name, M_TEMP);
3524 return (error);
3525 }
3526 *namep = name;
3527 return (0);
3528 }
3529
3530 /*
3531 * Dump a process' core. The main routine does some
3532 * policy checking, and creates the name of the coredump;
3533 * then it passes on a vnode and a size limit to the process-specific
3534 * coredump routine if there is one; if there _is not_ one, it returns
3535 * ENOSYS; otherwise it returns the error from the process-specific routine.
3536 */
3537
3538 static int
3539 coredump(struct thread *td)
3540 {
3541 struct proc *p = td->td_proc;
3542 struct ucred *cred = td->td_ucred;
3543 struct vnode *vp;
3544 struct flock lf;
3545 struct vattr vattr;
3546 int error, error1, locked;
3547 char *name; /* name of corefile */
3548 void *rl_cookie;
3549 off_t limit;
3550 char *fullpath, *freepath = NULL;
3551 struct sbuf *sb;
3552
3553 PROC_LOCK_ASSERT(p, MA_OWNED);
3554 MPASS((p->p_flag & P_HADTHREADS) == 0 || p->p_singlethread == td);
3555 _STOPEVENT(p, S_CORE, 0);
3556
3557 if (!do_coredump || (!sugid_coredump && (p->p_flag & P_SUGID) != 0) ||
3558 (p->p_flag2 & P2_NOTRACE) != 0) {
3559 PROC_UNLOCK(p);
3560 return (EFAULT);
3561 }
3562
3563 /*
3564 * Note that the bulk of limit checking is done after
3565 * the corefile is created. The exception is if the limit
3566 * for corefiles is 0, in which case we don't bother
3567 * creating the corefile at all. This layout means that
3568 * a corefile is truncated instead of not being created,
3569 * if it is larger than the limit.
3570 */
3571 limit = (off_t)lim_cur(td, RLIMIT_CORE);
3572 if (limit == 0 || racct_get_available(p, RACCT_CORE) == 0) {
3573 PROC_UNLOCK(p);
3574 return (EFBIG);
3575 }
3576 PROC_UNLOCK(p);
3577
3578 error = corefile_open(p->p_comm, cred->cr_uid, p->p_pid, td,
3579 compress_user_cores, &vp, &name);
3580 if (error != 0)
3581 return (error);
3582
3583 /*
3584 * Don't dump to non-regular files or files with links.
3585 * Do not dump into system files.
3586 */
3587 if (vp->v_type != VREG || VOP_GETATTR(vp, &vattr, cred) != 0 ||
3588 vattr.va_nlink != 1 || (vp->v_vflag & VV_SYSTEM) != 0) {
3589 VOP_UNLOCK(vp, 0);
3590 error = EFAULT;
3591 goto out;
3592 }
3593
3594 VOP_UNLOCK(vp, 0);
3595
3596 /* Postpone other writers, including core dumps of other processes. */
3597 rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
3598
3599 lf.l_whence = SEEK_SET;
3600 lf.l_start = 0;
3601 lf.l_len = 0;
3602 lf.l_type = F_WRLCK;
3603 locked = (VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK) == 0);
3604
3605 VATTR_NULL(&vattr);
3606 vattr.va_size = 0;
3607 if (set_core_nodump_flag)
3608 vattr.va_flags = UF_NODUMP;
3609 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3610 VOP_SETATTR(vp, &vattr, cred);
3611 VOP_UNLOCK(vp, 0);
3612 PROC_LOCK(p);
3613 p->p_acflag |= ACORE;
3614 PROC_UNLOCK(p);
3615
3616 if (p->p_sysent->sv_coredump != NULL) {
3617 error = p->p_sysent->sv_coredump(td, vp, limit, 0);
3618 } else {
3619 error = ENOSYS;
3620 }
3621
3622 if (locked) {
3623 lf.l_type = F_UNLCK;
3624 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
3625 }
3626 vn_rangelock_unlock(vp, rl_cookie);
3627
3628 /*
3629 * Notify the userland helper that a process triggered a core dump.
3630 * This allows the helper to run an automated debugging session.
3631 */
3632 if (error != 0 || coredump_devctl == 0)
3633 goto out;
3634 sb = sbuf_new_auto();
3635 if (vn_fullpath_global(td, p->p_textvp, &fullpath, &freepath) != 0)
3636 goto out2;
3637 sbuf_printf(sb, "comm=\"");
3638 devctl_safe_quote_sb(sb, fullpath);
3639 free(freepath, M_TEMP);
3640 sbuf_printf(sb, "\" core=\"");
3641
3642 /*
3643 * We can't look up the core file vp directly: when we're replacing a core,
3644 * and at other random times, we flush the name cache, so the lookup would fail.
3645 * Instead, if the path of the core is relative, prepend the current directory.
3646 */
3647 if (name[0] != '/') {
3648 fullpath = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
3649 if (kern___getcwd(td, fullpath, UIO_SYSSPACE, MAXPATHLEN, MAXPATHLEN) != 0) {
3650 free(fullpath, M_TEMP);
3651 goto out2;
3652 }
3653 devctl_safe_quote_sb(sb, fullpath);
3654 free(fullpath, M_TEMP);
3655 sbuf_putc(sb, '/');
3656 }
3657 devctl_safe_quote_sb(sb, name);
3658 sbuf_printf(sb, "\"");
3659 if (sbuf_finish(sb) == 0)
3660 devctl_notify("kernel", "signal", "coredump", sbuf_data(sb));
3661 out2:
3662 sbuf_delete(sb);
3663 out:
3664 error1 = vn_close(vp, FWRITE, cred, td);
3665 if (error == 0)
3666 error = error1;
3667 #ifdef AUDIT
3668 audit_proc_coredump(td, name, error);
3669 #endif
3670 free(name, M_TEMP);
3671 return (error);
3672 }
3673
3674 /*
3675 * Nonexistent system call-- signal process (may want to handle it). Flag
3676 * error in case process won't see signal immediately (blocked or ignored).
3677 */
3678 #ifndef _SYS_SYSPROTO_H_
3679 struct nosys_args {
3680 int dummy;
3681 };
3682 #endif
3683 /* ARGSUSED */
3684 int
3685 nosys(struct thread *td, struct nosys_args *args)
3686 {
3687 struct proc *p;
3688
3689 p = td->td_proc;
3690
3691 PROC_LOCK(p);
3692 tdsignal(td, SIGSYS);
3693 PROC_UNLOCK(p);
3694 if (kern_lognosys == 1 || kern_lognosys == 3) {
3695 uprintf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm,
3696 td->td_sa.code);
3697 }
3698 if (kern_lognosys == 2 || kern_lognosys == 3) {
3699 printf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm,
3700 td->td_sa.code);
3701 }
3702 return (ENOSYS);
3703 }
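
/*
 * Userland sketch (illustrative): a syscall number with no handler is routed
 * to nosys(), so the caller both receives SIGSYS and sees ENOSYS.  The number
 * used below is assumed to be unimplemented.
 */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void
on_sigsys(int signo)
{
	(void)signo;		/* the default SIGSYS disposition would kill us */
}

int
main(void)
{
	signal(SIGSYS, on_sigsys);
	if (syscall(100000) == -1 && errno == ENOSYS)	/* assumed-unused number */
		printf("SIGSYS delivered, syscall returned ENOSYS\n");
	return (0);
}
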
3704
3705 /*
3706 * Send a SIGIO or SIGURG signal to a process or process group using stored
3707 * credentials rather than those of the current process.
3708 */
3709 void
3710 pgsigio(struct sigio **sigiop, int sig, int checkctty)
3711 {
3712 ksiginfo_t ksi;
3713 struct sigio *sigio;
3714
3715 ksiginfo_init(&ksi);
3716 ksi.ksi_signo = sig;
3717 ksi.ksi_code = SI_KERNEL;
3718
3719 SIGIO_LOCK();
3720 sigio = *sigiop;
3721 if (sigio == NULL) {
3722 SIGIO_UNLOCK();
3723 return;
3724 }
3725 if (sigio->sio_pgid > 0) {
3726 PROC_LOCK(sigio->sio_proc);
3727 if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
3728 kern_psignal(sigio->sio_proc, sig);
3729 PROC_UNLOCK(sigio->sio_proc);
3730 } else if (sigio->sio_pgid < 0) {
3731 struct proc *p;
3732
3733 PGRP_LOCK(sigio->sio_pgrp);
3734 LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
3735 PROC_LOCK(p);
3736 if (p->p_state == PRS_NORMAL &&
3737 CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
3738 (checkctty == 0 || (p->p_flag & P_CONTROLT)))
3739 kern_psignal(p, sig);
3740 PROC_UNLOCK(p);
3741 }
3742 PGRP_UNLOCK(sigio->sio_pgrp);
3743 }
3744 SIGIO_UNLOCK();
3745 }
3746
3747 static int
3748 filt_sigattach(struct knote *kn)
3749 {
3750 struct proc *p = curproc;
3751
3752 kn->kn_ptr.p_proc = p;
3753 kn->kn_flags |= EV_CLEAR; /* automatically set */
3754
3755 knlist_add(p->p_klist, kn, 0);
3756
3757 return (0);
3758 }
3759
3760 static void
3761 filt_sigdetach(struct knote *kn)
3762 {
3763 struct proc *p = kn->kn_ptr.p_proc;
3764
3765 knlist_remove(p->p_klist, kn, 0);
3766 }
3767
3768 /*
3769 * signal knotes are shared with proc knotes, so we apply a mask to
3770 * the hint in order to differentiate them from process hints. This
3771 * could be avoided by using a signal-specific knote list, but probably
3772 * isn't worth the trouble.
3773 */
3774 static int
3775 filt_signal(struct knote *kn, long hint)
3776 {
3777
3778 if (hint & NOTE_SIGNAL) {
3779 hint &= ~NOTE_SIGNAL;
3780
3781 if (kn->kn_id == hint)
3782 kn->kn_data++;
3783 }
3784 return (kn->kn_data != 0);
3785 }
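
/*
 * Userland sketch (illustrative): the consumer side of this filter.  Because
 * KNOTE_LOCKED() in tdsendsignal() runs before the ignore check, the knote
 * counts posts of SIGUSR1 even though its disposition is SIG_IGN here.
 */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <err.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	struct kevent kev;
	int kq;

	signal(SIGUSR1, SIG_IGN);		/* observe delivery only via kqueue */
	if ((kq = kqueue()) == -1)
		err(1, "kqueue");
	EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent(register)");
	for (;;) {
		if (kevent(kq, NULL, 0, &kev, 1, NULL) == -1)
			err(1, "kevent(wait)");
		/* kev.data counts SIGUSR1 posts since the last retrieval;
		   filt_sigattach() above sets EV_CLEAR automatically. */
		printf("SIGUSR1 posted %jd time(s)\n", (intmax_t)kev.data);
	}
}
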
3786
3787 struct sigacts *
3788 sigacts_alloc(void)
3789 {
3790 struct sigacts *ps;
3791
3792 ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO);
3793 refcount_init(&ps->ps_refcnt, 1);
3794 mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF);
3795 return (ps);
3796 }
3797
3798 void
3799 sigacts_free(struct sigacts *ps)
3800 {
3801
3802 if (refcount_release(&ps->ps_refcnt) == 0)
3803 return;
3804 mtx_destroy(&ps->ps_mtx);
3805 free(ps, M_SUBPROC);
3806 }
3807
3808 struct sigacts *
3809 sigacts_hold(struct sigacts *ps)
3810 {
3811
3812 refcount_acquire(&ps->ps_refcnt);
3813 return (ps);
3814 }
3815
3816 void
3817 sigacts_copy(struct sigacts *dest, struct sigacts *src)
3818 {
3819
3820 KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest"));
3821 mtx_lock(&src->ps_mtx);
3822 bcopy(src, dest, offsetof(struct sigacts, ps_refcnt));
3823 mtx_unlock(&src->ps_mtx);
3824 }
3825
3826 int
3827 sigacts_shared(struct sigacts *ps)
3828 {
3829
3830 return (ps->ps_refcnt > 1);
3831 }
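
/*
 * Lifecycle sketch (illustrative, modeled on the fork path): a child either
 * shares the parent's sigacts or gets a private copy.  The flag and variable
 * names are reproduced from memory and should be treated as assumptions.
 */
static void
example_fork_sigacts(struct proc *p1, struct proc *p2, int flags)
{
	if (flags & RFSIGSHARE) {
		p2->p_sigacts = sigacts_hold(p1->p_sigacts);	/* share, bump refcount */
	} else {
		p2->p_sigacts = sigacts_alloc();		/* private copy */
		sigacts_copy(p2->p_sigacts, p1->p_sigacts);
	}
	/* On final process teardown the reference is dropped via sigacts_free(). */
}
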