FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_sig.c
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1982, 1986, 1989, 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94
37 */
38
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD$");
41
42 #include "opt_ktrace.h"
43
44 #include <sys/param.h>
45 #include <sys/ctype.h>
46 #include <sys/systm.h>
47 #include <sys/signalvar.h>
48 #include <sys/vnode.h>
49 #include <sys/acct.h>
50 #include <sys/bus.h>
51 #include <sys/capsicum.h>
52 #include <sys/compressor.h>
53 #include <sys/condvar.h>
54 #include <sys/event.h>
55 #include <sys/fcntl.h>
56 #include <sys/imgact.h>
57 #include <sys/kernel.h>
58 #include <sys/ktr.h>
59 #include <sys/ktrace.h>
60 #include <sys/limits.h>
61 #include <sys/lock.h>
62 #include <sys/malloc.h>
63 #include <sys/mutex.h>
64 #include <sys/refcount.h>
65 #include <sys/namei.h>
66 #include <sys/proc.h>
67 #include <sys/procdesc.h>
68 #include <sys/ptrace.h>
69 #include <sys/posix4.h>
70 #include <sys/pioctl.h>
71 #include <sys/racct.h>
72 #include <sys/resourcevar.h>
73 #include <sys/sdt.h>
74 #include <sys/sbuf.h>
75 #include <sys/sleepqueue.h>
76 #include <sys/smp.h>
77 #include <sys/stat.h>
78 #include <sys/sx.h>
79 #include <sys/syscallsubr.h>
80 #include <sys/sysctl.h>
81 #include <sys/sysent.h>
82 #include <sys/syslog.h>
83 #include <sys/sysproto.h>
84 #include <sys/timers.h>
85 #include <sys/unistd.h>
86 #include <sys/wait.h>
87 #include <vm/vm.h>
88 #include <vm/vm_extern.h>
89 #include <vm/uma.h>
90
91 #include <sys/jail.h>
92
93 #include <machine/cpu.h>
94
95 #include <security/audit/audit.h>
96
97 #define ONSIG 32 /* NSIG for osig* syscalls. XXX. */
98
99 SDT_PROVIDER_DECLARE(proc);
100 SDT_PROBE_DEFINE3(proc, , , signal__send,
101 "struct thread *", "struct proc *", "int");
102 SDT_PROBE_DEFINE2(proc, , , signal__clear,
103 "int", "ksiginfo_t *");
104 SDT_PROBE_DEFINE3(proc, , , signal__discard,
105 "struct thread *", "struct proc *", "int");
106
107 static int coredump(struct thread *);
108 static int killpg1(struct thread *td, int sig, int pgid, int all,
109 ksiginfo_t *ksi);
110 static int issignal(struct thread *td);
111 static int sigprop(int sig);
112 static void tdsigwakeup(struct thread *, int, sig_t, int);
113 static int sig_suspend_threads(struct thread *, struct proc *, int);
114 static int filt_sigattach(struct knote *kn);
115 static void filt_sigdetach(struct knote *kn);
116 static int filt_signal(struct knote *kn, long hint);
117 static struct thread *sigtd(struct proc *p, int sig, int prop);
118 static void sigqueue_start(void);
119
120 static uma_zone_t ksiginfo_zone = NULL;
121 struct filterops sig_filtops = {
122 .f_isfd = 0,
123 .f_attach = filt_sigattach,
124 .f_detach = filt_sigdetach,
125 .f_event = filt_signal,
126 };
127
128 static int kern_logsigexit = 1;
129 SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
130 &kern_logsigexit, 0,
131 "Log processes quitting on abnormal signals to syslog(3)");
132
133 static int kern_forcesigexit = 1;
134 SYSCTL_INT(_kern, OID_AUTO, forcesigexit, CTLFLAG_RW,
135 &kern_forcesigexit, 0, "Force trap signal to be handled");
136
137 static SYSCTL_NODE(_kern, OID_AUTO, sigqueue, CTLFLAG_RW, 0,
138 "POSIX real time signal");
139
140 static int max_pending_per_proc = 128;
141 SYSCTL_INT(_kern_sigqueue, OID_AUTO, max_pending_per_proc, CTLFLAG_RW,
142 &max_pending_per_proc, 0, "Max pending signals per proc");
143
144 static int preallocate_siginfo = 1024;
145 SYSCTL_INT(_kern_sigqueue, OID_AUTO, preallocate, CTLFLAG_RDTUN,
146 &preallocate_siginfo, 0, "Preallocated signal memory size");
147
148 static int signal_overflow = 0;
149 SYSCTL_INT(_kern_sigqueue, OID_AUTO, overflow, CTLFLAG_RD,
150 &signal_overflow, 0, "Number of signals that overflowed");
151
152 static int signal_alloc_fail = 0;
153 SYSCTL_INT(_kern_sigqueue, OID_AUTO, alloc_fail, CTLFLAG_RD,
154 &signal_alloc_fail, 0, "Number of signal allocations that failed");
155
156 static int kern_lognosys = 0;
157 SYSCTL_INT(_kern, OID_AUTO, lognosys, CTLFLAG_RWTUN, &kern_lognosys, 0,
158 "Log invalid syscalls");
159
160 SYSINIT(signal, SI_SUB_P1003_1B, SI_ORDER_FIRST+3, sigqueue_start, NULL);
161
162 /*
163 * Policy -- Can ucred cr1 send SIGIO to a process with ucred cr2?
164 * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
165 * in the right situations.
166 */
167 #define CANSIGIO(cr1, cr2) \
168 ((cr1)->cr_uid == 0 || \
169 (cr1)->cr_ruid == (cr2)->cr_ruid || \
170 (cr1)->cr_uid == (cr2)->cr_ruid || \
171 (cr1)->cr_ruid == (cr2)->cr_uid || \
172 (cr1)->cr_uid == (cr2)->cr_uid)
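
/*
 * Illustrative sketch, not part of the original file: CANSIGIO allows
 * SIGIO when the sender is root or shares a real/effective uid pair
 * with the recipient.  The helper name below is hypothetical.
 */
#if 0
static int
example_cansigio(struct ucred *sender, struct ucred *recipient)
{
        /* Non-zero when sender may deliver SIGIO to recipient. */
        return (CANSIGIO(sender, recipient));
}
#endif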
173
174 static int sugid_coredump;
175 SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RWTUN,
176 &sugid_coredump, 0, "Allow setuid and setgid processes to dump core");
177
178 static int capmode_coredump;
179 SYSCTL_INT(_kern, OID_AUTO, capmode_coredump, CTLFLAG_RWTUN,
180 &capmode_coredump, 0, "Allow processes in capability mode to dump core");
181
182 static int do_coredump = 1;
183 SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
184 &do_coredump, 0, "Enable/Disable coredumps");
185
186 static int set_core_nodump_flag = 0;
187 SYSCTL_INT(_kern, OID_AUTO, nodump_coredump, CTLFLAG_RW, &set_core_nodump_flag,
188 0, "Enable setting the NODUMP flag on coredump files");
189
190 static int coredump_devctl = 0;
191 SYSCTL_INT(_kern, OID_AUTO, coredump_devctl, CTLFLAG_RW, &coredump_devctl,
192 0, "Generate a devctl notification when processes coredump");
193
194 /*
195 * Signal properties and actions.
196 * The array below categorizes the signals and their default actions
197 * according to the following properties:
198 */
199 #define SIGPROP_KILL 0x01 /* terminates process by default */
200 #define SIGPROP_CORE 0x02 /* ditto and coredumps */
201 #define SIGPROP_STOP 0x04 /* suspend process */
202 #define SIGPROP_TTYSTOP 0x08 /* ditto, from tty */
203 #define SIGPROP_IGNORE 0x10 /* ignore by default */
204 #define SIGPROP_CONT 0x20 /* continue if suspended */
205 #define SIGPROP_CANTMASK 0x40 /* non-maskable, catchable */
206
207 static int sigproptbl[NSIG] = {
208 [SIGHUP] = SIGPROP_KILL,
209 [SIGINT] = SIGPROP_KILL,
210 [SIGQUIT] = SIGPROP_KILL | SIGPROP_CORE,
211 [SIGILL] = SIGPROP_KILL | SIGPROP_CORE,
212 [SIGTRAP] = SIGPROP_KILL | SIGPROP_CORE,
213 [SIGABRT] = SIGPROP_KILL | SIGPROP_CORE,
214 [SIGEMT] = SIGPROP_KILL | SIGPROP_CORE,
215 [SIGFPE] = SIGPROP_KILL | SIGPROP_CORE,
216 [SIGKILL] = SIGPROP_KILL,
217 [SIGBUS] = SIGPROP_KILL | SIGPROP_CORE,
218 [SIGSEGV] = SIGPROP_KILL | SIGPROP_CORE,
219 [SIGSYS] = SIGPROP_KILL | SIGPROP_CORE,
220 [SIGPIPE] = SIGPROP_KILL,
221 [SIGALRM] = SIGPROP_KILL,
222 [SIGTERM] = SIGPROP_KILL,
223 [SIGURG] = SIGPROP_IGNORE,
224 [SIGSTOP] = SIGPROP_STOP,
225 [SIGTSTP] = SIGPROP_STOP | SIGPROP_TTYSTOP,
226 [SIGCONT] = SIGPROP_IGNORE | SIGPROP_CONT,
227 [SIGCHLD] = SIGPROP_IGNORE,
228 [SIGTTIN] = SIGPROP_STOP | SIGPROP_TTYSTOP,
229 [SIGTTOU] = SIGPROP_STOP | SIGPROP_TTYSTOP,
230 [SIGIO] = SIGPROP_IGNORE,
231 [SIGXCPU] = SIGPROP_KILL,
232 [SIGXFSZ] = SIGPROP_KILL,
233 [SIGVTALRM] = SIGPROP_KILL,
234 [SIGPROF] = SIGPROP_KILL,
235 [SIGWINCH] = SIGPROP_IGNORE,
236 [SIGINFO] = SIGPROP_IGNORE,
237 [SIGUSR1] = SIGPROP_KILL,
238 [SIGUSR2] = SIGPROP_KILL,
239 };
240
241 static void reschedule_signals(struct proc *p, sigset_t block, int flags);
242
243 static void
244 sigqueue_start(void)
245 {
246 ksiginfo_zone = uma_zcreate("ksiginfo", sizeof(ksiginfo_t),
247 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
248 uma_prealloc(ksiginfo_zone, preallocate_siginfo);
249 p31b_setcfg(CTL_P1003_1B_REALTIME_SIGNALS, _POSIX_REALTIME_SIGNALS);
250 p31b_setcfg(CTL_P1003_1B_RTSIG_MAX, SIGRTMAX - SIGRTMIN + 1);
251 p31b_setcfg(CTL_P1003_1B_SIGQUEUE_MAX, max_pending_per_proc);
252 }
253
254 ksiginfo_t *
255 ksiginfo_alloc(int wait)
256 {
257 int flags;
258
259 flags = M_ZERO;
260 if (!wait)
261 flags |= M_NOWAIT;
262 if (ksiginfo_zone != NULL)
263 return ((ksiginfo_t *)uma_zalloc(ksiginfo_zone, flags));
264 return (NULL);
265 }
266
267 void
268 ksiginfo_free(ksiginfo_t *ksi)
269 {
270 uma_zfree(ksiginfo_zone, ksi);
271 }
272
273 static __inline int
274 ksiginfo_tryfree(ksiginfo_t *ksi)
275 {
276 if (!(ksi->ksi_flags & KSI_EXT)) {
277 uma_zfree(ksiginfo_zone, ksi);
278 return (1);
279 }
280 return (0);
281 }
282
283 void
284 sigqueue_init(sigqueue_t *list, struct proc *p)
285 {
286 SIGEMPTYSET(list->sq_signals);
287 SIGEMPTYSET(list->sq_kill);
288 SIGEMPTYSET(list->sq_ptrace);
289 TAILQ_INIT(&list->sq_list);
290 list->sq_proc = p;
291 list->sq_flags = SQ_INIT;
292 }
293
294 /*
295 * Get a signal's ksiginfo.
296 * Return:
297 * 0 - signal not found
298 * non-zero - the signal number
299 */
300 static int
301 sigqueue_get(sigqueue_t *sq, int signo, ksiginfo_t *si)
302 {
303 struct proc *p = sq->sq_proc;
304 struct ksiginfo *ksi, *next;
305 int count = 0;
306
307 KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
308
309 if (!SIGISMEMBER(sq->sq_signals, signo))
310 return (0);
311
312 if (SIGISMEMBER(sq->sq_ptrace, signo)) {
313 count++;
314 SIGDELSET(sq->sq_ptrace, signo);
315 si->ksi_flags |= KSI_PTRACE;
316 }
317 if (SIGISMEMBER(sq->sq_kill, signo)) {
318 count++;
319 if (count == 1)
320 SIGDELSET(sq->sq_kill, signo);
321 }
322
323 TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
324 if (ksi->ksi_signo == signo) {
325 if (count == 0) {
326 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
327 ksi->ksi_sigq = NULL;
328 ksiginfo_copy(ksi, si);
329 if (ksiginfo_tryfree(ksi) && p != NULL)
330 p->p_pendingcnt--;
331 }
332 if (++count > 1)
333 break;
334 }
335 }
336
337 if (count <= 1)
338 SIGDELSET(sq->sq_signals, signo);
339 si->ksi_signo = signo;
340 return (signo);
341 }
342
343 void
344 sigqueue_take(ksiginfo_t *ksi)
345 {
346 struct ksiginfo *kp;
347 struct proc *p;
348 sigqueue_t *sq;
349
350 if (ksi == NULL || (sq = ksi->ksi_sigq) == NULL)
351 return;
352
353 p = sq->sq_proc;
354 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
355 ksi->ksi_sigq = NULL;
356 if (!(ksi->ksi_flags & KSI_EXT) && p != NULL)
357 p->p_pendingcnt--;
358
359 for (kp = TAILQ_FIRST(&sq->sq_list); kp != NULL;
360 kp = TAILQ_NEXT(kp, ksi_link)) {
361 if (kp->ksi_signo == ksi->ksi_signo)
362 break;
363 }
364 if (kp == NULL && !SIGISMEMBER(sq->sq_kill, ksi->ksi_signo) &&
365 !SIGISMEMBER(sq->sq_ptrace, ksi->ksi_signo))
366 SIGDELSET(sq->sq_signals, ksi->ksi_signo);
367 }
368
369 static int
370 sigqueue_add(sigqueue_t *sq, int signo, ksiginfo_t *si)
371 {
372 struct proc *p = sq->sq_proc;
373 struct ksiginfo *ksi;
374 int ret = 0;
375
376 KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
377
378 /*
379 * SIGKILL/SIGSTOP cannot be caught or masked, so take the fast path
380 * for these signals.
381 */
382 if (signo == SIGKILL || signo == SIGSTOP || si == NULL) {
383 SIGADDSET(sq->sq_kill, signo);
384 goto out_set_bit;
385 }
386
387 /* directly insert the ksi, don't copy it */
388 if (si->ksi_flags & KSI_INS) {
389 if (si->ksi_flags & KSI_HEAD)
390 TAILQ_INSERT_HEAD(&sq->sq_list, si, ksi_link);
391 else
392 TAILQ_INSERT_TAIL(&sq->sq_list, si, ksi_link);
393 si->ksi_sigq = sq;
394 goto out_set_bit;
395 }
396
397 if (__predict_false(ksiginfo_zone == NULL)) {
398 SIGADDSET(sq->sq_kill, signo);
399 goto out_set_bit;
400 }
401
402 if (p != NULL && p->p_pendingcnt >= max_pending_per_proc) {
403 signal_overflow++;
404 ret = EAGAIN;
405 } else if ((ksi = ksiginfo_alloc(0)) == NULL) {
406 signal_alloc_fail++;
407 ret = EAGAIN;
408 } else {
409 if (p != NULL)
410 p->p_pendingcnt++;
411 ksiginfo_copy(si, ksi);
412 ksi->ksi_signo = signo;
413 if (si->ksi_flags & KSI_HEAD)
414 TAILQ_INSERT_HEAD(&sq->sq_list, ksi, ksi_link);
415 else
416 TAILQ_INSERT_TAIL(&sq->sq_list, ksi, ksi_link);
417 ksi->ksi_sigq = sq;
418 }
419
420 if (ret != 0) {
421 if ((si->ksi_flags & KSI_PTRACE) != 0) {
422 SIGADDSET(sq->sq_ptrace, signo);
423 ret = 0;
424 goto out_set_bit;
425 } else if ((si->ksi_flags & KSI_TRAP) != 0 ||
426 (si->ksi_flags & KSI_SIGQ) == 0) {
427 SIGADDSET(sq->sq_kill, signo);
428 ret = 0;
429 goto out_set_bit;
430 }
431 return (ret);
432 }
433
434 out_set_bit:
435 SIGADDSET(sq->sq_signals, signo);
436 return (ret);
437 }
438
439 void
440 sigqueue_flush(sigqueue_t *sq)
441 {
442 struct proc *p = sq->sq_proc;
443 ksiginfo_t *ksi;
444
445 KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
446
447 if (p != NULL)
448 PROC_LOCK_ASSERT(p, MA_OWNED);
449
450 while ((ksi = TAILQ_FIRST(&sq->sq_list)) != NULL) {
451 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
452 ksi->ksi_sigq = NULL;
453 if (ksiginfo_tryfree(ksi) && p != NULL)
454 p->p_pendingcnt--;
455 }
456
457 SIGEMPTYSET(sq->sq_signals);
458 SIGEMPTYSET(sq->sq_kill);
459 SIGEMPTYSET(sq->sq_ptrace);
460 }
461
462 static void
463 sigqueue_move_set(sigqueue_t *src, sigqueue_t *dst, const sigset_t *set)
464 {
465 sigset_t tmp;
466 struct proc *p1, *p2;
467 ksiginfo_t *ksi, *next;
468
469 KASSERT(src->sq_flags & SQ_INIT, ("src sigqueue not inited"));
470 KASSERT(dst->sq_flags & SQ_INIT, ("dst sigqueue not inited"));
471 p1 = src->sq_proc;
472 p2 = dst->sq_proc;
473 /* Move siginfo to target list */
474 TAILQ_FOREACH_SAFE(ksi, &src->sq_list, ksi_link, next) {
475 if (SIGISMEMBER(*set, ksi->ksi_signo)) {
476 TAILQ_REMOVE(&src->sq_list, ksi, ksi_link);
477 if (p1 != NULL)
478 p1->p_pendingcnt--;
479 TAILQ_INSERT_TAIL(&dst->sq_list, ksi, ksi_link);
480 ksi->ksi_sigq = dst;
481 if (p2 != NULL)
482 p2->p_pendingcnt++;
483 }
484 }
485
486 /* Move pending bits to target list */
487 tmp = src->sq_kill;
488 SIGSETAND(tmp, *set);
489 SIGSETOR(dst->sq_kill, tmp);
490 SIGSETNAND(src->sq_kill, tmp);
491
492 tmp = src->sq_ptrace;
493 SIGSETAND(tmp, *set);
494 SIGSETOR(dst->sq_ptrace, tmp);
495 SIGSETNAND(src->sq_ptrace, tmp);
496
497 tmp = src->sq_signals;
498 SIGSETAND(tmp, *set);
499 SIGSETOR(dst->sq_signals, tmp);
500 SIGSETNAND(src->sq_signals, tmp);
501 }
502
503 #if 0
504 static void
505 sigqueue_move(sigqueue_t *src, sigqueue_t *dst, int signo)
506 {
507 sigset_t set;
508
509 SIGEMPTYSET(set);
510 SIGADDSET(set, signo);
511 sigqueue_move_set(src, dst, &set);
512 }
513 #endif
514
515 static void
516 sigqueue_delete_set(sigqueue_t *sq, const sigset_t *set)
517 {
518 struct proc *p = sq->sq_proc;
519 ksiginfo_t *ksi, *next;
520
521 KASSERT(sq->sq_flags & SQ_INIT, ("src sigqueue not inited"));
522
523 /* Remove siginfo queue */
524 TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
525 if (SIGISMEMBER(*set, ksi->ksi_signo)) {
526 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
527 ksi->ksi_sigq = NULL;
528 if (ksiginfo_tryfree(ksi) && p != NULL)
529 p->p_pendingcnt--;
530 }
531 }
532 SIGSETNAND(sq->sq_kill, *set);
533 SIGSETNAND(sq->sq_ptrace, *set);
534 SIGSETNAND(sq->sq_signals, *set);
535 }
536
537 void
538 sigqueue_delete(sigqueue_t *sq, int signo)
539 {
540 sigset_t set;
541
542 SIGEMPTYSET(set);
543 SIGADDSET(set, signo);
544 sigqueue_delete_set(sq, &set);
545 }
546
547 /* Remove a set of signals for a process */
548 static void
549 sigqueue_delete_set_proc(struct proc *p, const sigset_t *set)
550 {
551 sigqueue_t worklist;
552 struct thread *td0;
553
554 PROC_LOCK_ASSERT(p, MA_OWNED);
555
556 sigqueue_init(&worklist, NULL);
557 sigqueue_move_set(&p->p_sigqueue, &worklist, set);
558
559 FOREACH_THREAD_IN_PROC(p, td0)
560 sigqueue_move_set(&td0->td_sigqueue, &worklist, set);
561
562 sigqueue_flush(&worklist);
563 }
564
565 void
566 sigqueue_delete_proc(struct proc *p, int signo)
567 {
568 sigset_t set;
569
570 SIGEMPTYSET(set);
571 SIGADDSET(set, signo);
572 sigqueue_delete_set_proc(p, &set);
573 }
574
575 static void
576 sigqueue_delete_stopmask_proc(struct proc *p)
577 {
578 sigset_t set;
579
580 SIGEMPTYSET(set);
581 SIGADDSET(set, SIGSTOP);
582 SIGADDSET(set, SIGTSTP);
583 SIGADDSET(set, SIGTTIN);
584 SIGADDSET(set, SIGTTOU);
585 sigqueue_delete_set_proc(p, &set);
586 }
587
588 /*
589 * Determine the signal that should be delivered to thread td, the current
590 * thread; return 0 if none. If there is a pending stop signal with default
591 * action, the process stops in issignal().
592 */
593 int
594 cursig(struct thread *td)
595 {
596 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
597 mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED);
598 THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
599 return (SIGPENDING(td) ? issignal(td) : 0);
600 }
601
602 /*
603 * Arrange for ast() to handle unmasked pending signals on return to user
604 * mode. This must be called whenever a signal is added to td_sigqueue or
605 * unmasked in td_sigmask.
606 */
607 void
608 signotify(struct thread *td)
609 {
610
611 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
612
613 if (SIGPENDING(td)) {
614 thread_lock(td);
615 td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING;
616 thread_unlock(td);
617 }
618 }
619
620 /*
621 * Returns 1 (true) if altstack is configured for the thread, and the
622 * passed stack bottom address falls into the altstack range. Handles
623 * the 4.3BSD compat special case where the alt stack size is zero.
624 */
625 int
626 sigonstack(size_t sp)
627 {
628 struct thread *td;
629
630 td = curthread;
631 if ((td->td_pflags & TDP_ALTSTACK) == 0)
632 return (0);
633 #if defined(COMPAT_43)
634 if (SV_PROC_FLAG(td->td_proc, SV_AOUT) && td->td_sigstk.ss_size == 0)
635 return ((td->td_sigstk.ss_flags & SS_ONSTACK) != 0);
636 #endif
637 return (sp >= (size_t)td->td_sigstk.ss_sp &&
638 sp < td->td_sigstk.ss_size + (size_t)td->td_sigstk.ss_sp);
639 }
640
641 static __inline int
642 sigprop(int sig)
643 {
644
645 if (sig > 0 && sig < nitems(sigproptbl))
646 return (sigproptbl[sig]);
647 return (0);
648 }
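
/*
 * Illustrative sketch, not part of the original file: sigprop() folds
 * sigproptbl[] into simple predicates.  SIGQUIT both kills and dumps
 * core, SIGCHLD is ignored by default, and out-of-range signals have
 * no properties.  The function name is hypothetical.
 */
#if 0
static void
example_sigprop(void)
{
        KASSERT((sigprop(SIGQUIT) & SIGPROP_CORE) != 0,
            ("SIGQUIT dumps core by default"));
        KASSERT((sigprop(SIGCHLD) & SIGPROP_IGNORE) != 0,
            ("SIGCHLD is ignored by default"));
        KASSERT(sigprop(NSIG) == 0, ("out-of-range signal has no properties"));
}
#endif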
649
650 int
651 sig_ffs(sigset_t *set)
652 {
653 int i;
654
655 for (i = 0; i < _SIG_WORDS; i++)
656 if (set->__bits[i])
657 return (ffs(set->__bits[i]) + (i * 32));
658 return (0);
659 }
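
/*
 * Illustrative sketch, not part of the original file: sig_ffs() returns
 * the lowest-numbered signal in a set, scanning the mask words in
 * order, so it pairs naturally with a SIGNOTEMPTY() check.  The helper
 * name is hypothetical.
 */
#if 0
static int
example_first_queued(struct thread *td)
{
        sigset_t pending;

        pending = td->td_sigqueue.sq_signals;
        if (!SIGNOTEMPTY(pending))
                return (0);
        return (sig_ffs(&pending));
}
#endif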
660
661 static bool
662 sigact_flag_test(const struct sigaction *act, int flag)
663 {
664
665 /*
666 * SA_SIGINFO is reset when signal disposition is set to
667 * ignore or default. Other flags are kept according to user
668 * settings.
669 */
670 return ((act->sa_flags & flag) != 0 && (flag != SA_SIGINFO ||
671 ((__sighandler_t *)act->sa_sigaction != SIG_IGN &&
672 (__sighandler_t *)act->sa_sigaction != SIG_DFL)));
673 }
674
675 /*
676 * kern_sigaction
677 * sigaction
678 * freebsd4_sigaction
679 * osigaction
680 */
681 int
682 kern_sigaction(struct thread *td, int sig, const struct sigaction *act,
683 struct sigaction *oact, int flags)
684 {
685 struct sigacts *ps;
686 struct proc *p = td->td_proc;
687
688 if (!_SIG_VALID(sig))
689 return (EINVAL);
690 if (act != NULL && act->sa_handler != SIG_DFL &&
691 act->sa_handler != SIG_IGN && (act->sa_flags & ~(SA_ONSTACK |
692 SA_RESTART | SA_RESETHAND | SA_NOCLDSTOP | SA_NODEFER |
693 SA_NOCLDWAIT | SA_SIGINFO)) != 0)
694 return (EINVAL);
695
696 PROC_LOCK(p);
697 ps = p->p_sigacts;
698 mtx_lock(&ps->ps_mtx);
699 if (oact) {
700 memset(oact, 0, sizeof(*oact));
701 oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
702 if (SIGISMEMBER(ps->ps_sigonstack, sig))
703 oact->sa_flags |= SA_ONSTACK;
704 if (!SIGISMEMBER(ps->ps_sigintr, sig))
705 oact->sa_flags |= SA_RESTART;
706 if (SIGISMEMBER(ps->ps_sigreset, sig))
707 oact->sa_flags |= SA_RESETHAND;
708 if (SIGISMEMBER(ps->ps_signodefer, sig))
709 oact->sa_flags |= SA_NODEFER;
710 if (SIGISMEMBER(ps->ps_siginfo, sig)) {
711 oact->sa_flags |= SA_SIGINFO;
712 oact->sa_sigaction =
713 (__siginfohandler_t *)ps->ps_sigact[_SIG_IDX(sig)];
714 } else
715 oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
716 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP)
717 oact->sa_flags |= SA_NOCLDSTOP;
718 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT)
719 oact->sa_flags |= SA_NOCLDWAIT;
720 }
721 if (act) {
722 if ((sig == SIGKILL || sig == SIGSTOP) &&
723 act->sa_handler != SIG_DFL) {
724 mtx_unlock(&ps->ps_mtx);
725 PROC_UNLOCK(p);
726 return (EINVAL);
727 }
728
729 /*
730 * Change setting atomically.
731 */
732
733 ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
734 SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
735 if (sigact_flag_test(act, SA_SIGINFO)) {
736 ps->ps_sigact[_SIG_IDX(sig)] =
737 (__sighandler_t *)act->sa_sigaction;
738 SIGADDSET(ps->ps_siginfo, sig);
739 } else {
740 ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
741 SIGDELSET(ps->ps_siginfo, sig);
742 }
743 if (!sigact_flag_test(act, SA_RESTART))
744 SIGADDSET(ps->ps_sigintr, sig);
745 else
746 SIGDELSET(ps->ps_sigintr, sig);
747 if (sigact_flag_test(act, SA_ONSTACK))
748 SIGADDSET(ps->ps_sigonstack, sig);
749 else
750 SIGDELSET(ps->ps_sigonstack, sig);
751 if (sigact_flag_test(act, SA_RESETHAND))
752 SIGADDSET(ps->ps_sigreset, sig);
753 else
754 SIGDELSET(ps->ps_sigreset, sig);
755 if (sigact_flag_test(act, SA_NODEFER))
756 SIGADDSET(ps->ps_signodefer, sig);
757 else
758 SIGDELSET(ps->ps_signodefer, sig);
759 if (sig == SIGCHLD) {
760 if (act->sa_flags & SA_NOCLDSTOP)
761 ps->ps_flag |= PS_NOCLDSTOP;
762 else
763 ps->ps_flag &= ~PS_NOCLDSTOP;
764 if (act->sa_flags & SA_NOCLDWAIT) {
765 /*
766 * Paranoia: since SA_NOCLDWAIT is implemented
767 * by reparenting the dying child to PID 1 (and
768 * trust it to reap the zombie), PID 1 itself
769 * is forbidden to set SA_NOCLDWAIT.
770 */
771 if (p->p_pid == 1)
772 ps->ps_flag &= ~PS_NOCLDWAIT;
773 else
774 ps->ps_flag |= PS_NOCLDWAIT;
775 } else
776 ps->ps_flag &= ~PS_NOCLDWAIT;
777 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
778 ps->ps_flag |= PS_CLDSIGIGN;
779 else
780 ps->ps_flag &= ~PS_CLDSIGIGN;
781 }
782 /*
783 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
784 * and for signals set to SIG_DFL where the default is to
785 * ignore. However, don't put SIGCONT in ps_sigignore, as we
786 * have to restart the process.
787 */
788 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
789 (sigprop(sig) & SIGPROP_IGNORE &&
790 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
791 /* never to be seen again */
792 sigqueue_delete_proc(p, sig);
793 if (sig != SIGCONT)
794 /* easier in psignal */
795 SIGADDSET(ps->ps_sigignore, sig);
796 SIGDELSET(ps->ps_sigcatch, sig);
797 } else {
798 SIGDELSET(ps->ps_sigignore, sig);
799 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
800 SIGDELSET(ps->ps_sigcatch, sig);
801 else
802 SIGADDSET(ps->ps_sigcatch, sig);
803 }
804 #ifdef COMPAT_FREEBSD4
805 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
806 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
807 (flags & KSA_FREEBSD4) == 0)
808 SIGDELSET(ps->ps_freebsd4, sig);
809 else
810 SIGADDSET(ps->ps_freebsd4, sig);
811 #endif
812 #ifdef COMPAT_43
813 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
814 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
815 (flags & KSA_OSIGSET) == 0)
816 SIGDELSET(ps->ps_osigset, sig);
817 else
818 SIGADDSET(ps->ps_osigset, sig);
819 #endif
820 }
821 mtx_unlock(&ps->ps_mtx);
822 PROC_UNLOCK(p);
823 return (0);
824 }
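
/*
 * Illustrative userspace sketch, not part of the original file: the
 * validation in kern_sigaction() means sigaction(2) rejects catching
 * SIGKILL/SIGSTOP with EINVAL, while a SA_SIGINFO handler for a
 * catchable signal is accepted.  Names below are hypothetical.
 */
#if 0
#include <signal.h>

static void
example_handler(int sig, siginfo_t *si, void *ucp)
{
}

static int
example_install(void)
{
        struct sigaction sa;

        sa.sa_sigaction = example_handler;
        sa.sa_flags = SA_SIGINFO | SA_RESTART;
        sigemptyset(&sa.sa_mask);
        /* sigaction(SIGKILL, &sa, NULL) would fail with EINVAL. */
        return (sigaction(SIGUSR1, &sa, NULL));
}
#endif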
825
826 #ifndef _SYS_SYSPROTO_H_
827 struct sigaction_args {
828 int sig;
829 struct sigaction *act;
830 struct sigaction *oact;
831 };
832 #endif
833 int
834 sys_sigaction(struct thread *td, struct sigaction_args *uap)
835 {
836 struct sigaction act, oact;
837 struct sigaction *actp, *oactp;
838 int error;
839
840 actp = (uap->act != NULL) ? &act : NULL;
841 oactp = (uap->oact != NULL) ? &oact : NULL;
842 if (actp) {
843 error = copyin(uap->act, actp, sizeof(act));
844 if (error)
845 return (error);
846 }
847 error = kern_sigaction(td, uap->sig, actp, oactp, 0);
848 if (oactp && !error)
849 error = copyout(oactp, uap->oact, sizeof(oact));
850 return (error);
851 }
852
853 #ifdef COMPAT_FREEBSD4
854 #ifndef _SYS_SYSPROTO_H_
855 struct freebsd4_sigaction_args {
856 int sig;
857 struct sigaction *act;
858 struct sigaction *oact;
859 };
860 #endif
861 int
862 freebsd4_sigaction(struct thread *td, struct freebsd4_sigaction_args *uap)
863 {
864 struct sigaction act, oact;
865 struct sigaction *actp, *oactp;
866 int error;
867
868
869 actp = (uap->act != NULL) ? &act : NULL;
870 oactp = (uap->oact != NULL) ? &oact : NULL;
871 if (actp) {
872 error = copyin(uap->act, actp, sizeof(act));
873 if (error)
874 return (error);
875 }
876 error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4);
877 if (oactp && !error)
878 error = copyout(oactp, uap->oact, sizeof(oact));
879 return (error);
880 }
881 #endif /* COMPAT_FREEBSD4 */
882
883 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
884 #ifndef _SYS_SYSPROTO_H_
885 struct osigaction_args {
886 int signum;
887 struct osigaction *nsa;
888 struct osigaction *osa;
889 };
890 #endif
891 int
892 osigaction(struct thread *td, struct osigaction_args *uap)
893 {
894 struct osigaction sa;
895 struct sigaction nsa, osa;
896 struct sigaction *nsap, *osap;
897 int error;
898
899 if (uap->signum <= 0 || uap->signum >= ONSIG)
900 return (EINVAL);
901
902 nsap = (uap->nsa != NULL) ? &nsa : NULL;
903 osap = (uap->osa != NULL) ? &osa : NULL;
904
905 if (nsap) {
906 error = copyin(uap->nsa, &sa, sizeof(sa));
907 if (error)
908 return (error);
909 nsap->sa_handler = sa.sa_handler;
910 nsap->sa_flags = sa.sa_flags;
911 OSIG2SIG(sa.sa_mask, nsap->sa_mask);
912 }
913 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
914 if (osap && !error) {
915 sa.sa_handler = osap->sa_handler;
916 sa.sa_flags = osap->sa_flags;
917 SIG2OSIG(osap->sa_mask, sa.sa_mask);
918 error = copyout(&sa, uap->osa, sizeof(sa));
919 }
920 return (error);
921 }
922
923 #if !defined(__i386__)
924 /* Avoid replicating the same stub everywhere */
925 int
926 osigreturn(struct thread *td, struct osigreturn_args *uap)
927 {
928
929 return (nosys(td, (struct nosys_args *)uap));
930 }
931 #endif
932 #endif /* COMPAT_43 */
933
934 /*
935 * Initialize signal state for process 0;
936 * set to ignore signals that are ignored by default.
937 */
938 void
939 siginit(struct proc *p)
940 {
941 int i;
942 struct sigacts *ps;
943
944 PROC_LOCK(p);
945 ps = p->p_sigacts;
946 mtx_lock(&ps->ps_mtx);
947 for (i = 1; i <= NSIG; i++) {
948 if (sigprop(i) & SIGPROP_IGNORE && i != SIGCONT) {
949 SIGADDSET(ps->ps_sigignore, i);
950 }
951 }
952 mtx_unlock(&ps->ps_mtx);
953 PROC_UNLOCK(p);
954 }
955
956 /*
957 * Reset specified signal to the default disposition.
958 */
959 static void
960 sigdflt(struct sigacts *ps, int sig)
961 {
962
963 mtx_assert(&ps->ps_mtx, MA_OWNED);
964 SIGDELSET(ps->ps_sigcatch, sig);
965 if ((sigprop(sig) & SIGPROP_IGNORE) != 0 && sig != SIGCONT)
966 SIGADDSET(ps->ps_sigignore, sig);
967 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
968 SIGDELSET(ps->ps_siginfo, sig);
969 }
970
971 /*
972 * Reset signals for an exec of the specified process.
973 */
974 void
975 execsigs(struct proc *p)
976 {
977 sigset_t osigignore;
978 struct sigacts *ps;
979 int sig;
980 struct thread *td;
981
982 /*
983 * Reset caught signals. Held signals remain held
984 * through td_sigmask (unless they were caught,
985 * and are now ignored by default).
986 */
987 PROC_LOCK_ASSERT(p, MA_OWNED);
988 ps = p->p_sigacts;
989 mtx_lock(&ps->ps_mtx);
990 sig_drop_caught(p);
991
992 /*
993 * As CloudABI processes cannot modify signal handlers, fully
994 * reset all signals to their default behavior. Do ignore
995 * SIGPIPE, as it would otherwise be impossible to recover from
996 * writes to broken pipes and sockets.
997 */
998 if (SV_PROC_ABI(p) == SV_ABI_CLOUDABI) {
999 osigignore = ps->ps_sigignore;
1000 while (SIGNOTEMPTY(osigignore)) {
1001 sig = sig_ffs(&osigignore);
1002 SIGDELSET(osigignore, sig);
1003 if (sig != SIGPIPE)
1004 sigdflt(ps, sig);
1005 }
1006 SIGADDSET(ps->ps_sigignore, SIGPIPE);
1007 }
1008
1009 /*
1010 * Reset stack state to the user stack.
1011 * Clear set of signals caught on the signal stack.
1012 */
1013 td = curthread;
1014 MPASS(td->td_proc == p);
1015 td->td_sigstk.ss_flags = SS_DISABLE;
1016 td->td_sigstk.ss_size = 0;
1017 td->td_sigstk.ss_sp = 0;
1018 td->td_pflags &= ~TDP_ALTSTACK;
1019 /*
1020 * Reset the "no zombies if child dies" flag, as Solaris does.
1021 */
1022 ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
1023 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
1024 ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
1025 mtx_unlock(&ps->ps_mtx);
1026 }
1027
1028 /*
1029 * kern_sigprocmask()
1030 *
1031 * Manipulate signal mask.
1032 */
1033 int
1034 kern_sigprocmask(struct thread *td, int how, sigset_t *set, sigset_t *oset,
1035 int flags)
1036 {
1037 sigset_t new_block, oset1;
1038 struct proc *p;
1039 int error;
1040
1041 p = td->td_proc;
1042 if ((flags & SIGPROCMASK_PROC_LOCKED) != 0)
1043 PROC_LOCK_ASSERT(p, MA_OWNED);
1044 else
1045 PROC_LOCK(p);
1046 mtx_assert(&p->p_sigacts->ps_mtx, (flags & SIGPROCMASK_PS_LOCKED) != 0
1047 ? MA_OWNED : MA_NOTOWNED);
1048 if (oset != NULL)
1049 *oset = td->td_sigmask;
1050
1051 error = 0;
1052 if (set != NULL) {
1053 switch (how) {
1054 case SIG_BLOCK:
1055 SIG_CANTMASK(*set);
1056 oset1 = td->td_sigmask;
1057 SIGSETOR(td->td_sigmask, *set);
1058 new_block = td->td_sigmask;
1059 SIGSETNAND(new_block, oset1);
1060 break;
1061 case SIG_UNBLOCK:
1062 SIGSETNAND(td->td_sigmask, *set);
1063 signotify(td);
1064 goto out;
1065 case SIG_SETMASK:
1066 SIG_CANTMASK(*set);
1067 oset1 = td->td_sigmask;
1068 if (flags & SIGPROCMASK_OLD)
1069 SIGSETLO(td->td_sigmask, *set);
1070 else
1071 td->td_sigmask = *set;
1072 new_block = td->td_sigmask;
1073 SIGSETNAND(new_block, oset1);
1074 signotify(td);
1075 break;
1076 default:
1077 error = EINVAL;
1078 goto out;
1079 }
1080
1081 /*
1082 * The new_block set contains signals that were not previously
1083 * blocked, but are blocked now.
1084 *
1085 * In case we block any signal that was not previously blocked
1086 * for td, and process has the signal pending, try to schedule
1087 * signal delivery to some thread that does not block the
1088 * signal, possibly waking it up.
1089 */
1090 if (p->p_numthreads != 1)
1091 reschedule_signals(p, new_block, flags);
1092 }
1093
1094 out:
1095 if (!(flags & SIGPROCMASK_PROC_LOCKED))
1096 PROC_UNLOCK(p);
1097 return (error);
1098 }
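
/*
 * Illustrative userspace sketch, not part of the original file: the
 * three "how" values above map to the familiar sigprocmask(2) calls;
 * SIG_CANTMASK silently strips SIGKILL and SIGSTOP from the new mask.
 * The helper name is hypothetical.
 */
#if 0
#include <signal.h>

static int
example_block_sigint(sigset_t *oldmask)
{
        sigset_t set;

        sigemptyset(&set);
        sigaddset(&set, SIGINT);
        return (sigprocmask(SIG_BLOCK, &set, oldmask));
}
#endif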
1099
1100 #ifndef _SYS_SYSPROTO_H_
1101 struct sigprocmask_args {
1102 int how;
1103 const sigset_t *set;
1104 sigset_t *oset;
1105 };
1106 #endif
1107 int
1108 sys_sigprocmask(struct thread *td, struct sigprocmask_args *uap)
1109 {
1110 sigset_t set, oset;
1111 sigset_t *setp, *osetp;
1112 int error;
1113
1114 setp = (uap->set != NULL) ? &set : NULL;
1115 osetp = (uap->oset != NULL) ? &oset : NULL;
1116 if (setp) {
1117 error = copyin(uap->set, setp, sizeof(set));
1118 if (error)
1119 return (error);
1120 }
1121 error = kern_sigprocmask(td, uap->how, setp, osetp, 0);
1122 if (osetp && !error) {
1123 error = copyout(osetp, uap->oset, sizeof(oset));
1124 }
1125 return (error);
1126 }
1127
1128 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
1129 #ifndef _SYS_SYSPROTO_H_
1130 struct osigprocmask_args {
1131 int how;
1132 osigset_t mask;
1133 };
1134 #endif
1135 int
1136 osigprocmask(struct thread *td, struct osigprocmask_args *uap)
1137 {
1138 sigset_t set, oset;
1139 int error;
1140
1141 OSIG2SIG(uap->mask, set);
1142 error = kern_sigprocmask(td, uap->how, &set, &oset, 1);
1143 SIG2OSIG(oset, td->td_retval[0]);
1144 return (error);
1145 }
1146 #endif /* COMPAT_43 */
1147
1148 int
1149 sys_sigwait(struct thread *td, struct sigwait_args *uap)
1150 {
1151 ksiginfo_t ksi;
1152 sigset_t set;
1153 int error;
1154
1155 error = copyin(uap->set, &set, sizeof(set));
1156 if (error) {
1157 td->td_retval[0] = error;
1158 return (0);
1159 }
1160
1161 error = kern_sigtimedwait(td, set, &ksi, NULL);
1162 if (error) {
1163 if (error == EINTR && td->td_proc->p_osrel < P_OSREL_SIGWAIT)
1164 error = ERESTART;
1165 if (error == ERESTART)
1166 return (error);
1167 td->td_retval[0] = error;
1168 return (0);
1169 }
1170
1171 error = copyout(&ksi.ksi_signo, uap->sig, sizeof(ksi.ksi_signo));
1172 td->td_retval[0] = error;
1173 return (0);
1174 }
1175
1176 int
1177 sys_sigtimedwait(struct thread *td, struct sigtimedwait_args *uap)
1178 {
1179 struct timespec ts;
1180 struct timespec *timeout;
1181 sigset_t set;
1182 ksiginfo_t ksi;
1183 int error;
1184
1185 if (uap->timeout) {
1186 error = copyin(uap->timeout, &ts, sizeof(ts));
1187 if (error)
1188 return (error);
1189
1190 timeout = &ts;
1191 } else
1192 timeout = NULL;
1193
1194 error = copyin(uap->set, &set, sizeof(set));
1195 if (error)
1196 return (error);
1197
1198 error = kern_sigtimedwait(td, set, &ksi, timeout);
1199 if (error)
1200 return (error);
1201
1202 if (uap->info)
1203 error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));
1204
1205 if (error == 0)
1206 td->td_retval[0] = ksi.ksi_signo;
1207 return (error);
1208 }
1209
1210 int
1211 sys_sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap)
1212 {
1213 ksiginfo_t ksi;
1214 sigset_t set;
1215 int error;
1216
1217 error = copyin(uap->set, &set, sizeof(set));
1218 if (error)
1219 return (error);
1220
1221 error = kern_sigtimedwait(td, set, &ksi, NULL);
1222 if (error)
1223 return (error);
1224
1225 if (uap->info)
1226 error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));
1227
1228 if (error == 0)
1229 td->td_retval[0] = ksi.ksi_signo;
1230 return (error);
1231 }
1232
1233 static void
1234 proc_td_siginfo_capture(struct thread *td, siginfo_t *si)
1235 {
1236 struct thread *thr;
1237
1238 FOREACH_THREAD_IN_PROC(td->td_proc, thr) {
1239 if (thr == td)
1240 thr->td_si = *si;
1241 else
1242 thr->td_si.si_signo = 0;
1243 }
1244 }
1245
1246 int
1247 kern_sigtimedwait(struct thread *td, sigset_t waitset, ksiginfo_t *ksi,
1248 struct timespec *timeout)
1249 {
1250 struct sigacts *ps;
1251 sigset_t saved_mask, new_block;
1252 struct proc *p;
1253 int error, sig, timo, timevalid = 0;
1254 struct timespec rts, ets, ts;
1255 struct timeval tv;
1256 bool traced;
1257
1258 p = td->td_proc;
1259 error = 0;
1260 ets.tv_sec = 0;
1261 ets.tv_nsec = 0;
1262 traced = false;
1263
1264 if (timeout != NULL) {
1265 if (timeout->tv_nsec >= 0 && timeout->tv_nsec < 1000000000) {
1266 timevalid = 1;
1267 getnanouptime(&rts);
1268 timespecadd(&rts, timeout, &ets);
1269 }
1270 }
1271 ksiginfo_init(ksi);
1272 /* Some signals cannot be waited for. */
1273 SIG_CANTMASK(waitset);
1274 ps = p->p_sigacts;
1275 PROC_LOCK(p);
1276 saved_mask = td->td_sigmask;
1277 SIGSETNAND(td->td_sigmask, waitset);
1278 for (;;) {
1279 mtx_lock(&ps->ps_mtx);
1280 sig = cursig(td);
1281 mtx_unlock(&ps->ps_mtx);
1282 KASSERT(sig >= 0, ("sig %d", sig));
1283 if (sig != 0 && SIGISMEMBER(waitset, sig)) {
1284 if (sigqueue_get(&td->td_sigqueue, sig, ksi) != 0 ||
1285 sigqueue_get(&p->p_sigqueue, sig, ksi) != 0) {
1286 error = 0;
1287 break;
1288 }
1289 }
1290
1291 if (error != 0)
1292 break;
1293
1294 /*
1295 * POSIX says this must be checked after looking for pending
1296 * signals.
1297 */
1298 if (timeout != NULL) {
1299 if (!timevalid) {
1300 error = EINVAL;
1301 break;
1302 }
1303 getnanouptime(&rts);
1304 if (timespeccmp(&rts, &ets, >=)) {
1305 error = EAGAIN;
1306 break;
1307 }
1308 timespecsub(&ets, &rts, &ts);
1309 TIMESPEC_TO_TIMEVAL(&tv, &ts);
1310 timo = tvtohz(&tv);
1311 } else {
1312 timo = 0;
1313 }
1314
1315 if (traced) {
1316 error = EINTR;
1317 break;
1318 }
1319
1320 error = msleep(&p->p_sigacts, &p->p_mtx, PPAUSE | PCATCH,
1321 "sigwait", timo);
1322
1323 if (timeout != NULL) {
1324 if (error == ERESTART) {
1325 /* A timeout cannot be restarted. */
1326 error = EINTR;
1327 } else if (error == EAGAIN) {
1328 /* We will recalculate the timeout ourselves. */
1329 error = 0;
1330 }
1331 }
1332
1333 /*
1334 * If PTRACE_SCE or PTRACE_SCX were set after
1335 * userspace entered the syscall, return spurious
1336 * EINTR after the wait is done. Only do this as a last
1337 * resort after rechecking for possible queued signals
1338 * and expired timeouts.
1339 */
1340 if (error == 0 && (p->p_ptevents & PTRACE_SYSCALL) != 0)
1341 traced = true;
1342 }
1343
1344 new_block = saved_mask;
1345 SIGSETNAND(new_block, td->td_sigmask);
1346 td->td_sigmask = saved_mask;
1347 /*
1348 * Fewer signals can be delivered to us; reschedule signal
1349 * notification.
1350 */
1351 if (p->p_numthreads != 1)
1352 reschedule_signals(p, new_block, 0);
1353
1354 if (error == 0) {
1355 SDT_PROBE2(proc, , , signal__clear, sig, ksi);
1356
1357 if (ksi->ksi_code == SI_TIMER)
1358 itimer_accept(p, ksi->ksi_timerid, ksi);
1359
1360 #ifdef KTRACE
1361 if (KTRPOINT(td, KTR_PSIG)) {
1362 sig_t action;
1363
1364 mtx_lock(&ps->ps_mtx);
1365 action = ps->ps_sigact[_SIG_IDX(sig)];
1366 mtx_unlock(&ps->ps_mtx);
1367 ktrpsig(sig, action, &td->td_sigmask, ksi->ksi_code);
1368 }
1369 #endif
1370 if (sig == SIGKILL) {
1371 proc_td_siginfo_capture(td, &ksi->ksi_info);
1372 sigexit(td, sig);
1373 }
1374 }
1375 PROC_UNLOCK(p);
1376 return (error);
1377 }
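
/*
 * Illustrative userspace sketch, not part of the original file:
 * callers typically block the signals they wait for first, so delivery
 * is queued and collected by sigtimedwait(2) instead of running an
 * asynchronous handler.  The helper name is hypothetical.
 */
#if 0
#include <signal.h>
#include <time.h>

static int
example_wait_usr1(siginfo_t *si)
{
        struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
        sigset_t set;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        (void)sigprocmask(SIG_BLOCK, &set, NULL);
        return (sigtimedwait(&set, si, &ts));   /* signo, or -1/EAGAIN */
}
#endif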
1378
1379 #ifndef _SYS_SYSPROTO_H_
1380 struct sigpending_args {
1381 sigset_t *set;
1382 };
1383 #endif
1384 int
1385 sys_sigpending(struct thread *td, struct sigpending_args *uap)
1386 {
1387 struct proc *p = td->td_proc;
1388 sigset_t pending;
1389
1390 PROC_LOCK(p);
1391 pending = p->p_sigqueue.sq_signals;
1392 SIGSETOR(pending, td->td_sigqueue.sq_signals);
1393 PROC_UNLOCK(p);
1394 return (copyout(&pending, uap->set, sizeof(sigset_t)));
1395 }
1396
1397 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
1398 #ifndef _SYS_SYSPROTO_H_
1399 struct osigpending_args {
1400 int dummy;
1401 };
1402 #endif
1403 int
1404 osigpending(struct thread *td, struct osigpending_args *uap)
1405 {
1406 struct proc *p = td->td_proc;
1407 sigset_t pending;
1408
1409 PROC_LOCK(p);
1410 pending = p->p_sigqueue.sq_signals;
1411 SIGSETOR(pending, td->td_sigqueue.sq_signals);
1412 PROC_UNLOCK(p);
1413 SIG2OSIG(pending, td->td_retval[0]);
1414 return (0);
1415 }
1416 #endif /* COMPAT_43 */
1417
1418 #if defined(COMPAT_43)
1419 /*
1420 * Generalized interface signal handler, 4.3-compatible.
1421 */
1422 #ifndef _SYS_SYSPROTO_H_
1423 struct osigvec_args {
1424 int signum;
1425 struct sigvec *nsv;
1426 struct sigvec *osv;
1427 };
1428 #endif
1429 /* ARGSUSED */
1430 int
1431 osigvec(struct thread *td, struct osigvec_args *uap)
1432 {
1433 struct sigvec vec;
1434 struct sigaction nsa, osa;
1435 struct sigaction *nsap, *osap;
1436 int error;
1437
1438 if (uap->signum <= 0 || uap->signum >= ONSIG)
1439 return (EINVAL);
1440 nsap = (uap->nsv != NULL) ? &nsa : NULL;
1441 osap = (uap->osv != NULL) ? &osa : NULL;
1442 if (nsap) {
1443 error = copyin(uap->nsv, &vec, sizeof(vec));
1444 if (error)
1445 return (error);
1446 nsap->sa_handler = vec.sv_handler;
1447 OSIG2SIG(vec.sv_mask, nsap->sa_mask);
1448 nsap->sa_flags = vec.sv_flags;
1449 nsap->sa_flags ^= SA_RESTART; /* opposite of SV_INTERRUPT */
1450 }
1451 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
1452 if (osap && !error) {
1453 vec.sv_handler = osap->sa_handler;
1454 SIG2OSIG(osap->sa_mask, vec.sv_mask);
1455 vec.sv_flags = osap->sa_flags;
1456 vec.sv_flags &= ~SA_NOCLDWAIT;
1457 vec.sv_flags ^= SA_RESTART;
1458 error = copyout(&vec, uap->osv, sizeof(vec));
1459 }
1460 return (error);
1461 }
1462
1463 #ifndef _SYS_SYSPROTO_H_
1464 struct osigblock_args {
1465 int mask;
1466 };
1467 #endif
1468 int
1469 osigblock(struct thread *td, struct osigblock_args *uap)
1470 {
1471 sigset_t set, oset;
1472
1473 OSIG2SIG(uap->mask, set);
1474 kern_sigprocmask(td, SIG_BLOCK, &set, &oset, 0);
1475 SIG2OSIG(oset, td->td_retval[0]);
1476 return (0);
1477 }
1478
1479 #ifndef _SYS_SYSPROTO_H_
1480 struct osigsetmask_args {
1481 int mask;
1482 };
1483 #endif
1484 int
1485 osigsetmask(struct thread *td, struct osigsetmask_args *uap)
1486 {
1487 sigset_t set, oset;
1488
1489 OSIG2SIG(uap->mask, set);
1490 kern_sigprocmask(td, SIG_SETMASK, &set, &oset, 0);
1491 SIG2OSIG(oset, td->td_retval[0]);
1492 return (0);
1493 }
1494 #endif /* COMPAT_43 */
1495
1496 /*
1497 * Suspend calling thread until signal, providing mask to be set in the
1498 * meantime.
1499 */
1500 #ifndef _SYS_SYSPROTO_H_
1501 struct sigsuspend_args {
1502 const sigset_t *sigmask;
1503 };
1504 #endif
1505 /* ARGSUSED */
1506 int
1507 sys_sigsuspend(struct thread *td, struct sigsuspend_args *uap)
1508 {
1509 sigset_t mask;
1510 int error;
1511
1512 error = copyin(uap->sigmask, &mask, sizeof(mask));
1513 if (error)
1514 return (error);
1515 return (kern_sigsuspend(td, mask));
1516 }
1517
1518 int
1519 kern_sigsuspend(struct thread *td, sigset_t mask)
1520 {
1521 struct proc *p = td->td_proc;
1522 int has_sig, sig;
1523
1524 /*
1525 * When returning from sigsuspend, we want
1526 * the old mask to be restored after the
1527 * signal handler has finished. Thus, we
1528 * save it here and mark the sigacts structure
1529 * to indicate this.
1530 */
1531 PROC_LOCK(p);
1532 kern_sigprocmask(td, SIG_SETMASK, &mask, &td->td_oldsigmask,
1533 SIGPROCMASK_PROC_LOCKED);
1534 td->td_pflags |= TDP_OLDMASK;
1535
1536 /*
1537 * Process signals now. Otherwise, we can get a spurious
1538 * wakeup when a signal enters the process queue but is
1539 * delivered to another thread. sigsuspend should return
1540 * only on signal delivery.
1541 */
1542 (p->p_sysent->sv_set_syscall_retval)(td, EINTR);
1543 for (has_sig = 0; !has_sig;) {
1544 while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "pause",
1545 0) == 0)
1546 /* void */;
1547 thread_suspend_check(0);
1548 mtx_lock(&p->p_sigacts->ps_mtx);
1549 while ((sig = cursig(td)) != 0) {
1550 KASSERT(sig >= 0, ("sig %d", sig));
1551 has_sig += postsig(sig);
1552 }
1553 mtx_unlock(&p->p_sigacts->ps_mtx);
1554
1555 /*
1556 * If PTRACE_SCE or PTRACE_SCX were set after
1557 * userspace entered the syscall, return spurious
1558 * EINTR.
1559 */
1560 if ((p->p_ptevents & PTRACE_SYSCALL) != 0)
1561 has_sig += 1;
1562 }
1563 PROC_UNLOCK(p);
1564 td->td_errno = EINTR;
1565 td->td_pflags |= TDP_NERRNO;
1566 return (EJUSTRETURN);
1567 }
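
/*
 * Illustrative userspace sketch, not part of the original file: the
 * classic race-free wait that kern_sigsuspend() implements the kernel
 * half of -- block the signal, test the flag, then atomically restore
 * the old mask and sleep.  Names are hypothetical.
 */
#if 0
#include <signal.h>

static volatile sig_atomic_t example_got_usr1;

static void
example_on_usr1(int sig)
{
        example_got_usr1 = 1;
}

static void
example_wait(void)
{
        sigset_t block, old;

        signal(SIGUSR1, example_on_usr1);
        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);
        sigprocmask(SIG_BLOCK, &block, &old);
        while (!example_got_usr1)
                sigsuspend(&old);       /* returns -1 with errno == EINTR */
        sigprocmask(SIG_SETMASK, &old, NULL);
}
#endif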
1568
1569 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
1570 /*
1571 * Compatibility sigsuspend call for old binaries. Note nonstandard calling
1572 * convention: libc stub passes mask, not pointer, to save a copyin.
1573 */
1574 #ifndef _SYS_SYSPROTO_H_
1575 struct osigsuspend_args {
1576 osigset_t mask;
1577 };
1578 #endif
1579 /* ARGSUSED */
1580 int
1581 osigsuspend(struct thread *td, struct osigsuspend_args *uap)
1582 {
1583 sigset_t mask;
1584
1585 OSIG2SIG(uap->mask, mask);
1586 return (kern_sigsuspend(td, mask));
1587 }
1588 #endif /* COMPAT_43 */
1589
1590 #if defined(COMPAT_43)
1591 #ifndef _SYS_SYSPROTO_H_
1592 struct osigstack_args {
1593 struct sigstack *nss;
1594 struct sigstack *oss;
1595 };
1596 #endif
1597 /* ARGSUSED */
1598 int
1599 osigstack(struct thread *td, struct osigstack_args *uap)
1600 {
1601 struct sigstack nss, oss;
1602 int error = 0;
1603
1604 if (uap->nss != NULL) {
1605 error = copyin(uap->nss, &nss, sizeof(nss));
1606 if (error)
1607 return (error);
1608 }
1609 oss.ss_sp = td->td_sigstk.ss_sp;
1610 oss.ss_onstack = sigonstack(cpu_getstack(td));
1611 if (uap->nss != NULL) {
1612 td->td_sigstk.ss_sp = nss.ss_sp;
1613 td->td_sigstk.ss_size = 0;
1614 td->td_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK;
1615 td->td_pflags |= TDP_ALTSTACK;
1616 }
1617 if (uap->oss != NULL)
1618 error = copyout(&oss, uap->oss, sizeof(oss));
1619
1620 return (error);
1621 }
1622 #endif /* COMPAT_43 */
1623
1624 #ifndef _SYS_SYSPROTO_H_
1625 struct sigaltstack_args {
1626 stack_t *ss;
1627 stack_t *oss;
1628 };
1629 #endif
1630 /* ARGSUSED */
1631 int
1632 sys_sigaltstack(struct thread *td, struct sigaltstack_args *uap)
1633 {
1634 stack_t ss, oss;
1635 int error;
1636
1637 if (uap->ss != NULL) {
1638 error = copyin(uap->ss, &ss, sizeof(ss));
1639 if (error)
1640 return (error);
1641 }
1642 error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
1643 (uap->oss != NULL) ? &oss : NULL);
1644 if (error)
1645 return (error);
1646 if (uap->oss != NULL)
1647 error = copyout(&oss, uap->oss, sizeof(stack_t));
1648 return (error);
1649 }
1650
1651 int
1652 kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
1653 {
1654 struct proc *p = td->td_proc;
1655 int oonstack;
1656
1657 oonstack = sigonstack(cpu_getstack(td));
1658
1659 if (oss != NULL) {
1660 *oss = td->td_sigstk;
1661 oss->ss_flags = (td->td_pflags & TDP_ALTSTACK)
1662 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
1663 }
1664
1665 if (ss != NULL) {
1666 if (oonstack)
1667 return (EPERM);
1668 if ((ss->ss_flags & ~SS_DISABLE) != 0)
1669 return (EINVAL);
1670 if (!(ss->ss_flags & SS_DISABLE)) {
1671 if (ss->ss_size < p->p_sysent->sv_minsigstksz)
1672 return (ENOMEM);
1673
1674 td->td_sigstk = *ss;
1675 td->td_pflags |= TDP_ALTSTACK;
1676 } else {
1677 td->td_pflags &= ~TDP_ALTSTACK;
1678 }
1679 }
1680 return (0);
1681 }
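
/*
 * Illustrative userspace sketch, not part of the original file: an
 * alternate stack lets a handler run after the main stack is
 * exhausted; the sv_minsigstksz check above is what rejects undersized
 * stacks with ENOMEM.  The helper name is hypothetical.
 */
#if 0
#include <signal.h>
#include <stdlib.h>

static int
example_altstack(void)
{
        stack_t ss;

        ss.ss_sp = malloc(SIGSTKSZ);
        if (ss.ss_sp == NULL)
                return (-1);
        ss.ss_size = SIGSTKSZ;
        ss.ss_flags = 0;
        return (sigaltstack(&ss, NULL));
}
#endif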
1682
1683 struct killpg1_ctx {
1684 struct thread *td;
1685 ksiginfo_t *ksi;
1686 int sig;
1687 bool sent;
1688 bool found;
1689 int ret;
1690 };
1691
1692 static void
1693 killpg1_sendsig(struct proc *p, bool notself, struct killpg1_ctx *arg)
1694 {
1695 int err;
1696
1697 if (p->p_pid <= 1 || (p->p_flag & P_SYSTEM) != 0 ||
1698 (notself && p == arg->td->td_proc) || p->p_state == PRS_NEW)
1699 return;
1700 PROC_LOCK(p);
1701 err = p_cansignal(arg->td, p, arg->sig);
1702 if (err == 0 && arg->sig != 0)
1703 pksignal(p, arg->sig, arg->ksi);
1704 PROC_UNLOCK(p);
1705 if (err != ESRCH)
1706 arg->found = true;
1707 if (err == 0)
1708 arg->sent = true;
1709 else if (arg->ret == 0 && err != ESRCH && err != EPERM)
1710 arg->ret = err;
1711 }
1712
1713 /*
1714 * Common code for kill process group/broadcast kill.
1715 * td is the calling thread.
1716 */
1717 static int
1718 killpg1(struct thread *td, int sig, int pgid, int all, ksiginfo_t *ksi)
1719 {
1720 struct proc *p;
1721 struct pgrp *pgrp;
1722 struct killpg1_ctx arg;
1723
1724 arg.td = td;
1725 arg.ksi = ksi;
1726 arg.sig = sig;
1727 arg.sent = false;
1728 arg.found = false;
1729 arg.ret = 0;
1730 if (all) {
1731 /*
1732 * broadcast
1733 */
1734 sx_slock(&allproc_lock);
1735 FOREACH_PROC_IN_SYSTEM(p) {
1736 killpg1_sendsig(p, true, &arg);
1737 }
1738 sx_sunlock(&allproc_lock);
1739 } else {
1740 sx_slock(&proctree_lock);
1741 if (pgid == 0) {
1742 /*
1743 * a zero pgid means send to the caller's process group.
1744 */
1745 pgrp = td->td_proc->p_pgrp;
1746 PGRP_LOCK(pgrp);
1747 } else {
1748 pgrp = pgfind(pgid);
1749 if (pgrp == NULL) {
1750 sx_sunlock(&proctree_lock);
1751 return (ESRCH);
1752 }
1753 }
1754 sx_sunlock(&proctree_lock);
1755 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1756 killpg1_sendsig(p, false, &arg);
1757 }
1758 PGRP_UNLOCK(pgrp);
1759 }
1760 MPASS(arg.ret != 0 || arg.found || !arg.sent);
1761 if (arg.ret == 0 && !arg.sent)
1762 arg.ret = arg.found ? EPERM : ESRCH;
1763 return (arg.ret);
1764 }
1765
1766 #ifndef _SYS_SYSPROTO_H_
1767 struct kill_args {
1768 int pid;
1769 int signum;
1770 };
1771 #endif
1772 /* ARGSUSED */
1773 int
1774 sys_kill(struct thread *td, struct kill_args *uap)
1775 {
1776 ksiginfo_t ksi;
1777 struct proc *p;
1778 int error;
1779
1780 /*
1781 * A process in capability mode can send signals only to itself.
1782 * The main rationale behind this is that abort(3) is implemented as
1783 * kill(getpid(), SIGABRT).
1784 */
1785 if (IN_CAPABILITY_MODE(td) && uap->pid != td->td_proc->p_pid)
1786 return (ECAPMODE);
1787
1788 AUDIT_ARG_SIGNUM(uap->signum);
1789 AUDIT_ARG_PID(uap->pid);
1790 if ((u_int)uap->signum > _SIG_MAXSIG)
1791 return (EINVAL);
1792
1793 ksiginfo_init(&ksi);
1794 ksi.ksi_signo = uap->signum;
1795 ksi.ksi_code = SI_USER;
1796 ksi.ksi_pid = td->td_proc->p_pid;
1797 ksi.ksi_uid = td->td_ucred->cr_ruid;
1798
1799 if (uap->pid > 0) {
1800 /* kill single process */
1801 if ((p = pfind_any(uap->pid)) == NULL)
1802 return (ESRCH);
1803 AUDIT_ARG_PROCESS(p);
1804 error = p_cansignal(td, p, uap->signum);
1805 if (error == 0 && uap->signum)
1806 pksignal(p, uap->signum, &ksi);
1807 PROC_UNLOCK(p);
1808 return (error);
1809 }
1810 switch (uap->pid) {
1811 case -1: /* broadcast signal */
1812 return (killpg1(td, uap->signum, 0, 1, &ksi));
1813 case 0: /* signal own process group */
1814 return (killpg1(td, uap->signum, 0, 0, &ksi));
1815 default: /* negative explicit process group */
1816 return (killpg1(td, uap->signum, -uap->pid, 0, &ksi));
1817 }
1818 /* NOTREACHED */
1819 }
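
/*
 * Illustrative userspace sketch, not part of the original file: the
 * pid encodings dispatched above.  The helper name is hypothetical.
 */
#if 0
#include <signal.h>

static void
example_kill_forms(pid_t pid, pid_t pgid)
{
        (void)kill(pid, SIGTERM);       /* pid > 0: one process */
        (void)kill(0, SIGTERM);         /* 0: caller's process group */
        (void)kill(-pgid, SIGTERM);     /* < -1: explicit process group */
        (void)kill(-1, SIGTERM);        /* -1: broadcast */
}
#endif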
1820
1821 int
1822 sys_pdkill(struct thread *td, struct pdkill_args *uap)
1823 {
1824 struct proc *p;
1825 int error;
1826
1827 AUDIT_ARG_SIGNUM(uap->signum);
1828 AUDIT_ARG_FD(uap->fd);
1829 if ((u_int)uap->signum > _SIG_MAXSIG)
1830 return (EINVAL);
1831
1832 error = procdesc_find(td, uap->fd, &cap_pdkill_rights, &p);
1833 if (error)
1834 return (error);
1835 AUDIT_ARG_PROCESS(p);
1836 error = p_cansignal(td, p, uap->signum);
1837 if (error == 0 && uap->signum)
1838 kern_psignal(p, uap->signum);
1839 PROC_UNLOCK(p);
1840 return (error);
1841 }
1842
1843 #if defined(COMPAT_43)
1844 #ifndef _SYS_SYSPROTO_H_
1845 struct okillpg_args {
1846 int pgid;
1847 int signum;
1848 };
1849 #endif
1850 /* ARGSUSED */
1851 int
1852 okillpg(struct thread *td, struct okillpg_args *uap)
1853 {
1854 ksiginfo_t ksi;
1855
1856 AUDIT_ARG_SIGNUM(uap->signum);
1857 AUDIT_ARG_PID(uap->pgid);
1858 if ((u_int)uap->signum > _SIG_MAXSIG)
1859 return (EINVAL);
1860
1861 ksiginfo_init(&ksi);
1862 ksi.ksi_signo = uap->signum;
1863 ksi.ksi_code = SI_USER;
1864 ksi.ksi_pid = td->td_proc->p_pid;
1865 ksi.ksi_uid = td->td_ucred->cr_ruid;
1866 return (killpg1(td, uap->signum, uap->pgid, 0, &ksi));
1867 }
1868 #endif /* COMPAT_43 */
1869
1870 #ifndef _SYS_SYSPROTO_H_
1871 struct sigqueue_args {
1872 pid_t pid;
1873 int signum;
1874 /* union sigval */ void *value;
1875 };
1876 #endif
1877 int
1878 sys_sigqueue(struct thread *td, struct sigqueue_args *uap)
1879 {
1880 union sigval sv;
1881
1882 sv.sival_ptr = uap->value;
1883
1884 return (kern_sigqueue(td, uap->pid, uap->signum, &sv));
1885 }
1886
1887 int
1888 kern_sigqueue(struct thread *td, pid_t pid, int signum, union sigval *value)
1889 {
1890 ksiginfo_t ksi;
1891 struct proc *p;
1892 int error;
1893
1894 if ((u_int)signum > _SIG_MAXSIG)
1895 return (EINVAL);
1896
1897 /*
1898 * The specification says sigqueue can send a signal only to a
1899 * single process.
1900 */
1901 if (pid <= 0)
1902 return (EINVAL);
1903
1904 if ((p = pfind_any(pid)) == NULL)
1905 return (ESRCH);
1906 error = p_cansignal(td, p, signum);
1907 if (error == 0 && signum != 0) {
1908 ksiginfo_init(&ksi);
1909 ksi.ksi_flags = KSI_SIGQ;
1910 ksi.ksi_signo = signum;
1911 ksi.ksi_code = SI_QUEUE;
1912 ksi.ksi_pid = td->td_proc->p_pid;
1913 ksi.ksi_uid = td->td_ucred->cr_ruid;
1914 ksi.ksi_value = *value;
1915 error = pksignal(p, ksi.ksi_signo, &ksi);
1916 }
1917 PROC_UNLOCK(p);
1918 return (error);
1919 }
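
/*
 * Illustrative userspace sketch, not part of the original file:
 * sigqueue(2) attaches a sigval payload that the receiver sees in
 * si_value when it installed the handler with SA_SIGINFO.  The helper
 * name is hypothetical.
 */
#if 0
#include <signal.h>

static int
example_sigqueue(pid_t pid)
{
        union sigval sv;

        sv.sival_int = 42;      /* arbitrary payload */
        return (sigqueue(pid, SIGUSR2, sv));
}
#endif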
1920
1921 /*
1922 * Send a signal to a process group.
1923 */
1924 void
1925 gsignal(int pgid, int sig, ksiginfo_t *ksi)
1926 {
1927 struct pgrp *pgrp;
1928
1929 if (pgid != 0) {
1930 sx_slock(&proctree_lock);
1931 pgrp = pgfind(pgid);
1932 sx_sunlock(&proctree_lock);
1933 if (pgrp != NULL) {
1934 pgsignal(pgrp, sig, 0, ksi);
1935 PGRP_UNLOCK(pgrp);
1936 }
1937 }
1938 }
1939
1940 /*
1941 * Send a signal to a process group. If checkctty is 1,
1942 * limit to members which have a controlling terminal.
1943 */
1944 void
1945 pgsignal(struct pgrp *pgrp, int sig, int checkctty, ksiginfo_t *ksi)
1946 {
1947 struct proc *p;
1948
1949 if (pgrp) {
1950 PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
1951 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1952 PROC_LOCK(p);
1953 if (p->p_state == PRS_NORMAL &&
1954 (checkctty == 0 || p->p_flag & P_CONTROLT))
1955 pksignal(p, sig, ksi);
1956 PROC_UNLOCK(p);
1957 }
1958 }
1959 }
1960
1961
1962 /*
1963 * Recalculate the signal mask and reset the signal disposition after
1964 * usermode frame for delivery is formed. Should be called after
1965 * the machine-specific routine, because sysent->sv_sendsig() needs correct
1966 * ps_siginfo and signal mask.
1967 */
1968 static void
1969 postsig_done(int sig, struct thread *td, struct sigacts *ps)
1970 {
1971 sigset_t mask;
1972
1973 mtx_assert(&ps->ps_mtx, MA_OWNED);
1974 td->td_ru.ru_nsignals++;
1975 mask = ps->ps_catchmask[_SIG_IDX(sig)];
1976 if (!SIGISMEMBER(ps->ps_signodefer, sig))
1977 SIGADDSET(mask, sig);
1978 kern_sigprocmask(td, SIG_BLOCK, &mask, NULL,
1979 SIGPROCMASK_PROC_LOCKED | SIGPROCMASK_PS_LOCKED);
1980 if (SIGISMEMBER(ps->ps_sigreset, sig))
1981 sigdflt(ps, sig);
1982 }
1983
1984
1985 /*
1986 * Send a signal caused by a trap to the current thread. If it will be
1987 * caught immediately, deliver it with correct code. Otherwise, post it
1988 * normally.
1989 */
1990 void
1991 trapsignal(struct thread *td, ksiginfo_t *ksi)
1992 {
1993 struct sigacts *ps;
1994 struct proc *p;
1995 int sig;
1996 int code;
1997
1998 p = td->td_proc;
1999 sig = ksi->ksi_signo;
2000 code = ksi->ksi_code;
2001 KASSERT(_SIG_VALID(sig), ("invalid signal"));
2002
2003 PROC_LOCK(p);
2004 ps = p->p_sigacts;
2005 mtx_lock(&ps->ps_mtx);
2006 if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
2007 !SIGISMEMBER(td->td_sigmask, sig)) {
2008 #ifdef KTRACE
2009 if (KTRPOINT(curthread, KTR_PSIG))
2010 ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
2011 &td->td_sigmask, code);
2012 #endif
2013 (*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)],
2014 ksi, &td->td_sigmask);
2015 postsig_done(sig, td, ps);
2016 mtx_unlock(&ps->ps_mtx);
2017 } else {
2018 /*
2019 * Avoid a possible infinite loop if the thread is
2020 * masking the signal or the process is ignoring the
2021 * signal.
2022 */
2023 if (kern_forcesigexit &&
2024 (SIGISMEMBER(td->td_sigmask, sig) ||
2025 ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN)) {
2026 SIGDELSET(td->td_sigmask, sig);
2027 SIGDELSET(ps->ps_sigcatch, sig);
2028 SIGDELSET(ps->ps_sigignore, sig);
2029 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
2030 }
2031 mtx_unlock(&ps->ps_mtx);
2032 p->p_code = code; /* XXX for core dump/debugger */
2033 p->p_sig = sig; /* XXX to verify code */
2034 tdsendsignal(p, td, sig, ksi);
2035 }
2036 PROC_UNLOCK(p);
2037 }
2038
2039 static struct thread *
2040 sigtd(struct proc *p, int sig, int prop)
2041 {
2042 struct thread *td, *signal_td;
2043
2044 PROC_LOCK_ASSERT(p, MA_OWNED);
2045
2046 /*
2047 * Check if current thread can handle the signal without
2048 * switching context to another thread.
2049 */
2050 if (curproc == p && !SIGISMEMBER(curthread->td_sigmask, sig))
2051 return (curthread);
2052 signal_td = NULL;
2053 FOREACH_THREAD_IN_PROC(p, td) {
2054 if (!SIGISMEMBER(td->td_sigmask, sig)) {
2055 signal_td = td;
2056 break;
2057 }
2058 }
2059 if (signal_td == NULL)
2060 signal_td = FIRST_THREAD_IN_PROC(p);
2061 return (signal_td);
2062 }
2063
2064 /*
2065 * Send the signal to the process. If the signal has an action, the action
2066 * is usually performed by the target process rather than the caller; we add
2067 * the signal to the set of pending signals for the process.
2068 *
2069 * Exceptions:
2070 * o When a stop signal is sent to a sleeping process that takes the
2071 * default action, the process is stopped without awakening it.
2072 * o SIGCONT restarts stopped processes (or puts them back to sleep)
2073 * regardless of the signal action (e.g., blocked or ignored).
2074 *
2075 * Other ignored signals are discarded immediately.
2076 *
2077 * NB: This function may be entered from the debugger via the "kill" DDB
2078 * command. There is little that can be done to mitigate the possibly messy
2079 * side effects of this unwise possibility.
2080 */
2081 void
2082 kern_psignal(struct proc *p, int sig)
2083 {
2084 ksiginfo_t ksi;
2085
2086 ksiginfo_init(&ksi);
2087 ksi.ksi_signo = sig;
2088 ksi.ksi_code = SI_KERNEL;
2089 (void) tdsendsignal(p, NULL, sig, &ksi);
2090 }
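
The stop/continue exceptions described above can be observed from userland with waitpid(2): a default-action stop signal stops a child without any handler running, and SIGCONT restarts it regardless of disposition, deleting pending stop signals on the way (sigqueue_delete_stopmask_proc() above). A small sketch, assuming a POSIX system:

    #include <sys/types.h>
    #include <sys/wait.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
            pid_t pid;
            int status;

            pid = fork();
            if (pid == 0) {
                    for (;;)
                            pause();
            }
            kill(pid, SIGSTOP);             /* default action: stop */
            waitpid(pid, &status, WUNTRACED);
            if (WIFSTOPPED(status))
                    printf("stopped by %d\n", WSTOPSIG(status));

            kill(pid, SIGCONT);     /* restarts even with no handler set;
                                     * pending stop signals are deleted */
            waitpid(pid, &status, WCONTINUED);
            if (WIFCONTINUED(status))
                    printf("continued\n");
            kill(pid, SIGKILL);
            waitpid(pid, &status, 0);
            return (0);
    }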
2091
2092 int
2093 pksignal(struct proc *p, int sig, ksiginfo_t *ksi)
2094 {
2095
2096 return (tdsendsignal(p, NULL, sig, ksi));
2097 }
2098
2099 /* Utility function for finding a thread to send signal event to. */
2100 int
2101 sigev_findtd(struct proc *p, struct sigevent *sigev, struct thread **ttd)
2102 {
2103 struct thread *td;
2104
2105 if (sigev->sigev_notify == SIGEV_THREAD_ID) {
2106 td = tdfind(sigev->sigev_notify_thread_id, p->p_pid);
2107 if (td == NULL)
2108 return (ESRCH);
2109 *ttd = td;
2110 } else {
2111 *ttd = NULL;
2112 PROC_LOCK(p);
2113 }
2114 return (0);
2115 }
2116
2117 void
2118 tdsignal(struct thread *td, int sig)
2119 {
2120 ksiginfo_t ksi;
2121
2122 ksiginfo_init(&ksi);
2123 ksi.ksi_signo = sig;
2124 ksi.ksi_code = SI_KERNEL;
2125 (void) tdsendsignal(td->td_proc, td, sig, &ksi);
2126 }
2127
2128 void
2129 tdksignal(struct thread *td, int sig, ksiginfo_t *ksi)
2130 {
2131
2132 (void) tdsendsignal(td->td_proc, td, sig, ksi);
2133 }
2134
2135 int
2136 tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
2137 {
2138 sig_t action;
2139 sigqueue_t *sigqueue;
2140 int prop;
2141 struct sigacts *ps;
2142 int intrval;
2143 int ret = 0;
2144 int wakeup_swapper;
2145
2146 MPASS(td == NULL || p == td->td_proc);
2147 PROC_LOCK_ASSERT(p, MA_OWNED);
2148
2149 if (!_SIG_VALID(sig))
2150 panic("%s(): invalid signal %d", __func__, sig);
2151
2152 KASSERT(ksi == NULL || !KSI_ONQ(ksi), ("%s: ksi on queue", __func__));
2153
2154 /*
2155 * IEEE Std 1003.1-2001: return success when killing a zombie.
2156 */
2157 if (p->p_state == PRS_ZOMBIE) {
2158 if (ksi && (ksi->ksi_flags & KSI_INS))
2159 ksiginfo_tryfree(ksi);
2160 return (ret);
2161 }
2162
2163 ps = p->p_sigacts;
2164 KNOTE_LOCKED(p->p_klist, NOTE_SIGNAL | sig);
2165 prop = sigprop(sig);
2166
2167 if (td == NULL) {
2168 td = sigtd(p, sig, prop);
2169 sigqueue = &p->p_sigqueue;
2170 } else
2171 sigqueue = &td->td_sigqueue;
2172
2173 SDT_PROBE3(proc, , , signal__send, td, p, sig);
2174
2175 /*
2176 * If the signal is being ignored,
2177 * then we forget about it immediately.
2178 * (Note: we don't set SIGCONT in ps_sigignore,
2179 * and if it is set to SIG_IGN,
2180 * action will be SIG_DFL here.)
2181 */
2182 mtx_lock(&ps->ps_mtx);
2183 if (SIGISMEMBER(ps->ps_sigignore, sig)) {
2184 SDT_PROBE3(proc, , , signal__discard, td, p, sig);
2185
2186 mtx_unlock(&ps->ps_mtx);
2187 if (ksi && (ksi->ksi_flags & KSI_INS))
2188 ksiginfo_tryfree(ksi);
2189 return (ret);
2190 }
2191 if (SIGISMEMBER(td->td_sigmask, sig))
2192 action = SIG_HOLD;
2193 else if (SIGISMEMBER(ps->ps_sigcatch, sig))
2194 action = SIG_CATCH;
2195 else
2196 action = SIG_DFL;
2197 if (SIGISMEMBER(ps->ps_sigintr, sig))
2198 intrval = EINTR;
2199 else
2200 intrval = ERESTART;
2201 mtx_unlock(&ps->ps_mtx);
2202
2203 if (prop & SIGPROP_CONT)
2204 sigqueue_delete_stopmask_proc(p);
2205 else if (prop & SIGPROP_STOP) {
2206 /*
2207 * If sending a tty stop signal to a member of an orphaned
2208 * process group, discard the signal here if the action
2209 * is default; don't stop the process below if sleeping,
2210 * and don't clear any pending SIGCONT.
2211 */
2212 if ((prop & SIGPROP_TTYSTOP) != 0 &&
2213 (p->p_pgrp->pg_flags & PGRP_ORPHANED) != 0 &&
2214 action == SIG_DFL) {
2215 if (ksi && (ksi->ksi_flags & KSI_INS))
2216 ksiginfo_tryfree(ksi);
2217 return (ret);
2218 }
2219 sigqueue_delete_proc(p, SIGCONT);
2220 if (p->p_flag & P_CONTINUED) {
2221 p->p_flag &= ~P_CONTINUED;
2222 PROC_LOCK(p->p_pptr);
2223 sigqueue_take(p->p_ksi);
2224 PROC_UNLOCK(p->p_pptr);
2225 }
2226 }
2227
2228 ret = sigqueue_add(sigqueue, sig, ksi);
2229 if (ret != 0)
2230 return (ret);
2231 signotify(td);
2232 /*
2233 * Defer further processing for signals which are held,
2234 * except that stopped processes must be continued by SIGCONT.
2235 */
2236 if (action == SIG_HOLD &&
2237 !((prop & SIGPROP_CONT) && (p->p_flag & P_STOPPED_SIG)))
2238 return (ret);
2239
2240 /* SIGKILL: Remove procfs STOPEVENTs. */
2241 if (sig == SIGKILL) {
2242 /* from procfs_ioctl.c: PIOCBIC */
2243 p->p_stops = 0;
2244 /* from procfs_ioctl.c: PIOCCONT */
2245 p->p_step = 0;
2246 wakeup(&p->p_step);
2247 }
2248 /*
2249 * Some signals have a process-wide effect and a per-thread
2250 * component. Most processing occurs when the process next
2251 	 * tries to cross the user boundary; however, there are some
2252 * times when processing needs to be done immediately, such as
2253 * waking up threads so that they can cross the user boundary.
2254 * We try to do the per-process part here.
2255 */
2256 if (P_SHOULDSTOP(p)) {
2257 KASSERT(!(p->p_flag & P_WEXIT),
2258 ("signal to stopped but exiting process"));
2259 if (sig == SIGKILL) {
2260 /*
2261 * If traced process is already stopped,
2262 * then no further action is necessary.
2263 */
2264 if (p->p_flag & P_TRACED)
2265 goto out;
2266 /*
2267 * SIGKILL sets process running.
2268 * It will die elsewhere.
2269 * All threads must be restarted.
2270 */
2271 p->p_flag &= ~P_STOPPED_SIG;
2272 goto runfast;
2273 }
2274
2275 if (prop & SIGPROP_CONT) {
2276 /*
2277 * If traced process is already stopped,
2278 * then no further action is necessary.
2279 */
2280 if (p->p_flag & P_TRACED)
2281 goto out;
2282 /*
2283 * If SIGCONT is default (or ignored), we continue the
2284 * process but don't leave the signal in sigqueue as
2285 * it has no further action. If SIGCONT is held, we
2286 * continue the process and leave the signal in
2287 * sigqueue. If the process catches SIGCONT, let it
2288 * handle the signal itself. If it isn't waiting on
2289 * an event, it goes back to run state.
2290 			 * Otherwise, the process goes back to the sleep state.
2291 */
2292 p->p_flag &= ~P_STOPPED_SIG;
2293 PROC_SLOCK(p);
2294 if (p->p_numthreads == p->p_suspcount) {
2295 PROC_SUNLOCK(p);
2296 p->p_flag |= P_CONTINUED;
2297 p->p_xsig = SIGCONT;
2298 PROC_LOCK(p->p_pptr);
2299 childproc_continued(p);
2300 PROC_UNLOCK(p->p_pptr);
2301 PROC_SLOCK(p);
2302 }
2303 if (action == SIG_DFL) {
2304 thread_unsuspend(p);
2305 PROC_SUNLOCK(p);
2306 sigqueue_delete(sigqueue, sig);
2307 goto out;
2308 }
2309 if (action == SIG_CATCH) {
2310 /*
2311 * The process wants to catch it so it needs
2312 * to run at least one thread, but which one?
2313 */
2314 PROC_SUNLOCK(p);
2315 goto runfast;
2316 }
2317 /*
2318 * The signal is not ignored or caught.
2319 */
2320 thread_unsuspend(p);
2321 PROC_SUNLOCK(p);
2322 goto out;
2323 }
2324
2325 if (prop & SIGPROP_STOP) {
2326 /*
2327 * If traced process is already stopped,
2328 * then no further action is necessary.
2329 */
2330 if (p->p_flag & P_TRACED)
2331 goto out;
2332 /*
2333 			 * Already stopped, so there is no need to stop again
2334 			 * (if we did, the shell could get confused).
2335 			 * Just make sure the signal STOP bit is set.
2336 */
2337 p->p_flag |= P_STOPPED_SIG;
2338 sigqueue_delete(sigqueue, sig);
2339 goto out;
2340 }
2341
2342 /*
2343 * All other kinds of signals:
2344 * If a thread is sleeping interruptibly, simulate a
2345 * wakeup so that when it is continued it will be made
2346 * runnable and can look at the signal. However, don't make
2347 * the PROCESS runnable, leave it stopped.
2348 * It may run a bit until it hits a thread_suspend_check().
2349 */
2350 wakeup_swapper = 0;
2351 PROC_SLOCK(p);
2352 thread_lock(td);
2353 if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR))
2354 wakeup_swapper = sleepq_abort(td, intrval);
2355 thread_unlock(td);
2356 PROC_SUNLOCK(p);
2357 if (wakeup_swapper)
2358 kick_proc0();
2359 goto out;
2360 /*
2361 * Mutexes are short lived. Threads waiting on them will
2362 * hit thread_suspend_check() soon.
2363 */
2364 } else if (p->p_state == PRS_NORMAL) {
2365 if (p->p_flag & P_TRACED || action == SIG_CATCH) {
2366 tdsigwakeup(td, sig, action, intrval);
2367 goto out;
2368 }
2369
2370 MPASS(action == SIG_DFL);
2371
2372 if (prop & SIGPROP_STOP) {
2373 if (p->p_flag & (P_PPWAIT|P_WEXIT))
2374 goto out;
2375 p->p_flag |= P_STOPPED_SIG;
2376 p->p_xsig = sig;
2377 PROC_SLOCK(p);
2378 wakeup_swapper = sig_suspend_threads(td, p, 1);
2379 if (p->p_numthreads == p->p_suspcount) {
2380 /*
2381 				 * Only a thread sending a signal to another
2382 				 * process can reach here: a thread signalling
2383 				 * its own process does not suspend itself, so
2384 				 * p_numthreads should never be equal to
2385 				 * p_suspcount.
2386 */
2387 thread_stopped(p);
2388 PROC_SUNLOCK(p);
2389 sigqueue_delete_proc(p, p->p_xsig);
2390 } else
2391 PROC_SUNLOCK(p);
2392 if (wakeup_swapper)
2393 kick_proc0();
2394 goto out;
2395 }
2396 } else {
2397 		/* Not in "NORMAL" state. Discard the signal. */
2398 sigqueue_delete(sigqueue, sig);
2399 goto out;
2400 }
2401
2402 /*
2403 * The process is not stopped so we need to apply the signal to all the
2404 * running threads.
2405 */
2406 runfast:
2407 tdsigwakeup(td, sig, action, intrval);
2408 PROC_SLOCK(p);
2409 thread_unsuspend(p);
2410 PROC_SUNLOCK(p);
2411 out:
2412 /* If we jump here, proc slock should not be owned. */
2413 PROC_SLOCK_ASSERT(p, MA_NOTOWNED);
2414 return (ret);
2415 }
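
The PRS_ZOMBIE branch near the top of tdsendsignal() implements the POSIX rule cited in the comment: a signal sent to a zombie is discarded but reported as success, and ESRCH only appears once the zombie has been reaped. A sketch (the sleep(1) is a crude way to let the child exit first):

    #include <sys/wait.h>
    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
            pid_t pid = fork();

            if (pid == 0)
                    _exit(0);               /* child becomes a zombie */
            sleep(1);                       /* let the child exit */
            if (kill(pid, SIGTERM) == 0)    /* succeeds per IEEE Std 1003.1 */
                    printf("kill on zombie: ok\n");
            waitpid(pid, NULL, 0);          /* now reap it */
            if (kill(pid, SIGTERM) == -1 && errno == ESRCH)
                    printf("after reaping: ESRCH\n");
            return (0);
    }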
2416
2417 /*
2418 * The force of a signal has been directed against a single
2419 * thread. We need to see what we can do about knocking it
2420 * out of any sleep it may be in etc.
2421 */
2422 static void
2423 tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
2424 {
2425 struct proc *p = td->td_proc;
2426 int prop;
2427 int wakeup_swapper;
2428
2429 wakeup_swapper = 0;
2430 PROC_LOCK_ASSERT(p, MA_OWNED);
2431 prop = sigprop(sig);
2432
2433 PROC_SLOCK(p);
2434 thread_lock(td);
2435 /*
2436 * Bring the priority of a thread up if we want it to get
2437 * killed in this lifetime. Be careful to avoid bumping the
2438 	 * priority of the idle thread, since signalling kernel
2439 	 * processes is still allowed.
2440 */
2441 if (action == SIG_DFL && (prop & SIGPROP_KILL) != 0 &&
2442 td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
2443 sched_prio(td, PUSER);
2444 if (TD_ON_SLEEPQ(td)) {
2445 /*
2446 		 * If the thread is sleeping uninterruptibly,
2447 		 * we can't interrupt the sleep... the signal will
2448 * be noticed when the process returns through
2449 * trap() or syscall().
2450 */
2451 if ((td->td_flags & TDF_SINTR) == 0)
2452 goto out;
2453 /*
2454 * If SIGCONT is default (or ignored) and process is
2455 * asleep, we are finished; the process should not
2456 * be awakened.
2457 */
2458 if ((prop & SIGPROP_CONT) && action == SIG_DFL) {
2459 thread_unlock(td);
2460 PROC_SUNLOCK(p);
2461 sigqueue_delete(&p->p_sigqueue, sig);
2462 /*
2463 * It may be on either list in this state.
2464 * Remove from both for now.
2465 */
2466 sigqueue_delete(&td->td_sigqueue, sig);
2467 return;
2468 }
2469
2470 /*
2471 * Don't awaken a sleeping thread for SIGSTOP if the
2472 * STOP signal is deferred.
2473 */
2474 if ((prop & SIGPROP_STOP) != 0 && (td->td_flags & (TDF_SBDRY |
2475 TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY)
2476 goto out;
2477
2478 /*
2479 * Give low priority threads a better chance to run.
2480 */
2481 if (td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
2482 sched_prio(td, PUSER);
2483
2484 wakeup_swapper = sleepq_abort(td, intrval);
2485 } else {
2486 /*
2487 * Other states do nothing with the signal immediately,
2488 * other than kicking ourselves if we are running.
2489 * It will either never be noticed, or noticed very soon.
2490 */
2491 #ifdef SMP
2492 if (TD_IS_RUNNING(td) && td != curthread)
2493 forward_signal(td);
2494 #endif
2495 }
2496 out:
2497 PROC_SUNLOCK(p);
2498 thread_unlock(td);
2499 if (wakeup_swapper)
2500 kick_proc0();
2501 }
2502
2503 static int
2504 sig_suspend_threads(struct thread *td, struct proc *p, int sending)
2505 {
2506 struct thread *td2;
2507 int wakeup_swapper;
2508
2509 PROC_LOCK_ASSERT(p, MA_OWNED);
2510 PROC_SLOCK_ASSERT(p, MA_OWNED);
2511 MPASS(sending || td == curthread);
2512
2513 wakeup_swapper = 0;
2514 FOREACH_THREAD_IN_PROC(p, td2) {
2515 thread_lock(td2);
2516 td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
2517 if ((TD_IS_SLEEPING(td2) || TD_IS_SWAPPED(td2)) &&
2518 (td2->td_flags & TDF_SINTR)) {
2519 if (td2->td_flags & TDF_SBDRY) {
2520 /*
2521 * Once a thread is asleep with
2522 * TDF_SBDRY and without TDF_SERESTART
2523 * or TDF_SEINTR set, it should never
2524 * become suspended due to this check.
2525 */
2526 KASSERT(!TD_IS_SUSPENDED(td2),
2527 ("thread with deferred stops suspended"));
2528 if (TD_SBDRY_INTR(td2))
2529 wakeup_swapper |= sleepq_abort(td2,
2530 TD_SBDRY_ERRNO(td2));
2531 } else if (!TD_IS_SUSPENDED(td2)) {
2532 thread_suspend_one(td2);
2533 }
2534 } else if (!TD_IS_SUSPENDED(td2)) {
2535 if (sending || td != td2)
2536 td2->td_flags |= TDF_ASTPENDING;
2537 #ifdef SMP
2538 if (TD_IS_RUNNING(td2) && td2 != td)
2539 forward_signal(td2);
2540 #endif
2541 }
2542 thread_unlock(td2);
2543 }
2544 return (wakeup_swapper);
2545 }
2546
2547 /*
2548 * Stop the process for an event deemed interesting to the debugger. If si is
2549 * non-NULL, this is a signal exchange; the new signal requested by the
2550 * debugger will be returned for handling. If si is NULL, this is some other
2551  * type of interesting event. The debugger may request that a signal be
2552  * delivered in that case as well; however, it will be deferred until it can be handled.
2553 */
2554 int
2555 ptracestop(struct thread *td, int sig, ksiginfo_t *si)
2556 {
2557 struct proc *p = td->td_proc;
2558 struct thread *td2;
2559 ksiginfo_t ksi;
2560 int prop;
2561
2562 PROC_LOCK_ASSERT(p, MA_OWNED);
2563 KASSERT(!(p->p_flag & P_WEXIT), ("Stopping exiting process"));
2564 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
2565 &p->p_mtx.lock_object, "Stopping for traced signal");
2566
2567 td->td_xsig = sig;
2568
2569 if (si == NULL || (si->ksi_flags & KSI_PTRACE) == 0) {
2570 td->td_dbgflags |= TDB_XSIG;
2571 CTR4(KTR_PTRACE, "ptracestop: tid %d (pid %d) flags %#x sig %d",
2572 td->td_tid, p->p_pid, td->td_dbgflags, sig);
2573 PROC_SLOCK(p);
2574 while ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_XSIG)) {
2575 if (P_KILLED(p)) {
2576 /*
2577 * Ensure that, if we've been PT_KILLed, the
2578 * exit status reflects that. Another thread
2579 * may also be in ptracestop(), having just
2580 * received the SIGKILL, but this thread was
2581 * unsuspended first.
2582 */
2583 td->td_dbgflags &= ~TDB_XSIG;
2584 td->td_xsig = SIGKILL;
2585 p->p_ptevents = 0;
2586 break;
2587 }
2588 if (p->p_flag & P_SINGLE_EXIT &&
2589 !(td->td_dbgflags & TDB_EXIT)) {
2590 /*
2591 * Ignore ptrace stops except for thread exit
2592 * events when the process exits.
2593 */
2594 td->td_dbgflags &= ~TDB_XSIG;
2595 PROC_SUNLOCK(p);
2596 return (0);
2597 }
2598
2599 /*
2600 * Make wait(2) work. Ensure that right after the
2601 			 * attach, the thread chosen to become the leader
2602 			 * of the attach gets reported to the waiter.
2603 * Otherwise, just avoid overwriting another thread's
2604 * assignment to p_xthread. If another thread has
2605 * already set p_xthread, the current thread will get
2606 * a chance to report itself upon the next iteration.
2607 */
2608 if ((td->td_dbgflags & TDB_FSTP) != 0 ||
2609 ((p->p_flag2 & P2_PTRACE_FSTP) == 0 &&
2610 p->p_xthread == NULL)) {
2611 p->p_xsig = sig;
2612 p->p_xthread = td;
2613
2614 /*
2615 * If we are on sleepqueue already,
2616 * let sleepqueue code decide if it
2617 * needs to go sleep after attach.
2618 */
2619 if (td->td_wchan == NULL)
2620 td->td_dbgflags &= ~TDB_FSTP;
2621
2622 p->p_flag2 &= ~P2_PTRACE_FSTP;
2623 p->p_flag |= P_STOPPED_SIG | P_STOPPED_TRACE;
2624 sig_suspend_threads(td, p, 0);
2625 }
2626 if ((td->td_dbgflags & TDB_STOPATFORK) != 0) {
2627 td->td_dbgflags &= ~TDB_STOPATFORK;
2628 }
2629 stopme:
2630 thread_suspend_switch(td, p);
2631 if (p->p_xthread == td)
2632 p->p_xthread = NULL;
2633 if (!(p->p_flag & P_TRACED))
2634 break;
2635 if (td->td_dbgflags & TDB_SUSPEND) {
2636 if (p->p_flag & P_SINGLE_EXIT)
2637 break;
2638 goto stopme;
2639 }
2640 }
2641 PROC_SUNLOCK(p);
2642 }
2643
2644 if (si != NULL && sig == td->td_xsig) {
2645 /* Parent wants us to take the original signal unchanged. */
2646 si->ksi_flags |= KSI_HEAD;
2647 if (sigqueue_add(&td->td_sigqueue, sig, si) != 0)
2648 si->ksi_signo = 0;
2649 } else if (td->td_xsig != 0) {
2650 /*
2651 * If parent wants us to take a new signal, then it will leave
2652 * it in td->td_xsig; otherwise we just look for signals again.
2653 */
2654 ksiginfo_init(&ksi);
2655 ksi.ksi_signo = td->td_xsig;
2656 ksi.ksi_flags |= KSI_PTRACE;
2657 prop = sigprop(td->td_xsig);
2658 td2 = sigtd(p, td->td_xsig, prop);
2659 tdsendsignal(p, td2, td->td_xsig, &ksi);
2660 if (td != td2)
2661 return (0);
2662 }
2663
2664 return (td->td_xsig);
2665 }
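
The signal exchange that ptracestop() implements is driven from the debugger side by the last argument of ptrace(PT_CONTINUE): passing 0 discards the original signal, and passing a different number substitutes it (it ends up in td_xsig). A sketch of the exchange using FreeBSD's ptrace(2) interface:

    #include <sys/types.h>
    #include <sys/ptrace.h>
    #include <sys/wait.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
            pid_t pid;
            int status;

            pid = fork();
            if (pid == 0) {
                    ptrace(PT_TRACE_ME, 0, NULL, 0);
                    raise(SIGUSR1);         /* reports to the debugger */
                    _exit(0);
            }
            waitpid(pid, &status, 0);       /* child stopped in ptracestop() */
            if (WIFSTOPPED(status))
                    printf("child stopped on %d\n", WSTOPSIG(status));
            /* Resume, exchanging SIGUSR1 for SIGUSR2 (0 would discard it). */
            ptrace(PT_CONTINUE, pid, (caddr_t)1, SIGUSR2);
            waitpid(pid, &status, 0);
            if (WIFSIGNALED(status))
                    printf("child killed by %d\n", WTERMSIG(status));
            return (0);
    }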
2666
2667 static void
2668 reschedule_signals(struct proc *p, sigset_t block, int flags)
2669 {
2670 struct sigacts *ps;
2671 struct thread *td;
2672 int sig;
2673
2674 PROC_LOCK_ASSERT(p, MA_OWNED);
2675 ps = p->p_sigacts;
2676 mtx_assert(&ps->ps_mtx, (flags & SIGPROCMASK_PS_LOCKED) != 0 ?
2677 MA_OWNED : MA_NOTOWNED);
2678 if (SIGISEMPTY(p->p_siglist))
2679 return;
2680 SIGSETAND(block, p->p_siglist);
2681 while ((sig = sig_ffs(&block)) != 0) {
2682 SIGDELSET(block, sig);
2683 td = sigtd(p, sig, 0);
2684 signotify(td);
2685 if (!(flags & SIGPROCMASK_PS_LOCKED))
2686 mtx_lock(&ps->ps_mtx);
2687 if (p->p_flag & P_TRACED ||
2688 (SIGISMEMBER(ps->ps_sigcatch, sig) &&
2689 !SIGISMEMBER(td->td_sigmask, sig)))
2690 tdsigwakeup(td, sig, SIG_CATCH,
2691 (SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR :
2692 ERESTART));
2693 if (!(flags & SIGPROCMASK_PS_LOCKED))
2694 mtx_unlock(&ps->ps_mtx);
2695 }
2696 }
2697
2698 void
2699 tdsigcleanup(struct thread *td)
2700 {
2701 struct proc *p;
2702 sigset_t unblocked;
2703
2704 p = td->td_proc;
2705 PROC_LOCK_ASSERT(p, MA_OWNED);
2706
2707 sigqueue_flush(&td->td_sigqueue);
2708 if (p->p_numthreads == 1)
2709 return;
2710
2711 /*
2712 	 * Since we cannot handle signals, notify the signal-posting
2713 	 * code of this by filling the sigmask.
2714 *
2715 * Also, if needed, wake up thread(s) that do not block the
2716 * same signals as the exiting thread, since the thread might
2717 * have been selected for delivery and woken up.
2718 */
2719 SIGFILLSET(unblocked);
2720 SIGSETNAND(unblocked, td->td_sigmask);
2721 SIGFILLSET(td->td_sigmask);
2722 reschedule_signals(p, unblocked, 0);
2723
2724 }
2725
2726 static int
2727 sigdeferstop_curr_flags(int cflags)
2728 {
2729
2730 MPASS((cflags & (TDF_SEINTR | TDF_SERESTART)) == 0 ||
2731 (cflags & TDF_SBDRY) != 0);
2732 return (cflags & (TDF_SBDRY | TDF_SEINTR | TDF_SERESTART));
2733 }
2734
2735 /*
2736 * Defer the delivery of SIGSTOP for the current thread, according to
2737 * the requested mode. Returns previous flags, which must be restored
2738 * by sigallowstop().
2739 *
2740 * TDF_SBDRY, TDF_SEINTR, and TDF_SERESTART flags are only set and
2741  * cleared by the current thread, which allows the lock-less
2742 * accesses below.
2743 */
2744 int
2745 sigdeferstop_impl(int mode)
2746 {
2747 struct thread *td;
2748 int cflags, nflags;
2749
2750 td = curthread;
2751 cflags = sigdeferstop_curr_flags(td->td_flags);
2752 switch (mode) {
2753 case SIGDEFERSTOP_NOP:
2754 nflags = cflags;
2755 break;
2756 case SIGDEFERSTOP_OFF:
2757 nflags = 0;
2758 break;
2759 case SIGDEFERSTOP_SILENT:
2760 nflags = (cflags | TDF_SBDRY) & ~(TDF_SEINTR | TDF_SERESTART);
2761 break;
2762 case SIGDEFERSTOP_EINTR:
2763 nflags = (cflags | TDF_SBDRY | TDF_SEINTR) & ~TDF_SERESTART;
2764 break;
2765 case SIGDEFERSTOP_ERESTART:
2766 nflags = (cflags | TDF_SBDRY | TDF_SERESTART) & ~TDF_SEINTR;
2767 break;
2768 default:
2769 panic("sigdeferstop: invalid mode %x", mode);
2770 break;
2771 }
2772 if (cflags == nflags)
2773 return (SIGDEFERSTOP_VAL_NCHG);
2774 thread_lock(td);
2775 td->td_flags = (td->td_flags & ~cflags) | nflags;
2776 thread_unlock(td);
2777 return (cflags);
2778 }
2779
2780 /*
2781 * Restores the STOP handling mode, typically permitting the delivery
2782 * of SIGSTOP for the current thread. This does not immediately
2783 * suspend if a stop was posted. Instead, the thread will suspend
2784 * either via ast() or a subsequent interruptible sleep.
2785 */
2786 void
2787 sigallowstop_impl(int prev)
2788 {
2789 struct thread *td;
2790 int cflags;
2791
2792 KASSERT(prev != SIGDEFERSTOP_VAL_NCHG, ("failed sigallowstop"));
2793 KASSERT((prev & ~(TDF_SBDRY | TDF_SEINTR | TDF_SERESTART)) == 0,
2794 ("sigallowstop: incorrect previous mode %x", prev));
2795 td = curthread;
2796 cflags = sigdeferstop_curr_flags(td->td_flags);
2797 if (cflags != prev) {
2798 thread_lock(td);
2799 td->td_flags = (td->td_flags & ~cflags) | prev;
2800 thread_unlock(td);
2801 }
2802 }
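
In-kernel callers normally reach these through the sigdeferstop()/sigallowstop() wrappers to bracket an operation that must not be interrupted mid-way by a stop. An illustrative kernel-style fragment only (some_sleeping_operation() is a hypothetical placeholder, not a real KPI):

    /* Illustrative only: bracket a sleep that must not stop mid-way. */
    static int
    example_no_stop(void *arg)
    {
            int error, stop;

            stop = sigdeferstop(SIGDEFERSTOP_SILENT);
            error = some_sleeping_operation(arg);   /* hypothetical helper */
            sigallowstop(stop);
            return (error);
    }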
2803
2804 /*
2805 * If the current process has received a signal (should be caught or cause
2806 * termination, should interrupt current syscall), return the signal number.
2807 * Stop signals with default action are processed immediately, then cleared;
2808 * they aren't returned. This is checked after each entry to the system for
2809 * a syscall or trap (though this can usually be done without calling issignal
2810 * by checking the pending signal masks in cursig.) The normal call
2811 * sequence is
2812 *
2813 * while (sig = cursig(curthread))
2814 * postsig(sig);
2815 */
2816 static int
2817 issignal(struct thread *td)
2818 {
2819 struct proc *p;
2820 struct sigacts *ps;
2821 struct sigqueue *queue;
2822 sigset_t sigpending;
2823 ksiginfo_t ksi;
2824 int prop, sig, traced;
2825
2826 p = td->td_proc;
2827 ps = p->p_sigacts;
2828 mtx_assert(&ps->ps_mtx, MA_OWNED);
2829 PROC_LOCK_ASSERT(p, MA_OWNED);
2830 for (;;) {
2831 traced = (p->p_flag & P_TRACED) || (p->p_stops & S_SIG);
2832
2833 sigpending = td->td_sigqueue.sq_signals;
2834 SIGSETOR(sigpending, p->p_sigqueue.sq_signals);
2835 SIGSETNAND(sigpending, td->td_sigmask);
2836
2837 if ((p->p_flag & P_PPWAIT) != 0 || (td->td_flags &
2838 (TDF_SBDRY | TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY)
2839 SIG_STOPSIGMASK(sigpending);
2840 if (SIGISEMPTY(sigpending)) /* no signal to send */
2841 return (0);
2842 if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED &&
2843 (p->p_flag2 & P2_PTRACE_FSTP) != 0 &&
2844 SIGISMEMBER(sigpending, SIGSTOP)) {
2845 /*
2846 * If debugger just attached, always consume
2847 * SIGSTOP from ptrace(PT_ATTACH) first, to
2848 * execute the debugger attach ritual in
2849 * order.
2850 */
2851 sig = SIGSTOP;
2852 td->td_dbgflags |= TDB_FSTP;
2853 } else {
2854 sig = sig_ffs(&sigpending);
2855 }
2856
2857 if (p->p_stops & S_SIG) {
2858 mtx_unlock(&ps->ps_mtx);
2859 stopevent(p, S_SIG, sig);
2860 mtx_lock(&ps->ps_mtx);
2861 }
2862
2863 /*
2864 * We should see pending but ignored signals
2865 * only if P_TRACED was on when they were posted.
2866 */
2867 if (SIGISMEMBER(ps->ps_sigignore, sig) && (traced == 0)) {
2868 sigqueue_delete(&td->td_sigqueue, sig);
2869 sigqueue_delete(&p->p_sigqueue, sig);
2870 continue;
2871 }
2872 if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED) {
2873 /*
2874 * If traced, always stop.
2875 * Remove old signal from queue before the stop.
2876 * XXX shrug off debugger, it causes siginfo to
2877 * be thrown away.
2878 */
2879 queue = &td->td_sigqueue;
2880 ksiginfo_init(&ksi);
2881 if (sigqueue_get(queue, sig, &ksi) == 0) {
2882 queue = &p->p_sigqueue;
2883 sigqueue_get(queue, sig, &ksi);
2884 }
2885 td->td_si = ksi.ksi_info;
2886
2887 mtx_unlock(&ps->ps_mtx);
2888 sig = ptracestop(td, sig, &ksi);
2889 mtx_lock(&ps->ps_mtx);
2890
2891 td->td_si.si_signo = 0;
2892
2893 /*
2894 * Keep looking if the debugger discarded or
2895 * replaced the signal.
2896 */
2897 if (sig == 0)
2898 continue;
2899
2900 /*
2901 * If the signal became masked, re-queue it.
2902 */
2903 if (SIGISMEMBER(td->td_sigmask, sig)) {
2904 ksi.ksi_flags |= KSI_HEAD;
2905 sigqueue_add(&p->p_sigqueue, sig, &ksi);
2906 continue;
2907 }
2908
2909 /*
2910 * If the traced bit got turned off, requeue
2911 * the signal and go back up to the top to
2912 * rescan signals. This ensures that p_sig*
2913 * and p_sigact are consistent.
2914 */
2915 if ((p->p_flag & P_TRACED) == 0) {
2916 ksi.ksi_flags |= KSI_HEAD;
2917 sigqueue_add(queue, sig, &ksi);
2918 continue;
2919 }
2920 }
2921
2922 prop = sigprop(sig);
2923
2924 /*
2925 * Decide whether the signal should be returned.
2926 * Return the signal's number, or fall through
2927 * to clear it from the pending mask.
2928 */
2929 switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {
2930
2931 case (intptr_t)SIG_DFL:
2932 /*
2933 * Don't take default actions on system processes.
2934 */
2935 if (p->p_pid <= 1) {
2936 #ifdef DIAGNOSTIC
2937 /*
2938 * Are you sure you want to ignore SIGSEGV
2939 * in init? XXX
2940 */
2941 printf("Process (pid %lu) got signal %d\n",
2942 (u_long)p->p_pid, sig);
2943 #endif
2944 break; /* == ignore */
2945 }
2946 /*
2947 * If there is a pending stop signal to process with
2948 * default action, stop here, then clear the signal.
2949 * Traced or exiting processes should ignore stops.
2950 * Additionally, a member of an orphaned process group
2951 * should ignore tty stops.
2952 */
2953 if (prop & SIGPROP_STOP) {
2954 mtx_unlock(&ps->ps_mtx);
2955 if ((p->p_flag & (P_TRACED | P_WEXIT |
2956 P_SINGLE_EXIT)) != 0 || ((p->p_pgrp->
2957 pg_flags & PGRP_ORPHANED) != 0 &&
2958 (prop & SIGPROP_TTYSTOP) != 0)) {
2959 mtx_lock(&ps->ps_mtx);
2960 break; /* == ignore */
2961 }
2962 if (TD_SBDRY_INTR(td)) {
2963 KASSERT((td->td_flags & TDF_SBDRY) != 0,
2964 ("lost TDF_SBDRY"));
2965 mtx_lock(&ps->ps_mtx);
2966 return (-1);
2967 }
2968 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
2969 &p->p_mtx.lock_object, "Catching SIGSTOP");
2970 sigqueue_delete(&td->td_sigqueue, sig);
2971 sigqueue_delete(&p->p_sigqueue, sig);
2972 p->p_flag |= P_STOPPED_SIG;
2973 p->p_xsig = sig;
2974 PROC_SLOCK(p);
2975 sig_suspend_threads(td, p, 0);
2976 thread_suspend_switch(td, p);
2977 PROC_SUNLOCK(p);
2978 mtx_lock(&ps->ps_mtx);
2979 goto next;
2980 } else if (prop & SIGPROP_IGNORE) {
2981 /*
2982 * Except for SIGCONT, shouldn't get here.
2983 * Default action is to ignore; drop it.
2984 */
2985 break; /* == ignore */
2986 } else
2987 return (sig);
2988 /*NOTREACHED*/
2989
2990 case (intptr_t)SIG_IGN:
2991 /*
2992 			 * Masking above should prevent us from ever trying
2993 			 * to take action on an ignored signal other
2994 			 * than SIGCONT, unless the process is traced.
2995 */
2996 if ((prop & SIGPROP_CONT) == 0 &&
2997 (p->p_flag & P_TRACED) == 0)
2998 printf("issignal\n");
2999 break; /* == ignore */
3000
3001 default:
3002 /*
3003 * This signal has an action, let
3004 * postsig() process it.
3005 */
3006 return (sig);
3007 }
3008 sigqueue_delete(&td->td_sigqueue, sig); /* take the signal! */
3009 sigqueue_delete(&p->p_sigqueue, sig);
3010 next:;
3011 }
3012 /* NOTREACHED */
3013 }
3014
3015 void
3016 thread_stopped(struct proc *p)
3017 {
3018 int n;
3019
3020 PROC_LOCK_ASSERT(p, MA_OWNED);
3021 PROC_SLOCK_ASSERT(p, MA_OWNED);
3022 n = p->p_suspcount;
3023 if (p == curproc)
3024 n++;
3025 if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) {
3026 PROC_SUNLOCK(p);
3027 p->p_flag &= ~P_WAITED;
3028 PROC_LOCK(p->p_pptr);
3029 childproc_stopped(p, (p->p_flag & P_TRACED) ?
3030 CLD_TRAPPED : CLD_STOPPED);
3031 PROC_UNLOCK(p->p_pptr);
3032 PROC_SLOCK(p);
3033 }
3034 }
3035
3036 /*
3037 * Take the action for the specified signal
3038 * from the current set of pending signals.
3039 */
3040 int
3041 postsig(int sig)
3042 {
3043 struct thread *td;
3044 struct proc *p;
3045 struct sigacts *ps;
3046 sig_t action;
3047 ksiginfo_t ksi;
3048 sigset_t returnmask;
3049
3050 KASSERT(sig != 0, ("postsig"));
3051
3052 td = curthread;
3053 p = td->td_proc;
3054 PROC_LOCK_ASSERT(p, MA_OWNED);
3055 ps = p->p_sigacts;
3056 mtx_assert(&ps->ps_mtx, MA_OWNED);
3057 ksiginfo_init(&ksi);
3058 if (sigqueue_get(&td->td_sigqueue, sig, &ksi) == 0 &&
3059 sigqueue_get(&p->p_sigqueue, sig, &ksi) == 0)
3060 return (0);
3061 ksi.ksi_signo = sig;
3062 if (ksi.ksi_code == SI_TIMER)
3063 itimer_accept(p, ksi.ksi_timerid, &ksi);
3064 action = ps->ps_sigact[_SIG_IDX(sig)];
3065 #ifdef KTRACE
3066 if (KTRPOINT(td, KTR_PSIG))
3067 ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ?
3068 &td->td_oldsigmask : &td->td_sigmask, ksi.ksi_code);
3069 #endif
3070 if ((p->p_stops & S_SIG) != 0) {
3071 mtx_unlock(&ps->ps_mtx);
3072 stopevent(p, S_SIG, sig);
3073 mtx_lock(&ps->ps_mtx);
3074 }
3075
3076 if (action == SIG_DFL) {
3077 /*
3078 * Default action, where the default is to kill
3079 * the process. (Other cases were ignored above.)
3080 */
3081 mtx_unlock(&ps->ps_mtx);
3082 proc_td_siginfo_capture(td, &ksi.ksi_info);
3083 sigexit(td, sig);
3084 /* NOTREACHED */
3085 } else {
3086 /*
3087 * If we get here, the signal must be caught.
3088 */
3089 KASSERT(action != SIG_IGN, ("postsig action %p", action));
3090 KASSERT(!SIGISMEMBER(td->td_sigmask, sig),
3091 ("postsig action: blocked sig %d", sig));
3092
3093 /*
3094 * Set the new mask value and also defer further
3095 * occurrences of this signal.
3096 *
3097 * Special case: user has done a sigsuspend. Here the
3098 * current mask is not of interest, but rather the
3099 * mask from before the sigsuspend is what we want
3100 * restored after the signal processing is completed.
3101 */
3102 if (td->td_pflags & TDP_OLDMASK) {
3103 returnmask = td->td_oldsigmask;
3104 td->td_pflags &= ~TDP_OLDMASK;
3105 } else
3106 returnmask = td->td_sigmask;
3107
3108 if (p->p_sig == sig) {
3109 p->p_code = 0;
3110 p->p_sig = 0;
3111 }
3112 (*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask);
3113 postsig_done(sig, td, ps);
3114 }
3115 return (1);
3116 }
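
The TDP_OLDMASK special case is what makes sigsuspend(2) behave as specified: the mask restored after the handler runs is the one saved before sigsuspend(), not the temporary suspension mask. A sketch, assuming a POSIX system:

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static void
    handler(int sig)
    {
            write(STDOUT_FILENO, "in handler\n", 11);
    }

    int
    main(void)
    {
            sigset_t block, suspmask, cur;

            signal(SIGUSR1, handler);
            sigemptyset(&block);
            sigaddset(&block, SIGUSR1);
            sigprocmask(SIG_BLOCK, &block, NULL);

            raise(SIGUSR1);                 /* stays pending: blocked */
            sigemptyset(&suspmask);
            sigsuspend(&suspmask);          /* temporary mask; handler runs */

            /* Back here, the pre-sigsuspend mask (SIGUSR1 blocked) is in
             * effect again: postsig() used td_oldsigmask, not suspmask. */
            sigprocmask(SIG_SETMASK, NULL, &cur);
            printf("SIGUSR1 blocked again: %d\n", sigismember(&cur, SIGUSR1));
            return (0);
    }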
3117
3118 void
3119 proc_wkilled(struct proc *p)
3120 {
3121
3122 PROC_LOCK_ASSERT(p, MA_OWNED);
3123 if ((p->p_flag & P_WKILLED) == 0) {
3124 p->p_flag |= P_WKILLED;
3125 /*
3126 * Notify swapper that there is a process to swap in.
3127 		 * The notification is racy; at worst it would take 10
3128 * seconds for the swapper process to notice.
3129 */
3130 if ((p->p_flag & (P_INMEM | P_SWAPPINGIN)) == 0)
3131 wakeup(&proc0);
3132 }
3133 }
3134
3135 /*
3136  * Kill the given process for the stated reason.
3137 */
3138 void
3139 killproc(struct proc *p, char *why)
3140 {
3141
3142 PROC_LOCK_ASSERT(p, MA_OWNED);
3143 CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)", p, p->p_pid,
3144 p->p_comm);
3145 log(LOG_ERR, "pid %d (%s), jid %d, uid %d, was killed: %s\n",
3146 p->p_pid, p->p_comm, p->p_ucred->cr_prison->pr_id,
3147 p->p_ucred ? p->p_ucred->cr_uid : -1, why);
3148 proc_wkilled(p);
3149 kern_psignal(p, SIGKILL);
3150 }
3151
3152 /*
3153 * Force the current process to exit with the specified signal, dumping core
3154 * if appropriate. We bypass the normal tests for masked and caught signals,
3155 * allowing unrecoverable failures to terminate the process without changing
3156 * signal state. Mark the accounting record with the signal termination.
3157 * If dumping core, save the signal number for the debugger. Calls exit and
3158 * does not return.
3159 */
3160 void
3161 sigexit(struct thread *td, int sig)
3162 {
3163 struct proc *p = td->td_proc;
3164
3165 PROC_LOCK_ASSERT(p, MA_OWNED);
3166 p->p_acflag |= AXSIG;
3167 /*
3168 * We must be single-threading to generate a core dump. This
3169 * ensures that the registers in the core file are up-to-date.
3170 * Also, the ELF dump handler assumes that the thread list doesn't
3171 * change out from under it.
3172 *
3173 * XXX If another thread attempts to single-thread before us
3174 * (e.g. via fork()), we won't get a dump at all.
3175 */
3176 if ((sigprop(sig) & SIGPROP_CORE) &&
3177 thread_single(p, SINGLE_NO_EXIT) == 0) {
3178 p->p_sig = sig;
3179 /*
3180 * Log signals which would cause core dumps
3181 * (Log as LOG_INFO to appease those who don't want
3182 * these messages.)
3183 * XXX : Todo, as well as euid, write out ruid too
3184 * Note that coredump() drops proc lock.
3185 */
3186 if (coredump(td) == 0)
3187 sig |= WCOREFLAG;
3188 if (kern_logsigexit)
3189 log(LOG_INFO,
3190 "pid %d (%s), jid %d, uid %d: exited on "
3191 "signal %d%s\n", p->p_pid, p->p_comm,
3192 p->p_ucred->cr_prison->pr_id,
3193 td->td_ucred ? td->td_ucred->cr_uid : -1,
3194 sig &~ WCOREFLAG,
3195 sig & WCOREFLAG ? " (core dumped)" : "");
3196 } else
3197 PROC_UNLOCK(p);
3198 exit1(td, 0, sig);
3199 /* NOTREACHED */
3200 }
3201
3202 /*
3203 * Send queued SIGCHLD to parent when child process's state
3204 * is changed.
3205 */
3206 static void
3207 sigparent(struct proc *p, int reason, int status)
3208 {
3209 PROC_LOCK_ASSERT(p, MA_OWNED);
3210 PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
3211
3212 if (p->p_ksi != NULL) {
3213 p->p_ksi->ksi_signo = SIGCHLD;
3214 p->p_ksi->ksi_code = reason;
3215 p->p_ksi->ksi_status = status;
3216 p->p_ksi->ksi_pid = p->p_pid;
3217 p->p_ksi->ksi_uid = p->p_ucred->cr_ruid;
3218 if (KSI_ONQ(p->p_ksi))
3219 return;
3220 }
3221 pksignal(p->p_pptr, SIGCHLD, p->p_ksi);
3222 }
3223
3224 static void
3225 childproc_jobstate(struct proc *p, int reason, int sig)
3226 {
3227 struct sigacts *ps;
3228
3229 PROC_LOCK_ASSERT(p, MA_OWNED);
3230 PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
3231
3232 /*
3233 	 * Wake up the parent sleeping in kern_wait() and also send
3234 	 * it SIGCHLD. SIGCHLD alone does not guarantee that the
3235 	 * parent will wake up, because the parent may have masked
3236 	 * the signal.
3237 */
3238 p->p_pptr->p_flag |= P_STATCHILD;
3239 wakeup(p->p_pptr);
3240
3241 ps = p->p_pptr->p_sigacts;
3242 mtx_lock(&ps->ps_mtx);
3243 if ((ps->ps_flag & PS_NOCLDSTOP) == 0) {
3244 mtx_unlock(&ps->ps_mtx);
3245 sigparent(p, reason, sig);
3246 } else
3247 mtx_unlock(&ps->ps_mtx);
3248 }
3249
3250 void
3251 childproc_stopped(struct proc *p, int reason)
3252 {
3253
3254 childproc_jobstate(p, reason, p->p_xsig);
3255 }
3256
3257 void
3258 childproc_continued(struct proc *p)
3259 {
3260 childproc_jobstate(p, CLD_CONTINUED, SIGCONT);
3261 }
3262
3263 void
3264 childproc_exited(struct proc *p)
3265 {
3266 int reason, status;
3267
3268 if (WCOREDUMP(p->p_xsig)) {
3269 reason = CLD_DUMPED;
3270 status = WTERMSIG(p->p_xsig);
3271 } else if (WIFSIGNALED(p->p_xsig)) {
3272 reason = CLD_KILLED;
3273 status = WTERMSIG(p->p_xsig);
3274 } else {
3275 reason = CLD_EXITED;
3276 status = p->p_xexit;
3277 }
3278 /*
3279 * XXX avoid calling wakeup(p->p_pptr), the work is
3280 * done in exit1().
3281 */
3282 sigparent(p, reason, status);
3283 }
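
The CLD_* reason computed here is the kernel-side counterpart of the W* macros the parent applies to the status from waitpid(2): WCOREDUMP() corresponds to CLD_DUMPED, WIFSIGNALED() to CLD_KILLED, and WIFEXITED() to CLD_EXITED. A sketch of the userland view (whether a core is actually written depends on the limits checked in coredump() below):

    #include <sys/wait.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
            int status;
            pid_t pid = fork();

            if (pid == 0) {
                    raise(SIGSEGV);         /* die on a core-dumping signal */
                    _exit(0);
            }
            waitpid(pid, &status, 0);
            if (WIFSIGNALED(status))        /* CLD_KILLED or CLD_DUMPED */
                    printf("killed by %d%s\n", WTERMSIG(status),
                        WCOREDUMP(status) ? " (core dumped)" : "");
            else if (WIFEXITED(status))     /* CLD_EXITED */
                    printf("exited %d\n", WEXITSTATUS(status));
            return (0);
    }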
3284
3285 #define MAX_NUM_CORE_FILES 100000
3286 #ifndef NUM_CORE_FILES
3287 #define NUM_CORE_FILES 5
3288 #endif
3289 CTASSERT(NUM_CORE_FILES >= 0 && NUM_CORE_FILES <= MAX_NUM_CORE_FILES);
3290 static int num_cores = NUM_CORE_FILES;
3291
3292 static int
3293 sysctl_debug_num_cores_check(SYSCTL_HANDLER_ARGS)
3294 {
3295 int error;
3296 int new_val;
3297
3298 new_val = num_cores;
3299 error = sysctl_handle_int(oidp, &new_val, 0, req);
3300 if (error != 0 || req->newptr == NULL)
3301 return (error);
3302 if (new_val > MAX_NUM_CORE_FILES)
3303 new_val = MAX_NUM_CORE_FILES;
3304 if (new_val < 0)
3305 new_val = 0;
3306 num_cores = new_val;
3307 return (0);
3308 }
3309 SYSCTL_PROC(_debug, OID_AUTO, ncores, CTLTYPE_INT|CTLFLAG_RW,
3310 0, sizeof(int), sysctl_debug_num_cores_check, "I",
3311 "Maximum number of generated process corefiles while using index format");
3312
3313 #define GZIP_SUFFIX ".gz"
3314 #define ZSTD_SUFFIX ".zst"
3315
3316 int compress_user_cores = 0;
3317
3318 static int
3319 sysctl_compress_user_cores(SYSCTL_HANDLER_ARGS)
3320 {
3321 int error, val;
3322
3323 val = compress_user_cores;
3324 error = sysctl_handle_int(oidp, &val, 0, req);
3325 if (error != 0 || req->newptr == NULL)
3326 return (error);
3327 if (val != 0 && !compressor_avail(val))
3328 return (EINVAL);
3329 compress_user_cores = val;
3330 return (error);
3331 }
3332 SYSCTL_PROC(_kern, OID_AUTO, compress_user_cores, CTLTYPE_INT | CTLFLAG_RWTUN,
3333 0, sizeof(int), sysctl_compress_user_cores, "I",
3334 "Enable compression of user corefiles ("
3335 __XSTRING(COMPRESS_GZIP) " = gzip, "
3336 __XSTRING(COMPRESS_ZSTD) " = zstd)");
3337
3338 int compress_user_cores_level = 6;
3339 SYSCTL_INT(_kern, OID_AUTO, compress_user_cores_level, CTLFLAG_RWTUN,
3340 &compress_user_cores_level, 0,
3341 "Corefile compression level");
3342
3343 /*
3344 * Protect the access to corefilename[] by allproc_lock.
3345 */
3346 #define corefilename_lock allproc_lock
3347
3348 static char corefilename[MAXPATHLEN] = {"%N.core"};
3349 TUNABLE_STR("kern.corefile", corefilename, sizeof(corefilename));
3350
3351 static int
3352 sysctl_kern_corefile(SYSCTL_HANDLER_ARGS)
3353 {
3354 int error;
3355
3356 sx_xlock(&corefilename_lock);
3357 error = sysctl_handle_string(oidp, corefilename, sizeof(corefilename),
3358 req);
3359 sx_xunlock(&corefilename_lock);
3360
3361 return (error);
3362 }
3363 SYSCTL_PROC(_kern, OID_AUTO, corefile, CTLTYPE_STRING | CTLFLAG_RW |
3364 CTLFLAG_MPSAFE, 0, 0, sysctl_kern_corefile, "A",
3365 "Process corefile name format string");
3366
3367 static void
3368 vnode_close_locked(struct thread *td, struct vnode *vp)
3369 {
3370
3371 VOP_UNLOCK(vp, 0);
3372 vn_close(vp, FWRITE, td->td_ucred, td);
3373 }
3374
3375 /*
3376 * If the core format has a %I in it, then we need to check
3377 * for existing corefiles before defining a name.
3378 * To do this we iterate over 0..ncores to find a
3379  * non-existing core file name to use. If all names are
3380  * already in use, we choose the oldest file.
3381 */
3382 static int
3383 corefile_open_last(struct thread *td, char *name, int indexpos,
3384 int indexlen, int ncores, struct vnode **vpp)
3385 {
3386 struct vnode *oldvp, *nextvp, *vp;
3387 struct vattr vattr;
3388 struct nameidata nd;
3389 int error, i, flags, oflags, cmode;
3390 char ch;
3391 struct timespec lasttime;
3392
3393 nextvp = oldvp = NULL;
3394 cmode = S_IRUSR | S_IWUSR;
3395 oflags = VN_OPEN_NOAUDIT | VN_OPEN_NAMECACHE |
3396 (capmode_coredump ? VN_OPEN_NOCAPCHECK : 0);
3397
3398 for (i = 0; i < ncores; i++) {
3399 flags = O_CREAT | FWRITE | O_NOFOLLOW;
3400
3401 ch = name[indexpos + indexlen];
3402 (void)snprintf(name + indexpos, indexlen + 1, "%.*u", indexlen,
3403 i);
3404 name[indexpos + indexlen] = ch;
3405
3406 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td);
3407 error = vn_open_cred(&nd, &flags, cmode, oflags, td->td_ucred,
3408 NULL);
3409 if (error != 0)
3410 break;
3411
3412 vp = nd.ni_vp;
3413 NDFREE(&nd, NDF_ONLY_PNBUF);
3414 if ((flags & O_CREAT) == O_CREAT) {
3415 nextvp = vp;
3416 break;
3417 }
3418
3419 error = VOP_GETATTR(vp, &vattr, td->td_ucred);
3420 if (error != 0) {
3421 vnode_close_locked(td, vp);
3422 break;
3423 }
3424
3425 if (oldvp == NULL ||
3426 lasttime.tv_sec > vattr.va_mtime.tv_sec ||
3427 (lasttime.tv_sec == vattr.va_mtime.tv_sec &&
3428 lasttime.tv_nsec >= vattr.va_mtime.tv_nsec)) {
3429 if (oldvp != NULL)
3430 vn_close(oldvp, FWRITE, td->td_ucred, td);
3431 oldvp = vp;
3432 VOP_UNLOCK(oldvp, 0);
3433 lasttime = vattr.va_mtime;
3434 } else {
3435 vnode_close_locked(td, vp);
3436 }
3437 }
3438
3439 if (oldvp != NULL) {
3440 if (nextvp == NULL) {
3441 if ((td->td_proc->p_flag & P_SUGID) != 0) {
3442 error = EFAULT;
3443 vn_close(oldvp, FWRITE, td->td_ucred, td);
3444 } else {
3445 nextvp = oldvp;
3446 error = vn_lock(nextvp, LK_EXCLUSIVE);
3447 if (error != 0) {
3448 vn_close(nextvp, FWRITE, td->td_ucred,
3449 td);
3450 nextvp = NULL;
3451 }
3452 }
3453 } else {
3454 vn_close(oldvp, FWRITE, td->td_ucred, td);
3455 }
3456 }
3457 if (error != 0) {
3458 if (nextvp != NULL)
3459 vnode_close_locked(td, oldvp);
3460 } else {
3461 *vpp = nextvp;
3462 }
3463
3464 return (error);
3465 }
3466
3467 /*
3468 * corefile_open(comm, uid, pid, td, compress, vpp, namep)
3469 * Expand the name described in corefilename, using name, uid, and pid
3470 * and open/create core file.
3471 * corefilename is a printf-like string, with three format specifiers:
3472 * %N name of process ("name")
3473 * %P process id (pid)
3474 * %U user id (uid)
3475 * For example, "%N.core" is the default; they can be disabled completely
3476 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
3477 * This is controlled by the sysctl variable kern.corefile (see above).
3478 */
3479 static int
3480 corefile_open(const char *comm, uid_t uid, pid_t pid, struct thread *td,
3481 int compress, struct vnode **vpp, char **namep)
3482 {
3483 struct sbuf sb;
3484 struct nameidata nd;
3485 const char *format;
3486 char *hostname, *name;
3487 int cmode, error, flags, i, indexpos, indexlen, oflags, ncores;
3488
3489 hostname = NULL;
3490 format = corefilename;
3491 name = malloc(MAXPATHLEN, M_TEMP, M_WAITOK | M_ZERO);
3492 indexlen = 0;
3493 indexpos = -1;
3494 ncores = num_cores;
3495 (void)sbuf_new(&sb, name, MAXPATHLEN, SBUF_FIXEDLEN);
3496 sx_slock(&corefilename_lock);
3497 for (i = 0; format[i] != '\0'; i++) {
3498 switch (format[i]) {
3499 case '%': /* Format character */
3500 i++;
3501 switch (format[i]) {
3502 case '%':
3503 sbuf_putc(&sb, '%');
3504 break;
3505 case 'H': /* hostname */
3506 if (hostname == NULL) {
3507 hostname = malloc(MAXHOSTNAMELEN,
3508 M_TEMP, M_WAITOK);
3509 }
3510 getcredhostname(td->td_ucred, hostname,
3511 MAXHOSTNAMELEN);
3512 sbuf_printf(&sb, "%s", hostname);
3513 break;
3514 case 'I': /* autoincrementing index */
3515 if (indexpos != -1) {
3516 sbuf_printf(&sb, "%%I");
3517 break;
3518 }
3519
3520 indexpos = sbuf_len(&sb);
3521 sbuf_printf(&sb, "%u", ncores - 1);
3522 indexlen = sbuf_len(&sb) - indexpos;
3523 break;
3524 case 'N': /* process name */
3525 sbuf_printf(&sb, "%s", comm);
3526 break;
3527 case 'P': /* process id */
3528 sbuf_printf(&sb, "%u", pid);
3529 break;
3530 case 'U': /* user id */
3531 sbuf_printf(&sb, "%u", uid);
3532 break;
3533 default:
3534 log(LOG_ERR,
3535 "Unknown format character %c in "
3536 "corename `%s'\n", format[i], format);
3537 break;
3538 }
3539 break;
3540 default:
3541 sbuf_putc(&sb, format[i]);
3542 break;
3543 }
3544 }
3545 sx_sunlock(&corefilename_lock);
3546 free(hostname, M_TEMP);
3547 if (compress == COMPRESS_GZIP)
3548 sbuf_printf(&sb, GZIP_SUFFIX);
3549 else if (compress == COMPRESS_ZSTD)
3550 sbuf_printf(&sb, ZSTD_SUFFIX);
3551 if (sbuf_error(&sb) != 0) {
3552 log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too "
3553 "long\n", (long)pid, comm, (u_long)uid);
3554 sbuf_delete(&sb);
3555 free(name, M_TEMP);
3556 return (ENOMEM);
3557 }
3558 sbuf_finish(&sb);
3559 sbuf_delete(&sb);
3560
3561 if (indexpos != -1) {
3562 error = corefile_open_last(td, name, indexpos, indexlen, ncores,
3563 vpp);
3564 if (error != 0) {
3565 log(LOG_ERR,
3566 "pid %d (%s), uid (%u): Path `%s' failed "
3567 "on initial open test, error = %d\n",
3568 pid, comm, uid, name, error);
3569 }
3570 } else {
3571 cmode = S_IRUSR | S_IWUSR;
3572 oflags = VN_OPEN_NOAUDIT | VN_OPEN_NAMECACHE |
3573 (capmode_coredump ? VN_OPEN_NOCAPCHECK : 0);
3574 flags = O_CREAT | FWRITE | O_NOFOLLOW;
3575 if ((td->td_proc->p_flag & P_SUGID) != 0)
3576 flags |= O_EXCL;
3577
3578 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td);
3579 error = vn_open_cred(&nd, &flags, cmode, oflags, td->td_ucred,
3580 NULL);
3581 if (error == 0) {
3582 *vpp = nd.ni_vp;
3583 NDFREE(&nd, NDF_ONLY_PNBUF);
3584 }
3585 }
3586
3587 if (error != 0) {
3588 #ifdef AUDIT
3589 audit_proc_coredump(td, name, error);
3590 #endif
3591 free(name, M_TEMP);
3592 return (error);
3593 }
3594 *namep = name;
3595 return (0);
3596 }
3597
3598 /*
3599 * Dump a process' core. The main routine does some
3600 * policy checking, and creates the name of the coredump;
3601 * then it passes on a vnode and a size limit to the process-specific
3602 * coredump routine if there is one; if there _is not_ one, it returns
3603 * ENOSYS; otherwise it returns the error from the process-specific routine.
3604 */
3605
3606 static int
3607 coredump(struct thread *td)
3608 {
3609 struct proc *p = td->td_proc;
3610 struct ucred *cred = td->td_ucred;
3611 struct vnode *vp;
3612 struct flock lf;
3613 struct vattr vattr;
3614 int error, error1, locked;
3615 char *name; /* name of corefile */
3616 void *rl_cookie;
3617 off_t limit;
3618 char *fullpath, *freepath = NULL;
3619 struct sbuf *sb;
3620
3621 PROC_LOCK_ASSERT(p, MA_OWNED);
3622 MPASS((p->p_flag & P_HADTHREADS) == 0 || p->p_singlethread == td);
3623 _STOPEVENT(p, S_CORE, 0);
3624
3625 if (!do_coredump || (!sugid_coredump && (p->p_flag & P_SUGID) != 0) ||
3626 (p->p_flag2 & P2_NOTRACE) != 0) {
3627 PROC_UNLOCK(p);
3628 return (EFAULT);
3629 }
3630
3631 /*
3632 * Note that the bulk of limit checking is done after
3633 * the corefile is created. The exception is if the limit
3634 * for corefiles is 0, in which case we don't bother
3635 * creating the corefile at all. This layout means that
3636 	 * a corefile larger than the limit is truncated
3637 	 * instead of not being created at all.
3638 */
3639 limit = (off_t)lim_cur(td, RLIMIT_CORE);
3640 if (limit == 0 || racct_get_available(p, RACCT_CORE) == 0) {
3641 PROC_UNLOCK(p);
3642 return (EFBIG);
3643 }
3644 PROC_UNLOCK(p);
3645
3646 error = corefile_open(p->p_comm, cred->cr_uid, p->p_pid, td,
3647 compress_user_cores, &vp, &name);
3648 if (error != 0)
3649 return (error);
3650
3651 /*
3652 * Don't dump to non-regular files or files with links.
3653 * Do not dump into system files. Effective user must own the corefile.
3654 */
3655 if (vp->v_type != VREG || VOP_GETATTR(vp, &vattr, cred) != 0 ||
3656 vattr.va_nlink != 1 || (vp->v_vflag & VV_SYSTEM) != 0 ||
3657 vattr.va_uid != cred->cr_uid) {
3658 VOP_UNLOCK(vp, 0);
3659 error = EFAULT;
3660 goto out;
3661 }
3662
3663 VOP_UNLOCK(vp, 0);
3664
3665 /* Postpone other writers, including core dumps of other processes. */
3666 rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
3667
3668 lf.l_whence = SEEK_SET;
3669 lf.l_start = 0;
3670 lf.l_len = 0;
3671 lf.l_type = F_WRLCK;
3672 locked = (VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK) == 0);
3673
3674 VATTR_NULL(&vattr);
3675 vattr.va_size = 0;
3676 if (set_core_nodump_flag)
3677 vattr.va_flags = UF_NODUMP;
3678 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3679 VOP_SETATTR(vp, &vattr, cred);
3680 VOP_UNLOCK(vp, 0);
3681 PROC_LOCK(p);
3682 p->p_acflag |= ACORE;
3683 PROC_UNLOCK(p);
3684
3685 if (p->p_sysent->sv_coredump != NULL) {
3686 error = p->p_sysent->sv_coredump(td, vp, limit, 0);
3687 } else {
3688 error = ENOSYS;
3689 }
3690
3691 if (locked) {
3692 lf.l_type = F_UNLCK;
3693 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
3694 }
3695 vn_rangelock_unlock(vp, rl_cookie);
3696
3697 /*
3698 * Notify the userland helper that a process triggered a core dump.
3699 * This allows the helper to run an automated debugging session.
3700 */
3701 if (error != 0 || coredump_devctl == 0)
3702 goto out;
3703 sb = sbuf_new_auto();
3704 if (vn_fullpath_global(td, p->p_textvp, &fullpath, &freepath) != 0)
3705 goto out2;
3706 sbuf_printf(sb, "comm=\"");
3707 devctl_safe_quote_sb(sb, fullpath);
3708 free(freepath, M_TEMP);
3709 sbuf_printf(sb, "\" core=\"");
3710
3711 /*
3712 	 * We can't look up the core file vp directly. When we're replacing a core,
3713 	 * and at other random times, we flush the name cache, so the lookup will fail.
3714 	 * Instead, if the path of the core is relative, add the current dir in front of it.
3715 */
3716 if (name[0] != '/') {
3717 fullpath = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
3718 if (kern___getcwd(td, fullpath, UIO_SYSSPACE, MAXPATHLEN, MAXPATHLEN) != 0) {
3719 free(fullpath, M_TEMP);
3720 goto out2;
3721 }
3722 devctl_safe_quote_sb(sb, fullpath);
3723 free(fullpath, M_TEMP);
3724 sbuf_putc(sb, '/');
3725 }
3726 devctl_safe_quote_sb(sb, name);
3727 sbuf_printf(sb, "\"");
3728 if (sbuf_finish(sb) == 0)
3729 devctl_notify("kernel", "signal", "coredump", sbuf_data(sb));
3730 out2:
3731 sbuf_delete(sb);
3732 out:
3733 error1 = vn_close(vp, FWRITE, cred, td);
3734 if (error == 0)
3735 error = error1;
3736 #ifdef AUDIT
3737 audit_proc_coredump(td, name, error);
3738 #endif
3739 free(name, M_TEMP);
3740 return (error);
3741 }
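
Because coredump() bails out early when the RLIMIT_CORE limit is zero, a process can opt out of core dumps entirely with setrlimit(2); any non-zero limit instead truncates an oversized dump, as the comment above explains. A sketch:

    #include <sys/resource.h>
    #include <signal.h>
    #include <stdio.h>

    int
    main(void)
    {
            struct rlimit rl = { 0, 0 };

            /* With RLIMIT_CORE at 0, coredump() returns EFBIG early and
             * no file is created; larger dumps are truncated, not skipped. */
            if (setrlimit(RLIMIT_CORE, &rl) == 0)
                    printf("core dumps disabled for this process\n");
            raise(SIGABRT);         /* terminates without writing a core */
            return (0);
    }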
3742
3743 /*
3744 * Nonexistent system call-- signal process (may want to handle it). Flag
3745 * error in case process won't see signal immediately (blocked or ignored).
3746 */
3747 #ifndef _SYS_SYSPROTO_H_
3748 struct nosys_args {
3749 int dummy;
3750 };
3751 #endif
3752 /* ARGSUSED */
3753 int
3754 nosys(struct thread *td, struct nosys_args *args)
3755 {
3756 struct proc *p;
3757
3758 p = td->td_proc;
3759
3760 PROC_LOCK(p);
3761 tdsignal(td, SIGSYS);
3762 PROC_UNLOCK(p);
3763 if (kern_lognosys == 1 || kern_lognosys == 3) {
3764 uprintf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm,
3765 td->td_sa.code);
3766 }
3767 if (kern_lognosys == 2 || kern_lognosys == 3 ||
3768 (p->p_pid == 1 && (kern_lognosys & 3) == 0)) {
3769 printf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm,
3770 td->td_sa.code);
3771 }
3772 return (ENOSYS);
3773 }
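
From userland, an unimplemented syscall number reaches nosys() through syscall(2): the thread gets SIGSYS (fatal by default) and, if it survives, the call fails with ENOSYS. A sketch (100000 is just an arbitrary number assumed to be unused):

    #include <sys/syscall.h>
    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static void
    handler(int sig)
    {
            write(STDOUT_FILENO, "SIGSYS\n", 7);
    }

    int
    main(void)
    {
            signal(SIGSYS, handler);        /* default action would kill us */
            errno = 0;
            if (syscall(100000) == -1)      /* arbitrary unused number */
                    printf("errno = %d (ENOSYS = %d)\n", errno, ENOSYS);
            return (0);
    }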
3774
3775 /*
3776 * Send a SIGIO or SIGURG signal to a process or process group using stored
3777 * credentials rather than those of the current process.
3778 */
3779 void
3780 pgsigio(struct sigio **sigiop, int sig, int checkctty)
3781 {
3782 ksiginfo_t ksi;
3783 struct sigio *sigio;
3784
3785 ksiginfo_init(&ksi);
3786 ksi.ksi_signo = sig;
3787 ksi.ksi_code = SI_KERNEL;
3788
3789 SIGIO_LOCK();
3790 sigio = *sigiop;
3791 if (sigio == NULL) {
3792 SIGIO_UNLOCK();
3793 return;
3794 }
3795 if (sigio->sio_pgid > 0) {
3796 PROC_LOCK(sigio->sio_proc);
3797 if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
3798 kern_psignal(sigio->sio_proc, sig);
3799 PROC_UNLOCK(sigio->sio_proc);
3800 } else if (sigio->sio_pgid < 0) {
3801 struct proc *p;
3802
3803 PGRP_LOCK(sigio->sio_pgrp);
3804 LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
3805 PROC_LOCK(p);
3806 if (p->p_state == PRS_NORMAL &&
3807 CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
3808 (checkctty == 0 || (p->p_flag & P_CONTROLT)))
3809 kern_psignal(p, sig);
3810 PROC_UNLOCK(p);
3811 }
3812 PGRP_UNLOCK(sigio->sio_pgrp);
3813 }
3814 SIGIO_UNLOCK();
3815 }
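
The struct sigio consulted here is set up from userland with fcntl(2): F_SETOWN records the owner that pgsigio() will signal, and O_ASYNC arms SIGIO delivery on I/O readiness. A sketch using a socket pair (the polling loop sidesteps the pause(2) race for brevity):

    #include <sys/types.h>
    #include <sys/socket.h>
    #include <fcntl.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static volatile sig_atomic_t got_sigio;

    static void
    handler(int sig)
    {
            got_sigio = 1;
    }

    int
    main(void)
    {
            int sv[2];

            signal(SIGIO, handler);
            socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
            /* Recorded in struct sigio; pgsigio() signals this owner. */
            fcntl(sv[0], F_SETOWN, (int)getpid());
            fcntl(sv[0], F_SETFL, fcntl(sv[0], F_GETFL, 0) | O_ASYNC);

            write(sv[1], "x", 1);           /* readable -> SIGIO to owner */
            while (!got_sigio)
                    usleep(1000);           /* simple poll, avoids pause() race */
            printf("SIGIO delivered\n");
            return (0);
    }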
3816
3817 static int
3818 filt_sigattach(struct knote *kn)
3819 {
3820 struct proc *p = curproc;
3821
3822 kn->kn_ptr.p_proc = p;
3823 kn->kn_flags |= EV_CLEAR; /* automatically set */
3824
3825 knlist_add(p->p_klist, kn, 0);
3826
3827 return (0);
3828 }
3829
3830 static void
3831 filt_sigdetach(struct knote *kn)
3832 {
3833 struct proc *p = kn->kn_ptr.p_proc;
3834
3835 knlist_remove(p->p_klist, kn, 0);
3836 }
3837
3838 /*
3839  * Signal knotes are shared with proc knotes, so we apply a mask to
3840 * the hint in order to differentiate them from process hints. This
3841 * could be avoided by using a signal-specific knote list, but probably
3842 * isn't worth the trouble.
3843 */
3844 static int
3845 filt_signal(struct knote *kn, long hint)
3846 {
3847
3848 if (hint & NOTE_SIGNAL) {
3849 hint &= ~NOTE_SIGNAL;
3850
3851 if (kn->kn_id == hint)
3852 kn->kn_data++;
3853 }
3854 return (kn->kn_data != 0);
3855 }
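
From userland this filter is EVFILT_SIGNAL: because KNOTE_LOCKED() runs in tdsendsignal() before the ignore check, the filter counts delivery attempts even for ignored signals, and kn_data accumulates under the EV_CLEAR forced by filt_sigattach(). A sketch using kqueue(2):

    #include <sys/event.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
            struct kevent kev;
            int kq;

            kq = kqueue();
            /* The filter fires even if the signal is ignored. */
            signal(SIGUSR1, SIG_IGN);
            EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
            kevent(kq, &kev, 1, NULL, 0, NULL);

            raise(SIGUSR1);
            if (kevent(kq, NULL, 0, &kev, 1, NULL) == 1)
                    printf("signal %u seen %ld time(s)\n",
                        (unsigned)kev.ident, (long)kev.data);
            return (0);
    }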
3856
3857 struct sigacts *
3858 sigacts_alloc(void)
3859 {
3860 struct sigacts *ps;
3861
3862 ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO);
3863 refcount_init(&ps->ps_refcnt, 1);
3864 mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF);
3865 return (ps);
3866 }
3867
3868 void
3869 sigacts_free(struct sigacts *ps)
3870 {
3871
3872 if (refcount_release(&ps->ps_refcnt) == 0)
3873 return;
3874 mtx_destroy(&ps->ps_mtx);
3875 free(ps, M_SUBPROC);
3876 }
3877
3878 struct sigacts *
3879 sigacts_hold(struct sigacts *ps)
3880 {
3881
3882 refcount_acquire(&ps->ps_refcnt);
3883 return (ps);
3884 }
3885
3886 void
3887 sigacts_copy(struct sigacts *dest, struct sigacts *src)
3888 {
3889
3890 KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest"));
3891 mtx_lock(&src->ps_mtx);
3892 bcopy(src, dest, offsetof(struct sigacts, ps_refcnt));
3893 mtx_unlock(&src->ps_mtx);
3894 }
3895
3896 int
3897 sigacts_shared(struct sigacts *ps)
3898 {
3899
3900 return (ps->ps_refcnt > 1);
3901 }
3902
3903 void
3904 sig_drop_caught(struct proc *p)
3905 {
3906 int sig;
3907 struct sigacts *ps;
3908
3909 ps = p->p_sigacts;
3910 PROC_LOCK_ASSERT(p, MA_OWNED);
3911 mtx_assert(&ps->ps_mtx, MA_OWNED);
3912 while (SIGNOTEMPTY(ps->ps_sigcatch)) {
3913 sig = sig_ffs(&ps->ps_sigcatch);
3914 sigdflt(ps, sig);
3915 if ((sigprop(sig) & SIGPROP_IGNORE) != 0)
3916 sigqueue_delete_proc(p, sig);
3917 }
3918 }