sys/kern/kern_sig.c
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sig.c	8.7 (Berkeley) 4/18/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/ctype.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/capsicum.h>
#include <sys/compressor.h>
#include <sys/condvar.h>
#include <sys/devctl.h>
#include <sys/event.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/ktrace.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/procdesc.h>
#include <sys/ptrace.h>
#include <sys/posix4.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/sbuf.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>
#include <sys/timers.h>
#include <sys/unistd.h>
#include <sys/wait.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <sys/jail.h>

#include <machine/cpu.h>

#include <security/audit/audit.h>

#define	ONSIG	32		/* NSIG for osig* syscalls.  XXX. */

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE3(proc, , , signal__send,
    "struct thread *", "struct proc *", "int");
SDT_PROBE_DEFINE2(proc, , , signal__clear,
    "int", "ksiginfo_t *");
SDT_PROBE_DEFINE3(proc, , , signal__discard,
    "struct thread *", "struct proc *", "int");

static int	coredump(struct thread *);
static int	killpg1(struct thread *td, int sig, int pgid, int all,
		    ksiginfo_t *ksi);
static int	issignal(struct thread *td);
static void	reschedule_signals(struct proc *p, sigset_t block, int flags);
static int	sigprop(int sig);
static void	tdsigwakeup(struct thread *, int, sig_t, int);
static int	sig_suspend_threads(struct thread *, struct proc *, int);
static int	filt_sigattach(struct knote *kn);
static void	filt_sigdetach(struct knote *kn);
static int	filt_signal(struct knote *kn, long hint);
static struct thread *sigtd(struct proc *p, int sig, bool fast_sigblock);
static void	sigqueue_start(void);

static uma_zone_t	ksiginfo_zone = NULL;
struct filterops sig_filtops = {
	.f_isfd = 0,
	.f_attach = filt_sigattach,
	.f_detach = filt_sigdetach,
	.f_event = filt_signal,
};

static int	kern_logsigexit = 1;
SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
    &kern_logsigexit, 0,
    "Log processes quitting on abnormal signals to syslog(3)");

static int	kern_forcesigexit = 1;
SYSCTL_INT(_kern, OID_AUTO, forcesigexit, CTLFLAG_RW,
    &kern_forcesigexit, 0, "Force trap signal to be handled");

static SYSCTL_NODE(_kern, OID_AUTO, sigqueue, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "POSIX real time signal");

static int	max_pending_per_proc = 128;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, max_pending_per_proc, CTLFLAG_RW,
    &max_pending_per_proc, 0, "Max pending signals per proc");

static int	preallocate_siginfo = 1024;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, preallocate, CTLFLAG_RDTUN,
    &preallocate_siginfo, 0, "Preallocated signal memory size");

static int	signal_overflow = 0;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, overflow, CTLFLAG_RD,
    &signal_overflow, 0, "Number of signals that overflowed");

static int	signal_alloc_fail = 0;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, alloc_fail, CTLFLAG_RD,
    &signal_alloc_fail, 0, "Number of signal allocations that failed");

static int	kern_lognosys = 0;
SYSCTL_INT(_kern, OID_AUTO, lognosys, CTLFLAG_RWTUN, &kern_lognosys, 0,
    "Log invalid syscalls");

__read_frequently bool sigfastblock_fetch_always = false;
SYSCTL_BOOL(_kern, OID_AUTO, sigfastblock_fetch_always, CTLFLAG_RWTUN,
    &sigfastblock_fetch_always, 0,
    "Fetch sigfastblock word on each syscall entry for proper "
    "blocking semantic");

static bool	kern_sig_discard_ign = true;
SYSCTL_BOOL(_kern, OID_AUTO, sig_discard_ign, CTLFLAG_RWTUN,
    &kern_sig_discard_ign, 0,
    "Discard ignored signals on delivery, otherwise queue them to "
    "the target queue");

SYSINIT(signal, SI_SUB_P1003_1B, SI_ORDER_FIRST+3, sigqueue_start, NULL);

/*
 * Policy -- Can ucred cr1 send SIGIO to a process with ucred cr2?
 * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
 * in the right situations.
 */
#define CANSIGIO(cr1, cr2) \
	((cr1)->cr_uid == 0 || \
	    (cr1)->cr_ruid == (cr2)->cr_ruid || \
	    (cr1)->cr_uid == (cr2)->cr_ruid || \
	    (cr1)->cr_ruid == (cr2)->cr_uid || \
	    (cr1)->cr_uid == (cr2)->cr_uid)

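/*
 * Illustrative sketch (not part of the kernel source): the CANSIGIO()
 * policy written out as a predicate.  A root sender, or any match
 * between the sender's real/effective uid and the receiver's
 * real/effective uid, permits delivery:
 *
 *	static bool
 *	cansigio_sketch(const struct ucred *cr1, const struct ucred *cr2)
 *	{
 *		if (cr1->cr_uid == 0)
 *			return (true);	-- sender is root
 *		return (cr1->cr_ruid == cr2->cr_ruid ||
 *		    cr1->cr_uid == cr2->cr_ruid ||
 *		    cr1->cr_ruid == cr2->cr_uid ||
 *		    cr1->cr_uid == cr2->cr_uid);
 *	}
 */
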
static int	sugid_coredump;
SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RWTUN,
    &sugid_coredump, 0, "Allow setuid and setgid processes to dump core");

static int	capmode_coredump;
SYSCTL_INT(_kern, OID_AUTO, capmode_coredump, CTLFLAG_RWTUN,
    &capmode_coredump, 0, "Allow processes in capability mode to dump core");

static int	do_coredump = 1;
SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
    &do_coredump, 0, "Enable/Disable coredumps");

static int	set_core_nodump_flag = 0;
SYSCTL_INT(_kern, OID_AUTO, nodump_coredump, CTLFLAG_RW, &set_core_nodump_flag,
    0, "Enable setting the NODUMP flag on coredump files");

static int	coredump_devctl = 0;
SYSCTL_INT(_kern, OID_AUTO, coredump_devctl, CTLFLAG_RW, &coredump_devctl,
    0, "Generate a devctl notification when processes coredump");

/*
 * Signal properties and actions.
 * The array below categorizes the signals and their default actions
 * according to the following properties:
 */
#define	SIGPROP_KILL		0x01	/* terminates process by default */
#define	SIGPROP_CORE		0x02	/* ditto and coredumps */
#define	SIGPROP_STOP		0x04	/* suspend process */
#define	SIGPROP_TTYSTOP		0x08	/* ditto, from tty */
#define	SIGPROP_IGNORE		0x10	/* ignore by default */
#define	SIGPROP_CONT		0x20	/* continue if suspended */

static int sigproptbl[NSIG] = {
	[SIGHUP] =	SIGPROP_KILL,
	[SIGINT] =	SIGPROP_KILL,
	[SIGQUIT] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGILL] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGTRAP] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGABRT] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGEMT] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGFPE] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGKILL] =	SIGPROP_KILL,
	[SIGBUS] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGSEGV] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGSYS] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGPIPE] =	SIGPROP_KILL,
	[SIGALRM] =	SIGPROP_KILL,
	[SIGTERM] =	SIGPROP_KILL,
	[SIGURG] =	SIGPROP_IGNORE,
	[SIGSTOP] =	SIGPROP_STOP,
	[SIGTSTP] =	SIGPROP_STOP | SIGPROP_TTYSTOP,
	[SIGCONT] =	SIGPROP_IGNORE | SIGPROP_CONT,
	[SIGCHLD] =	SIGPROP_IGNORE,
	[SIGTTIN] =	SIGPROP_STOP | SIGPROP_TTYSTOP,
	[SIGTTOU] =	SIGPROP_STOP | SIGPROP_TTYSTOP,
	[SIGIO] =	SIGPROP_IGNORE,
	[SIGXCPU] =	SIGPROP_KILL,
	[SIGXFSZ] =	SIGPROP_KILL,
	[SIGVTALRM] =	SIGPROP_KILL,
	[SIGPROF] =	SIGPROP_KILL,
	[SIGWINCH] =	SIGPROP_IGNORE,
	[SIGINFO] =	SIGPROP_IGNORE,
	[SIGUSR1] =	SIGPROP_KILL,
	[SIGUSR2] =	SIGPROP_KILL,
};

#define	_SIG_FOREACH_ADVANCE(i, set) ({					\
	int __found;							\
	for (;;) {							\
		if (__bits != 0) {					\
			int __sig = ffs(__bits);			\
			__bits &= ~(1u << (__sig - 1));			\
			sig = __i * sizeof((set)->__bits[0]) * NBBY + __sig; \
			__found = 1;					\
			break;						\
		}							\
		if (++__i == _SIG_WORDS) {				\
			__found = 0;					\
			break;						\
		}							\
		__bits = (set)->__bits[__i];				\
	}								\
	__found != 0;							\
})

#define	SIG_FOREACH(i, set)						\
	for (int32_t __i = -1, __bits = 0;				\
	    _SIG_FOREACH_ADVANCE(i, set); )				\

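/*
 * Usage sketch (illustrative, mirroring the use in execsigs() below):
 * SIG_FOREACH() visits every signal number present in a sigset_t, one
 * word at a time, using ffs() to locate set bits; handle() stands in
 * for a hypothetical per-signal action.
 *
 *	int sig;
 *
 *	SIG_FOREACH(sig, &set) {
 *		handle(sig);
 *	}
 */
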
sigset_t fastblock_mask;

static void
sigqueue_start(void)
{
	ksiginfo_zone = uma_zcreate("ksiginfo", sizeof(ksiginfo_t),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_prealloc(ksiginfo_zone, preallocate_siginfo);
	p31b_setcfg(CTL_P1003_1B_REALTIME_SIGNALS, _POSIX_REALTIME_SIGNALS);
	p31b_setcfg(CTL_P1003_1B_RTSIG_MAX, SIGRTMAX - SIGRTMIN + 1);
	p31b_setcfg(CTL_P1003_1B_SIGQUEUE_MAX, max_pending_per_proc);
	SIGFILLSET(fastblock_mask);
	SIG_CANTMASK(fastblock_mask);
}

ksiginfo_t *
ksiginfo_alloc(int wait)
{
	int flags;

	flags = M_ZERO;
	if (!wait)
		flags |= M_NOWAIT;
	if (ksiginfo_zone != NULL)
		return ((ksiginfo_t *)uma_zalloc(ksiginfo_zone, flags));
	return (NULL);
}

void
ksiginfo_free(ksiginfo_t *ksi)
{
	uma_zfree(ksiginfo_zone, ksi);
}

static __inline int
ksiginfo_tryfree(ksiginfo_t *ksi)
{
	if (!(ksi->ksi_flags & KSI_EXT)) {
		uma_zfree(ksiginfo_zone, ksi);
		return (1);
	}
	return (0);
}

void
sigqueue_init(sigqueue_t *list, struct proc *p)
{
	SIGEMPTYSET(list->sq_signals);
	SIGEMPTYSET(list->sq_kill);
	SIGEMPTYSET(list->sq_ptrace);
	TAILQ_INIT(&list->sq_list);
	list->sq_proc = p;
	list->sq_flags = SQ_INIT;
}

/*
 * Get a signal's ksiginfo.
 * Return:
 *	0	-	signal not found
 *	others	-	signal number
 */
static int
sigqueue_get(sigqueue_t *sq, int signo, ksiginfo_t *si)
{
	struct proc *p = sq->sq_proc;
	struct ksiginfo *ksi, *next;
	int count = 0;

	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));

	if (!SIGISMEMBER(sq->sq_signals, signo))
		return (0);

	if (SIGISMEMBER(sq->sq_ptrace, signo)) {
		count++;
		SIGDELSET(sq->sq_ptrace, signo);
		si->ksi_flags |= KSI_PTRACE;
	}
	if (SIGISMEMBER(sq->sq_kill, signo)) {
		count++;
		if (count == 1)
			SIGDELSET(sq->sq_kill, signo);
	}

	TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
		if (ksi->ksi_signo == signo) {
			if (count == 0) {
				TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
				ksi->ksi_sigq = NULL;
				ksiginfo_copy(ksi, si);
				if (ksiginfo_tryfree(ksi) && p != NULL)
					p->p_pendingcnt--;
			}
			if (++count > 1)
				break;
		}
	}

	if (count <= 1)
		SIGDELSET(sq->sq_signals, signo);
	si->ksi_signo = signo;
	return (signo);
}

void
sigqueue_take(ksiginfo_t *ksi)
{
	struct ksiginfo *kp;
	struct proc *p;
	sigqueue_t *sq;

	if (ksi == NULL || (sq = ksi->ksi_sigq) == NULL)
		return;

	p = sq->sq_proc;
	TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
	ksi->ksi_sigq = NULL;
	if (!(ksi->ksi_flags & KSI_EXT) && p != NULL)
		p->p_pendingcnt--;

	for (kp = TAILQ_FIRST(&sq->sq_list); kp != NULL;
	     kp = TAILQ_NEXT(kp, ksi_link)) {
		if (kp->ksi_signo == ksi->ksi_signo)
			break;
	}
	if (kp == NULL && !SIGISMEMBER(sq->sq_kill, ksi->ksi_signo) &&
	    !SIGISMEMBER(sq->sq_ptrace, ksi->ksi_signo))
		SIGDELSET(sq->sq_signals, ksi->ksi_signo);
}

static int
sigqueue_add(sigqueue_t *sq, int signo, ksiginfo_t *si)
{
	struct proc *p = sq->sq_proc;
	struct ksiginfo *ksi;
	int ret = 0;

	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));

	/*
	 * SIGKILL/SIGSTOP cannot be caught or masked, so take the fast path
	 * for these signals.
	 */
	if (signo == SIGKILL || signo == SIGSTOP || si == NULL) {
		SIGADDSET(sq->sq_kill, signo);
		goto out_set_bit;
	}

	/* directly insert the ksi, don't copy it */
	if (si->ksi_flags & KSI_INS) {
		if (si->ksi_flags & KSI_HEAD)
			TAILQ_INSERT_HEAD(&sq->sq_list, si, ksi_link);
		else
			TAILQ_INSERT_TAIL(&sq->sq_list, si, ksi_link);
		si->ksi_sigq = sq;
		goto out_set_bit;
	}

	if (__predict_false(ksiginfo_zone == NULL)) {
		SIGADDSET(sq->sq_kill, signo);
		goto out_set_bit;
	}

	if (p != NULL && p->p_pendingcnt >= max_pending_per_proc) {
		signal_overflow++;
		ret = EAGAIN;
	} else if ((ksi = ksiginfo_alloc(0)) == NULL) {
		signal_alloc_fail++;
		ret = EAGAIN;
	} else {
		if (p != NULL)
			p->p_pendingcnt++;
		ksiginfo_copy(si, ksi);
		ksi->ksi_signo = signo;
		if (si->ksi_flags & KSI_HEAD)
			TAILQ_INSERT_HEAD(&sq->sq_list, ksi, ksi_link);
		else
			TAILQ_INSERT_TAIL(&sq->sq_list, ksi, ksi_link);
		ksi->ksi_sigq = sq;
	}

	if (ret != 0) {
		if ((si->ksi_flags & KSI_PTRACE) != 0) {
			SIGADDSET(sq->sq_ptrace, signo);
			ret = 0;
			goto out_set_bit;
		} else if ((si->ksi_flags & KSI_TRAP) != 0 ||
		    (si->ksi_flags & KSI_SIGQ) == 0) {
			SIGADDSET(sq->sq_kill, signo);
			ret = 0;
			goto out_set_bit;
		}
		return (ret);
	}

out_set_bit:
	SIGADDSET(sq->sq_signals, signo);
	return (ret);
}

void
sigqueue_flush(sigqueue_t *sq)
{
	struct proc *p = sq->sq_proc;
	ksiginfo_t *ksi;

	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));

	if (p != NULL)
		PROC_LOCK_ASSERT(p, MA_OWNED);

	while ((ksi = TAILQ_FIRST(&sq->sq_list)) != NULL) {
		TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
		ksi->ksi_sigq = NULL;
		if (ksiginfo_tryfree(ksi) && p != NULL)
			p->p_pendingcnt--;
	}

	SIGEMPTYSET(sq->sq_signals);
	SIGEMPTYSET(sq->sq_kill);
	SIGEMPTYSET(sq->sq_ptrace);
}

static void
sigqueue_move_set(sigqueue_t *src, sigqueue_t *dst, const sigset_t *set)
{
	sigset_t tmp;
	struct proc *p1, *p2;
	ksiginfo_t *ksi, *next;

	KASSERT(src->sq_flags & SQ_INIT, ("src sigqueue not inited"));
	KASSERT(dst->sq_flags & SQ_INIT, ("dst sigqueue not inited"));
	p1 = src->sq_proc;
	p2 = dst->sq_proc;
	/* Move siginfo to target list */
	TAILQ_FOREACH_SAFE(ksi, &src->sq_list, ksi_link, next) {
		if (SIGISMEMBER(*set, ksi->ksi_signo)) {
			TAILQ_REMOVE(&src->sq_list, ksi, ksi_link);
			if (p1 != NULL)
				p1->p_pendingcnt--;
			TAILQ_INSERT_TAIL(&dst->sq_list, ksi, ksi_link);
			ksi->ksi_sigq = dst;
			if (p2 != NULL)
				p2->p_pendingcnt++;
		}
	}

	/* Move pending bits to target list */
	tmp = src->sq_kill;
	SIGSETAND(tmp, *set);
	SIGSETOR(dst->sq_kill, tmp);
	SIGSETNAND(src->sq_kill, tmp);

	tmp = src->sq_ptrace;
	SIGSETAND(tmp, *set);
	SIGSETOR(dst->sq_ptrace, tmp);
	SIGSETNAND(src->sq_ptrace, tmp);

	tmp = src->sq_signals;
	SIGSETAND(tmp, *set);
	SIGSETOR(dst->sq_signals, tmp);
	SIGSETNAND(src->sq_signals, tmp);
}

#if 0
static void
sigqueue_move(sigqueue_t *src, sigqueue_t *dst, int signo)
{
	sigset_t set;

	SIGEMPTYSET(set);
	SIGADDSET(set, signo);
	sigqueue_move_set(src, dst, &set);
}
#endif

static void
sigqueue_delete_set(sigqueue_t *sq, const sigset_t *set)
{
	struct proc *p = sq->sq_proc;
	ksiginfo_t *ksi, *next;

	KASSERT(sq->sq_flags & SQ_INIT, ("src sigqueue not inited"));

	/* Remove siginfo queue */
	TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
		if (SIGISMEMBER(*set, ksi->ksi_signo)) {
			TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
			ksi->ksi_sigq = NULL;
			if (ksiginfo_tryfree(ksi) && p != NULL)
				p->p_pendingcnt--;
		}
	}
	SIGSETNAND(sq->sq_kill, *set);
	SIGSETNAND(sq->sq_ptrace, *set);
	SIGSETNAND(sq->sq_signals, *set);
}

void
sigqueue_delete(sigqueue_t *sq, int signo)
{
	sigset_t set;

	SIGEMPTYSET(set);
	SIGADDSET(set, signo);
	sigqueue_delete_set(sq, &set);
}

/* Remove a set of signals for a process */
static void
sigqueue_delete_set_proc(struct proc *p, const sigset_t *set)
{
	sigqueue_t worklist;
	struct thread *td0;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	sigqueue_init(&worklist, NULL);
	sigqueue_move_set(&p->p_sigqueue, &worklist, set);

	FOREACH_THREAD_IN_PROC(p, td0)
		sigqueue_move_set(&td0->td_sigqueue, &worklist, set);

	sigqueue_flush(&worklist);
}

void
sigqueue_delete_proc(struct proc *p, int signo)
{
	sigset_t set;

	SIGEMPTYSET(set);
	SIGADDSET(set, signo);
	sigqueue_delete_set_proc(p, &set);
}

static void
sigqueue_delete_stopmask_proc(struct proc *p)
{
	sigset_t set;

	SIGEMPTYSET(set);
	SIGADDSET(set, SIGSTOP);
	SIGADDSET(set, SIGTSTP);
	SIGADDSET(set, SIGTTIN);
	SIGADDSET(set, SIGTTOU);
	sigqueue_delete_set_proc(p, &set);
}

/*
 * Determine the signal that should be delivered to the thread td, the
 * current thread; return 0 if none.  If there is a pending stop signal
 * with default action, the process stops in issignal().
 */
int
cursig(struct thread *td)
{
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
	mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
	return (SIGPENDING(td) ? issignal(td) : 0);
}

/*
 * Arrange for ast() to handle unmasked pending signals on return to user
 * mode.  This must be called whenever a signal is added to td_sigqueue or
 * unmasked in td_sigmask.
 */
void
signotify(struct thread *td)
{

	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);

	if (SIGPENDING(td)) {
		thread_lock(td);
		td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING;
		thread_unlock(td);
	}
}

/*
 * Returns 1 (true) if altstack is configured for the thread, and the
 * passed stack bottom address falls into the altstack range.  Handles
 * the COMPAT_43 special case where the alt stack size is zero.
 */
int
sigonstack(size_t sp)
{
	struct thread *td;

	td = curthread;
	if ((td->td_pflags & TDP_ALTSTACK) == 0)
		return (0);
#if defined(COMPAT_43)
	if (SV_PROC_FLAG(td->td_proc, SV_AOUT) && td->td_sigstk.ss_size == 0)
		return ((td->td_sigstk.ss_flags & SS_ONSTACK) != 0);
#endif
	return (sp >= (size_t)td->td_sigstk.ss_sp &&
	    sp < td->td_sigstk.ss_size + (size_t)td->td_sigstk.ss_sp);
}

static __inline int
sigprop(int sig)
{

	if (sig > 0 && sig < nitems(sigproptbl))
		return (sigproptbl[sig]);
	return (0);
}

static bool
sigact_flag_test(const struct sigaction *act, int flag)
{

	/*
	 * SA_SIGINFO is reset when signal disposition is set to
	 * ignore or default.  Other flags are kept according to user
	 * settings.
	 */
	return ((act->sa_flags & flag) != 0 && (flag != SA_SIGINFO ||
	    ((__sighandler_t *)act->sa_sigaction != SIG_IGN &&
	    (__sighandler_t *)act->sa_sigaction != SIG_DFL)));
}

/*
 * kern_sigaction
 * sigaction
 * freebsd4_sigaction
 * osigaction
 */
int
kern_sigaction(struct thread *td, int sig, const struct sigaction *act,
    struct sigaction *oact, int flags)
{
	struct sigacts *ps;
	struct proc *p = td->td_proc;

	if (!_SIG_VALID(sig))
		return (EINVAL);
	if (act != NULL && act->sa_handler != SIG_DFL &&
	    act->sa_handler != SIG_IGN && (act->sa_flags & ~(SA_ONSTACK |
	    SA_RESTART | SA_RESETHAND | SA_NOCLDSTOP | SA_NODEFER |
	    SA_NOCLDWAIT | SA_SIGINFO)) != 0)
		return (EINVAL);

	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	if (oact) {
		memset(oact, 0, sizeof(*oact));
		oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
		if (SIGISMEMBER(ps->ps_sigonstack, sig))
			oact->sa_flags |= SA_ONSTACK;
		if (!SIGISMEMBER(ps->ps_sigintr, sig))
			oact->sa_flags |= SA_RESTART;
		if (SIGISMEMBER(ps->ps_sigreset, sig))
			oact->sa_flags |= SA_RESETHAND;
		if (SIGISMEMBER(ps->ps_signodefer, sig))
			oact->sa_flags |= SA_NODEFER;
		if (SIGISMEMBER(ps->ps_siginfo, sig)) {
			oact->sa_flags |= SA_SIGINFO;
			oact->sa_sigaction =
			    (__siginfohandler_t *)ps->ps_sigact[_SIG_IDX(sig)];
		} else
			oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
		if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP)
			oact->sa_flags |= SA_NOCLDSTOP;
		if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT)
			oact->sa_flags |= SA_NOCLDWAIT;
	}
	if (act) {
		if ((sig == SIGKILL || sig == SIGSTOP) &&
		    act->sa_handler != SIG_DFL) {
			mtx_unlock(&ps->ps_mtx);
			PROC_UNLOCK(p);
			return (EINVAL);
		}

		/*
		 * Change setting atomically.
		 */

		ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
		SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
		if (sigact_flag_test(act, SA_SIGINFO)) {
			ps->ps_sigact[_SIG_IDX(sig)] =
			    (__sighandler_t *)act->sa_sigaction;
			SIGADDSET(ps->ps_siginfo, sig);
		} else {
			ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
			SIGDELSET(ps->ps_siginfo, sig);
		}
		if (!sigact_flag_test(act, SA_RESTART))
			SIGADDSET(ps->ps_sigintr, sig);
		else
			SIGDELSET(ps->ps_sigintr, sig);
		if (sigact_flag_test(act, SA_ONSTACK))
			SIGADDSET(ps->ps_sigonstack, sig);
		else
			SIGDELSET(ps->ps_sigonstack, sig);
		if (sigact_flag_test(act, SA_RESETHAND))
			SIGADDSET(ps->ps_sigreset, sig);
		else
			SIGDELSET(ps->ps_sigreset, sig);
		if (sigact_flag_test(act, SA_NODEFER))
			SIGADDSET(ps->ps_signodefer, sig);
		else
			SIGDELSET(ps->ps_signodefer, sig);
		if (sig == SIGCHLD) {
			if (act->sa_flags & SA_NOCLDSTOP)
				ps->ps_flag |= PS_NOCLDSTOP;
			else
				ps->ps_flag &= ~PS_NOCLDSTOP;
			if (act->sa_flags & SA_NOCLDWAIT) {
				/*
				 * Paranoia: since SA_NOCLDWAIT is implemented
				 * by reparenting the dying child to PID 1 (and
				 * trust it to reap the zombie), PID 1 itself
				 * is forbidden to set SA_NOCLDWAIT.
				 */
				if (p->p_pid == 1)
					ps->ps_flag &= ~PS_NOCLDWAIT;
				else
					ps->ps_flag |= PS_NOCLDWAIT;
			} else
				ps->ps_flag &= ~PS_NOCLDWAIT;
			if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
				ps->ps_flag |= PS_CLDSIGIGN;
			else
				ps->ps_flag &= ~PS_CLDSIGIGN;
		}
		/*
		 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
		 * and for signals set to SIG_DFL where the default is to
		 * ignore.  However, don't put SIGCONT in ps_sigignore, as we
		 * have to restart the process.
		 */
		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
		    (sigprop(sig) & SIGPROP_IGNORE &&
		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
			/* never to be seen again */
			sigqueue_delete_proc(p, sig);
			if (sig != SIGCONT)
				/* easier in psignal */
				SIGADDSET(ps->ps_sigignore, sig);
			SIGDELSET(ps->ps_sigcatch, sig);
		} else {
			SIGDELSET(ps->ps_sigignore, sig);
			if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
				SIGDELSET(ps->ps_sigcatch, sig);
			else
				SIGADDSET(ps->ps_sigcatch, sig);
		}
#ifdef COMPAT_FREEBSD4
		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
		    (flags & KSA_FREEBSD4) == 0)
			SIGDELSET(ps->ps_freebsd4, sig);
		else
			SIGADDSET(ps->ps_freebsd4, sig);
#endif
#ifdef COMPAT_43
		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
		    (flags & KSA_OSIGSET) == 0)
			SIGDELSET(ps->ps_osigset, sig);
		else
			SIGADDSET(ps->ps_osigset, sig);
#endif
	}
	mtx_unlock(&ps->ps_mtx);
	PROC_UNLOCK(p);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct sigaction_args {
	int	sig;
	struct	sigaction *act;
	struct	sigaction *oact;
};
#endif
int
sys_sigaction(struct thread *td, struct sigaction_args *uap)
{
	struct sigaction act, oact;
	struct sigaction *actp, *oactp;
	int error;

	actp = (uap->act != NULL) ? &act : NULL;
	oactp = (uap->oact != NULL) ? &oact : NULL;
	if (actp) {
		error = copyin(uap->act, actp, sizeof(act));
		if (error)
			return (error);
	}
	error = kern_sigaction(td, uap->sig, actp, oactp, 0);
	if (oactp && !error)
		error = copyout(oactp, uap->oact, sizeof(oact));
	return (error);
}

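/*
 * Userland usage sketch (illustrative, not part of this file): install
 * a SA_SIGINFO handler for SIGUSR1 through sigaction(2), the front end
 * of kern_sigaction() above.  handle_usr1 is a hypothetical handler
 * with the signature void handle_usr1(int, siginfo_t *, void *).
 *
 *	struct sigaction sa;
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_sigaction = handle_usr1;
 *	sa.sa_flags = SA_SIGINFO | SA_RESTART;
 *	sigemptyset(&sa.sa_mask);
 *	if (sigaction(SIGUSR1, &sa, NULL) == -1)
 *		err(1, "sigaction");
 */
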
#ifdef COMPAT_FREEBSD4
#ifndef _SYS_SYSPROTO_H_
struct freebsd4_sigaction_args {
	int	sig;
	struct	sigaction *act;
	struct	sigaction *oact;
};
#endif
int
freebsd4_sigaction(struct thread *td, struct freebsd4_sigaction_args *uap)
{
	struct sigaction act, oact;
	struct sigaction *actp, *oactp;
	int error;

	actp = (uap->act != NULL) ? &act : NULL;
	oactp = (uap->oact != NULL) ? &oact : NULL;
	if (actp) {
		error = copyin(uap->act, actp, sizeof(act));
		if (error)
			return (error);
	}
	error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4);
	if (oactp && !error)
		error = copyout(oactp, uap->oact, sizeof(oact));
	return (error);
}
#endif	/* COMPAT_FREEBSD4 */

#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
#ifndef _SYS_SYSPROTO_H_
struct osigaction_args {
	int	signum;
	struct	osigaction *nsa;
	struct	osigaction *osa;
};
#endif
int
osigaction(struct thread *td, struct osigaction_args *uap)
{
	struct osigaction sa;
	struct sigaction nsa, osa;
	struct sigaction *nsap, *osap;
	int error;

	if (uap->signum <= 0 || uap->signum >= ONSIG)
		return (EINVAL);

	nsap = (uap->nsa != NULL) ? &nsa : NULL;
	osap = (uap->osa != NULL) ? &osa : NULL;

	if (nsap) {
		error = copyin(uap->nsa, &sa, sizeof(sa));
		if (error)
			return (error);
		nsap->sa_handler = sa.sa_handler;
		nsap->sa_flags = sa.sa_flags;
		OSIG2SIG(sa.sa_mask, nsap->sa_mask);
	}
	error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
	if (osap && !error) {
		sa.sa_handler = osap->sa_handler;
		sa.sa_flags = osap->sa_flags;
		SIG2OSIG(osap->sa_mask, sa.sa_mask);
		error = copyout(&sa, uap->osa, sizeof(sa));
	}
	return (error);
}

#if !defined(__i386__)
/* Avoid replicating the same stub everywhere */
int
osigreturn(struct thread *td, struct osigreturn_args *uap)
{

	return (nosys(td, (struct nosys_args *)uap));
}
#endif
#endif /* COMPAT_43 */

/*
 * Initialize signal state for process 0;
 * set to ignore signals that are ignored by default.
 */
void
siginit(struct proc *p)
{
	int i;
	struct sigacts *ps;

	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	for (i = 1; i <= NSIG; i++) {
		if (sigprop(i) & SIGPROP_IGNORE && i != SIGCONT) {
			SIGADDSET(ps->ps_sigignore, i);
		}
	}
	mtx_unlock(&ps->ps_mtx);
	PROC_UNLOCK(p);
}

/*
 * Reset specified signal to the default disposition.
 */
static void
sigdflt(struct sigacts *ps, int sig)
{

	mtx_assert(&ps->ps_mtx, MA_OWNED);
	SIGDELSET(ps->ps_sigcatch, sig);
	if ((sigprop(sig) & SIGPROP_IGNORE) != 0 && sig != SIGCONT)
		SIGADDSET(ps->ps_sigignore, sig);
	ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
	SIGDELSET(ps->ps_siginfo, sig);
}

/*
 * Reset signals for an exec of the specified process.
 */
void
execsigs(struct proc *p)
{
	sigset_t osigignore;
	struct sigacts *ps;
	int sig;
	struct thread *td;

	/*
	 * Reset caught signals.  Held signals remain held
	 * through td_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	PROC_LOCK_ASSERT(p, MA_OWNED);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	sig_drop_caught(p);

	/*
	 * As CloudABI processes cannot modify signal handlers, fully
	 * reset all signals to their default behavior.  Do ignore
	 * SIGPIPE, as it would otherwise be impossible to recover from
	 * writes to broken pipes and sockets.
	 */
	if (SV_PROC_ABI(p) == SV_ABI_CLOUDABI) {
		osigignore = ps->ps_sigignore;
		SIG_FOREACH(sig, &osigignore) {
			if (sig != SIGPIPE)
				sigdflt(ps, sig);
		}
		SIGADDSET(ps->ps_sigignore, SIGPIPE);
	}

	/*
	 * Reset stack state to the user stack.
	 * Clear set of signals caught on the signal stack.
	 */
	td = curthread;
	MPASS(td->td_proc == p);
	td->td_sigstk.ss_flags = SS_DISABLE;
	td->td_sigstk.ss_size = 0;
	td->td_sigstk.ss_sp = 0;
	td->td_pflags &= ~TDP_ALTSTACK;
	/*
	 * Reset the "no zombies when child dies" flag, as Solaris does.
	 */
	ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
	if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
		ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
	mtx_unlock(&ps->ps_mtx);
}

/*
 * kern_sigprocmask()
 *
 *	Manipulate signal mask.
 */
int
kern_sigprocmask(struct thread *td, int how, sigset_t *set, sigset_t *oset,
    int flags)
{
	sigset_t new_block, oset1;
	struct proc *p;
	int error;

	p = td->td_proc;
	if ((flags & SIGPROCMASK_PROC_LOCKED) != 0)
		PROC_LOCK_ASSERT(p, MA_OWNED);
	else
		PROC_LOCK(p);
	mtx_assert(&p->p_sigacts->ps_mtx, (flags & SIGPROCMASK_PS_LOCKED) != 0
	    ? MA_OWNED : MA_NOTOWNED);
	if (oset != NULL)
		*oset = td->td_sigmask;

	error = 0;
	if (set != NULL) {
		switch (how) {
		case SIG_BLOCK:
			SIG_CANTMASK(*set);
			oset1 = td->td_sigmask;
			SIGSETOR(td->td_sigmask, *set);
			new_block = td->td_sigmask;
			SIGSETNAND(new_block, oset1);
			break;
		case SIG_UNBLOCK:
			SIGSETNAND(td->td_sigmask, *set);
			signotify(td);
			goto out;
		case SIG_SETMASK:
			SIG_CANTMASK(*set);
			oset1 = td->td_sigmask;
			if (flags & SIGPROCMASK_OLD)
				SIGSETLO(td->td_sigmask, *set);
			else
				td->td_sigmask = *set;
			new_block = td->td_sigmask;
			SIGSETNAND(new_block, oset1);
			signotify(td);
			break;
		default:
			error = EINVAL;
			goto out;
		}

		/*
		 * The new_block set contains signals that were not previously
		 * blocked, but are blocked now.
		 *
		 * If we block any signal that was not previously blocked
		 * for td, and the process has the signal pending, try to
		 * schedule signal delivery to some thread that does not
		 * block the signal, possibly waking it up.
		 */
		if (p->p_numthreads != 1)
			reschedule_signals(p, new_block, flags);
	}

out:
	if (!(flags & SIGPROCMASK_PROC_LOCKED))
		PROC_UNLOCK(p);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct sigprocmask_args {
	int	how;
	const sigset_t *set;
	sigset_t *oset;
};
#endif
int
sys_sigprocmask(struct thread *td, struct sigprocmask_args *uap)
{
	sigset_t set, oset;
	sigset_t *setp, *osetp;
	int error;

	setp = (uap->set != NULL) ? &set : NULL;
	osetp = (uap->oset != NULL) ? &oset : NULL;
	if (setp) {
		error = copyin(uap->set, setp, sizeof(set));
		if (error)
			return (error);
	}
	error = kern_sigprocmask(td, uap->how, setp, osetp, 0);
	if (osetp && !error) {
		error = copyout(osetp, uap->oset, sizeof(oset));
	}
	return (error);
}

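/*
 * Userland usage sketch (illustrative): block SIGINT around a critical
 * section using sigprocmask(2), the front end of kern_sigprocmask()
 * above, then restore the previous mask.
 *
 *	sigset_t set, oset;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &oset);
 *	...critical section...
 *	sigprocmask(SIG_SETMASK, &oset, NULL);
 */
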
#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
#ifndef _SYS_SYSPROTO_H_
struct osigprocmask_args {
	int	how;
	osigset_t mask;
};
#endif
int
osigprocmask(struct thread *td, struct osigprocmask_args *uap)
{
	sigset_t set, oset;
	int error;

	OSIG2SIG(uap->mask, set);
	error = kern_sigprocmask(td, uap->how, &set, &oset, 1);
	SIG2OSIG(oset, td->td_retval[0]);
	return (error);
}
#endif /* COMPAT_43 */

int
sys_sigwait(struct thread *td, struct sigwait_args *uap)
{
	ksiginfo_t ksi;
	sigset_t set;
	int error;

	error = copyin(uap->set, &set, sizeof(set));
	if (error) {
		td->td_retval[0] = error;
		return (0);
	}

	error = kern_sigtimedwait(td, set, &ksi, NULL);
	if (error) {
		/*
		 * The sigwait() function shall not return EINTR, but
		 * the syscall does.  A non-ancient libc provides a
		 * wrapper which hides EINTR.  Otherwise, the EINTR
		 * return is used by libthr to handle the required
		 * cancellation point in sigwait().
		 */
		if (error == EINTR && td->td_proc->p_osrel < P_OSREL_SIGWAIT)
			return (ERESTART);
		td->td_retval[0] = error;
		return (0);
	}

	error = copyout(&ksi.ksi_signo, uap->sig, sizeof(ksi.ksi_signo));
	td->td_retval[0] = error;
	return (0);
}

int
sys_sigtimedwait(struct thread *td, struct sigtimedwait_args *uap)
{
	struct timespec ts;
	struct timespec *timeout;
	sigset_t set;
	ksiginfo_t ksi;
	int error;

	if (uap->timeout) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);

		timeout = &ts;
	} else
		timeout = NULL;

	error = copyin(uap->set, &set, sizeof(set));
	if (error)
		return (error);

	error = kern_sigtimedwait(td, set, &ksi, timeout);
	if (error)
		return (error);

	if (uap->info)
		error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));

	if (error == 0)
		td->td_retval[0] = ksi.ksi_signo;
	return (error);
}

int
sys_sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap)
{
	ksiginfo_t ksi;
	sigset_t set;
	int error;

	error = copyin(uap->set, &set, sizeof(set));
	if (error)
		return (error);

	error = kern_sigtimedwait(td, set, &ksi, NULL);
	if (error)
		return (error);

	if (uap->info)
		error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));

	if (error == 0)
		td->td_retval[0] = ksi.ksi_signo;
	return (error);
}

static void
proc_td_siginfo_capture(struct thread *td, siginfo_t *si)
{
	struct thread *thr;

	FOREACH_THREAD_IN_PROC(td->td_proc, thr) {
		if (thr == td)
			thr->td_si = *si;
		else
			thr->td_si.si_signo = 0;
	}
}

int
kern_sigtimedwait(struct thread *td, sigset_t waitset, ksiginfo_t *ksi,
    struct timespec *timeout)
{
	struct sigacts *ps;
	sigset_t saved_mask, new_block;
	struct proc *p;
	int error, sig, timo, timevalid = 0;
	struct timespec rts, ets, ts;
	struct timeval tv;
	bool traced;

	p = td->td_proc;
	error = 0;
	ets.tv_sec = 0;
	ets.tv_nsec = 0;
	traced = false;

	/* Ensure the sigfastblock value is up to date. */
	sigfastblock_fetch(td);

	if (timeout != NULL) {
		if (timeout->tv_nsec >= 0 && timeout->tv_nsec < 1000000000) {
			timevalid = 1;
			getnanouptime(&rts);
			timespecadd(&rts, timeout, &ets);
		}
	}
	ksiginfo_init(ksi);
	/* Some signals cannot be waited for. */
	SIG_CANTMASK(waitset);
	ps = p->p_sigacts;
	PROC_LOCK(p);
	saved_mask = td->td_sigmask;
	SIGSETNAND(td->td_sigmask, waitset);
	if ((p->p_sysent->sv_flags & SV_SIG_DISCIGN) != 0 ||
	    !kern_sig_discard_ign) {
		thread_lock(td);
		td->td_flags |= TDF_SIGWAIT;
		thread_unlock(td);
	}
	for (;;) {
		mtx_lock(&ps->ps_mtx);
		sig = cursig(td);
		mtx_unlock(&ps->ps_mtx);
		KASSERT(sig >= 0, ("sig %d", sig));
		if (sig != 0 && SIGISMEMBER(waitset, sig)) {
			if (sigqueue_get(&td->td_sigqueue, sig, ksi) != 0 ||
			    sigqueue_get(&p->p_sigqueue, sig, ksi) != 0) {
				error = 0;
				break;
			}
		}

		if (error != 0)
			break;

		/*
		 * POSIX says this must be checked after looking for pending
		 * signals.
		 */
		if (timeout != NULL) {
			if (!timevalid) {
				error = EINVAL;
				break;
			}
			getnanouptime(&rts);
			if (timespeccmp(&rts, &ets, >=)) {
				error = EAGAIN;
				break;
			}
			timespecsub(&ets, &rts, &ts);
			TIMESPEC_TO_TIMEVAL(&tv, &ts);
			timo = tvtohz(&tv);
		} else {
			timo = 0;
		}

		if (traced) {
			error = EINTR;
			break;
		}

		error = msleep(&p->p_sigacts, &p->p_mtx, PPAUSE | PCATCH,
		    "sigwait", timo);

		/* These syscalls cannot be restarted. */
		if (error == ERESTART)
			error = EINTR;

		/* We will recalculate the timeout ourselves. */
		if (timeout != NULL && error == EAGAIN)
			error = 0;

		/*
		 * If PTRACE_SCE or PTRACE_SCX were set after
		 * userspace entered the syscall, return spurious
		 * EINTR after wait was done.  Only do this as last
		 * resort after rechecking for possible queued signals
		 * and expired timeouts.
		 */
		if (error == 0 && (p->p_ptevents & PTRACE_SYSCALL) != 0)
			traced = true;
	}
	thread_lock(td);
	td->td_flags &= ~TDF_SIGWAIT;
	thread_unlock(td);

	new_block = saved_mask;
	SIGSETNAND(new_block, td->td_sigmask);
	td->td_sigmask = saved_mask;
	/*
	 * Fewer signals can now be delivered to us; reschedule the
	 * signal notification.
	 */
	if (p->p_numthreads != 1)
		reschedule_signals(p, new_block, 0);

	if (error == 0) {
		SDT_PROBE2(proc, , , signal__clear, sig, ksi);

		if (ksi->ksi_code == SI_TIMER)
			itimer_accept(p, ksi->ksi_timerid, ksi);

#ifdef KTRACE
		if (KTRPOINT(td, KTR_PSIG)) {
			sig_t action;

			mtx_lock(&ps->ps_mtx);
			action = ps->ps_sigact[_SIG_IDX(sig)];
			mtx_unlock(&ps->ps_mtx);
			ktrpsig(sig, action, &td->td_sigmask, ksi->ksi_code);
		}
#endif
		if (sig == SIGKILL) {
			proc_td_siginfo_capture(td, &ksi->ksi_info);
			sigexit(td, sig);
		}
	}
	PROC_UNLOCK(p);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct sigpending_args {
	sigset_t *set;
};
#endif
int
sys_sigpending(struct thread *td, struct sigpending_args *uap)
{
	struct proc *p = td->td_proc;
	sigset_t pending;

	PROC_LOCK(p);
	pending = p->p_sigqueue.sq_signals;
	SIGSETOR(pending, td->td_sigqueue.sq_signals);
	PROC_UNLOCK(p);
	return (copyout(&pending, uap->set, sizeof(sigset_t)));
}

#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
#ifndef _SYS_SYSPROTO_H_
struct osigpending_args {
	int	dummy;
};
#endif
int
osigpending(struct thread *td, struct osigpending_args *uap)
{
	struct proc *p = td->td_proc;
	sigset_t pending;

	PROC_LOCK(p);
	pending = p->p_sigqueue.sq_signals;
	SIGSETOR(pending, td->td_sigqueue.sq_signals);
	PROC_UNLOCK(p);
	SIG2OSIG(pending, td->td_retval[0]);
	return (0);
}
#endif /* COMPAT_43 */

#if defined(COMPAT_43)
/*
 * Generalized interface signal handler, 4.3-compatible.
 */
#ifndef _SYS_SYSPROTO_H_
struct osigvec_args {
	int	signum;
	struct	sigvec *nsv;
	struct	sigvec *osv;
};
#endif
/* ARGSUSED */
int
osigvec(struct thread *td, struct osigvec_args *uap)
{
	struct sigvec vec;
	struct sigaction nsa, osa;
	struct sigaction *nsap, *osap;
	int error;

	if (uap->signum <= 0 || uap->signum >= ONSIG)
		return (EINVAL);
	nsap = (uap->nsv != NULL) ? &nsa : NULL;
	osap = (uap->osv != NULL) ? &osa : NULL;
	if (nsap) {
		error = copyin(uap->nsv, &vec, sizeof(vec));
		if (error)
			return (error);
		nsap->sa_handler = vec.sv_handler;
		OSIG2SIG(vec.sv_mask, nsap->sa_mask);
		nsap->sa_flags = vec.sv_flags;
		nsap->sa_flags ^= SA_RESTART;	/* opposite of SV_INTERRUPT */
	}
	error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
	if (osap && !error) {
		vec.sv_handler = osap->sa_handler;
		SIG2OSIG(osap->sa_mask, vec.sv_mask);
		vec.sv_flags = osap->sa_flags;
		vec.sv_flags &= ~SA_NOCLDWAIT;
		vec.sv_flags ^= SA_RESTART;
		error = copyout(&vec, uap->osv, sizeof(vec));
	}
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct osigblock_args {
	int	mask;
};
#endif
int
osigblock(struct thread *td, struct osigblock_args *uap)
{
	sigset_t set, oset;

	OSIG2SIG(uap->mask, set);
	kern_sigprocmask(td, SIG_BLOCK, &set, &oset, 0);
	SIG2OSIG(oset, td->td_retval[0]);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct osigsetmask_args {
	int	mask;
};
#endif
int
osigsetmask(struct thread *td, struct osigsetmask_args *uap)
{
	sigset_t set, oset;

	OSIG2SIG(uap->mask, set);
	kern_sigprocmask(td, SIG_SETMASK, &set, &oset, 0);
	SIG2OSIG(oset, td->td_retval[0]);
	return (0);
}
#endif /* COMPAT_43 */

/*
 * Suspend calling thread until signal, providing mask to be set in the
 * meantime.
 */
#ifndef _SYS_SYSPROTO_H_
struct sigsuspend_args {
	const sigset_t *sigmask;
};
#endif
/* ARGSUSED */
int
sys_sigsuspend(struct thread *td, struct sigsuspend_args *uap)
{
	sigset_t mask;
	int error;

	error = copyin(uap->sigmask, &mask, sizeof(mask));
	if (error)
		return (error);
	return (kern_sigsuspend(td, mask));
}

int
kern_sigsuspend(struct thread *td, sigset_t mask)
{
	struct proc *p = td->td_proc;
	int has_sig, sig;

	/* Ensure the sigfastblock value is up to date. */
	sigfastblock_fetch(td);

	/*
	 * When returning from sigsuspend, we want
	 * the old mask to be restored after the
	 * signal handler has finished.  Thus, we
	 * save it here and mark the sigacts structure
	 * to indicate this.
	 */
	PROC_LOCK(p);
	kern_sigprocmask(td, SIG_SETMASK, &mask, &td->td_oldsigmask,
	    SIGPROCMASK_PROC_LOCKED);
	td->td_pflags |= TDP_OLDMASK;

	/*
	 * Process signals now.  Otherwise, we can get a spurious
	 * wakeup due to a signal entering the process queue but being
	 * delivered to another thread, while sigsuspend() should
	 * return only on signal delivery.
	 */
	(p->p_sysent->sv_set_syscall_retval)(td, EINTR);
	for (has_sig = 0; !has_sig;) {
		while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "pause",
		    0) == 0)
			/* void */;
		thread_suspend_check(0);
		mtx_lock(&p->p_sigacts->ps_mtx);
		while ((sig = cursig(td)) != 0) {
			KASSERT(sig >= 0, ("sig %d", sig));
			has_sig += postsig(sig);
		}
		mtx_unlock(&p->p_sigacts->ps_mtx);

		/*
		 * If PTRACE_SCE or PTRACE_SCX were set after
		 * userspace entered the syscall, return spurious
		 * EINTR.
		 */
		if ((p->p_ptevents & PTRACE_SYSCALL) != 0)
			has_sig += 1;
	}
	PROC_UNLOCK(p);
	td->td_errno = EINTR;
	td->td_pflags |= TDP_NERRNO;
	return (EJUSTRETURN);
}

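/*
 * Userland usage sketch (illustrative): the classic race-free "wait
 * for SIGUSR1" idiom served by kern_sigsuspend() above; the signal
 * mask is replaced and the wait begins atomically, and the old mask is
 * restored on return.
 *
 *	sigset_t mask;
 *
 *	sigfillset(&mask);
 *	sigdelset(&mask, SIGUSR1);
 *	sigsuspend(&mask);	-- returns -1 with errno set to EINTR
 */
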
#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
/*
 * Compatibility sigsuspend call for old binaries.  Note nonstandard calling
 * convention: libc stub passes mask, not pointer, to save a copyin.
 */
#ifndef _SYS_SYSPROTO_H_
struct osigsuspend_args {
	osigset_t mask;
};
#endif
/* ARGSUSED */
int
osigsuspend(struct thread *td, struct osigsuspend_args *uap)
{
	sigset_t mask;

	OSIG2SIG(uap->mask, mask);
	return (kern_sigsuspend(td, mask));
}
#endif /* COMPAT_43 */

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct osigstack_args {
	struct	sigstack *nss;
	struct	sigstack *oss;
};
#endif
/* ARGSUSED */
int
osigstack(struct thread *td, struct osigstack_args *uap)
{
	struct sigstack nss, oss;
	int error = 0;

	if (uap->nss != NULL) {
		error = copyin(uap->nss, &nss, sizeof(nss));
		if (error)
			return (error);
	}
	oss.ss_sp = td->td_sigstk.ss_sp;
	oss.ss_onstack = sigonstack(cpu_getstack(td));
	if (uap->nss != NULL) {
		td->td_sigstk.ss_sp = nss.ss_sp;
		td->td_sigstk.ss_size = 0;
		td->td_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK;
		td->td_pflags |= TDP_ALTSTACK;
	}
	if (uap->oss != NULL)
		error = copyout(&oss, uap->oss, sizeof(oss));

	return (error);
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct sigaltstack_args {
	stack_t	*ss;
	stack_t	*oss;
};
#endif
/* ARGSUSED */
int
sys_sigaltstack(struct thread *td, struct sigaltstack_args *uap)
{
	stack_t ss, oss;
	int error;

	if (uap->ss != NULL) {
		error = copyin(uap->ss, &ss, sizeof(ss));
		if (error)
			return (error);
	}
	error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
	    (uap->oss != NULL) ? &oss : NULL);
	if (error)
		return (error);
	if (uap->oss != NULL)
		error = copyout(&oss, uap->oss, sizeof(stack_t));
	return (error);
}

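/*
 * Userland usage sketch (illustrative): configure an alternate signal
 * stack, as validated by kern_sigaltstack() below; SIGSTKSZ is the
 * customary size.  Handlers only run on it when installed with
 * SA_ONSTACK.
 *
 *	stack_t ss;
 *
 *	ss.ss_sp = malloc(SIGSTKSZ);
 *	ss.ss_size = SIGSTKSZ;
 *	ss.ss_flags = 0;
 *	if (sigaltstack(&ss, NULL) == -1)
 *		err(1, "sigaltstack");
 */
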
int
kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
{
	struct proc *p = td->td_proc;
	int oonstack;

	oonstack = sigonstack(cpu_getstack(td));

	if (oss != NULL) {
		*oss = td->td_sigstk;
		oss->ss_flags = (td->td_pflags & TDP_ALTSTACK)
		    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	}

	if (ss != NULL) {
		if (oonstack)
			return (EPERM);
		if ((ss->ss_flags & ~SS_DISABLE) != 0)
			return (EINVAL);
		if (!(ss->ss_flags & SS_DISABLE)) {
			if (ss->ss_size < p->p_sysent->sv_minsigstksz)
				return (ENOMEM);

			td->td_sigstk = *ss;
			td->td_pflags |= TDP_ALTSTACK;
		} else {
			td->td_pflags &= ~TDP_ALTSTACK;
		}
	}
	return (0);
}

struct killpg1_ctx {
	struct thread *td;
	ksiginfo_t *ksi;
	int sig;
	bool sent;
	bool found;
	int ret;
};

static void
killpg1_sendsig(struct proc *p, bool notself, struct killpg1_ctx *arg)
{
	int err;

	if (p->p_pid <= 1 || (p->p_flag & P_SYSTEM) != 0 ||
	    (notself && p == arg->td->td_proc) || p->p_state == PRS_NEW)
		return;
	PROC_LOCK(p);
	err = p_cansignal(arg->td, p, arg->sig);
	if (err == 0 && arg->sig != 0)
		pksignal(p, arg->sig, arg->ksi);
	PROC_UNLOCK(p);
	if (err != ESRCH)
		arg->found = true;
	if (err == 0)
		arg->sent = true;
	else if (arg->ret == 0 && err != ESRCH && err != EPERM)
		arg->ret = err;
}

/*
 * Common code for kill process group/broadcast kill.  td is the
 * calling thread.
 */
static int
killpg1(struct thread *td, int sig, int pgid, int all, ksiginfo_t *ksi)
{
	struct proc *p;
	struct pgrp *pgrp;
	struct killpg1_ctx arg;

	arg.td = td;
	arg.ksi = ksi;
	arg.sig = sig;
	arg.sent = false;
	arg.found = false;
	arg.ret = 0;
	if (all) {
		/*
		 * broadcast
		 */
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			killpg1_sendsig(p, true, &arg);
		}
		sx_sunlock(&allproc_lock);
	} else {
		sx_slock(&proctree_lock);
		if (pgid == 0) {
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = td->td_proc->p_pgrp;
			PGRP_LOCK(pgrp);
		} else {
			pgrp = pgfind(pgid);
			if (pgrp == NULL) {
				sx_sunlock(&proctree_lock);
				return (ESRCH);
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			killpg1_sendsig(p, false, &arg);
		}
		PGRP_UNLOCK(pgrp);
	}
	MPASS(arg.ret != 0 || arg.found || !arg.sent);
	if (arg.ret == 0 && !arg.sent)
		arg.ret = arg.found ? EPERM : ESRCH;
	return (arg.ret);
}

#ifndef _SYS_SYSPROTO_H_
struct kill_args {
	int	pid;
	int	signum;
};
#endif
/* ARGSUSED */
int
sys_kill(struct thread *td, struct kill_args *uap)
{

	return (kern_kill(td, uap->pid, uap->signum));
}

int
kern_kill(struct thread *td, pid_t pid, int signum)
{
	ksiginfo_t ksi;
	struct proc *p;
	int error;

	/*
	 * A process in capability mode can send signals only to
	 * itself.  The main rationale behind this is that abort(3) is
	 * implemented as kill(getpid(), SIGABRT).
	 */
	if (IN_CAPABILITY_MODE(td) && pid != td->td_proc->p_pid)
		return (ECAPMODE);

	AUDIT_ARG_SIGNUM(signum);
	AUDIT_ARG_PID(pid);
	if ((u_int)signum > _SIG_MAXSIG)
		return (EINVAL);

	ksiginfo_init(&ksi);
	ksi.ksi_signo = signum;
	ksi.ksi_code = SI_USER;
	ksi.ksi_pid = td->td_proc->p_pid;
	ksi.ksi_uid = td->td_ucred->cr_ruid;

	if (pid > 0) {
		/* kill single process */
		if ((p = pfind_any(pid)) == NULL)
			return (ESRCH);
		AUDIT_ARG_PROCESS(p);
		error = p_cansignal(td, p, signum);
		if (error == 0 && signum)
			pksignal(p, signum, &ksi);
		PROC_UNLOCK(p);
		return (error);
	}
	switch (pid) {
	case -1:		/* broadcast signal */
		return (killpg1(td, signum, 0, 1, &ksi));
	case 0:			/* signal own process group */
		return (killpg1(td, signum, 0, 0, &ksi));
	default:		/* negative explicit process group */
		return (killpg1(td, signum, -pid, 0, &ksi));
	}
	/* NOTREACHED */
}

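/*
 * Userland usage sketch (illustrative): the pid argument of kill(2)
 * selects the dispatch implemented above.
 *
 *	kill(1234, SIGTERM);	-- pid > 0: signal that one process
 *	kill(0, SIGTERM);	-- pid == 0: signal own process group
 *	kill(-1, SIGTERM);	-- pid == -1: broadcast
 *	kill(-5678, SIGTERM);	-- pid < -1: signal process group 5678
 */
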
int
sys_pdkill(struct thread *td, struct pdkill_args *uap)
{
	struct proc *p;
	int error;

	AUDIT_ARG_SIGNUM(uap->signum);
	AUDIT_ARG_FD(uap->fd);
	if ((u_int)uap->signum > _SIG_MAXSIG)
		return (EINVAL);

	error = procdesc_find(td, uap->fd, &cap_pdkill_rights, &p);
	if (error)
		return (error);
	AUDIT_ARG_PROCESS(p);
	error = p_cansignal(td, p, uap->signum);
	if (error == 0 && uap->signum)
		kern_psignal(p, uap->signum);
	PROC_UNLOCK(p);
	return (error);
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct okillpg_args {
	int	pgid;
	int	signum;
};
#endif
/* ARGSUSED */
int
okillpg(struct thread *td, struct okillpg_args *uap)
{
	ksiginfo_t ksi;

	AUDIT_ARG_SIGNUM(uap->signum);
	AUDIT_ARG_PID(uap->pgid);
	if ((u_int)uap->signum > _SIG_MAXSIG)
		return (EINVAL);

	ksiginfo_init(&ksi);
	ksi.ksi_signo = uap->signum;
	ksi.ksi_code = SI_USER;
	ksi.ksi_pid = td->td_proc->p_pid;
	ksi.ksi_uid = td->td_ucred->cr_ruid;
	return (killpg1(td, uap->signum, uap->pgid, 0, &ksi));
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct sigqueue_args {
	pid_t pid;
	int signum;
	/* union sigval */ void *value;
};
#endif
int
sys_sigqueue(struct thread *td, struct sigqueue_args *uap)
{
	union sigval sv;

	sv.sival_ptr = uap->value;

	return (kern_sigqueue(td, uap->pid, uap->signum, &sv));
}

int
kern_sigqueue(struct thread *td, pid_t pid, int signum, union sigval *value)
{
	ksiginfo_t ksi;
	struct proc *p;
	int error;

	if ((u_int)signum > _SIG_MAXSIG)
		return (EINVAL);

	/*
	 * The specification says sigqueue() can only send a signal to
	 * a single process.
	 */
	if (pid <= 0)
		return (EINVAL);

	if ((p = pfind_any(pid)) == NULL)
		return (ESRCH);
	error = p_cansignal(td, p, signum);
	if (error == 0 && signum != 0) {
		ksiginfo_init(&ksi);
		ksi.ksi_flags = KSI_SIGQ;
		ksi.ksi_signo = signum;
		ksi.ksi_code = SI_QUEUE;
		ksi.ksi_pid = td->td_proc->p_pid;
		ksi.ksi_uid = td->td_ucred->cr_ruid;
		ksi.ksi_value = *value;
		error = pksignal(p, ksi.ksi_signo, &ksi);
	}
	PROC_UNLOCK(p);
	return (error);
}

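/*
 * Userland usage sketch (illustrative): queue SIGUSR1 with an attached
 * value via sigqueue(2); a receiver whose handler was installed with
 * SA_SIGINFO sees the value in the siginfo_t's si_value field.
 *
 *	union sigval sv;
 *
 *	sv.sival_int = 42;
 *	if (sigqueue(pid, SIGUSR1, sv) == -1)
 *		err(1, "sigqueue");
 */
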
/*
 * Send a signal to a process group.
 */
void
gsignal(int pgid, int sig, ksiginfo_t *ksi)
{
	struct pgrp *pgrp;

	if (pgid != 0) {
		sx_slock(&proctree_lock);
		pgrp = pgfind(pgid);
		sx_sunlock(&proctree_lock);
		if (pgrp != NULL) {
			pgsignal(pgrp, sig, 0, ksi);
			PGRP_UNLOCK(pgrp);
		}
	}
}

/*
 * Send a signal to a process group.  If checkctty is 1,
 * limit to members which have a controlling terminal.
 */
void
pgsignal(struct pgrp *pgrp, int sig, int checkctty, ksiginfo_t *ksi)
{
	struct proc *p;

	if (pgrp) {
		PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    (checkctty == 0 || p->p_flag & P_CONTROLT))
				pksignal(p, sig, ksi);
			PROC_UNLOCK(p);
		}
	}
}

/*
 * Recalculate the signal mask and reset the signal disposition after
 * usermode frame for delivery is formed.  Should be called after
 * mach-specific routine, because sysent->sv_sendsig() needs correct
 * ps_siginfo and signal mask.
 */
static void
postsig_done(int sig, struct thread *td, struct sigacts *ps)
{
	sigset_t mask;

	mtx_assert(&ps->ps_mtx, MA_OWNED);
	td->td_ru.ru_nsignals++;
	mask = ps->ps_catchmask[_SIG_IDX(sig)];
	if (!SIGISMEMBER(ps->ps_signodefer, sig))
		SIGADDSET(mask, sig);
	kern_sigprocmask(td, SIG_BLOCK, &mask, NULL,
	    SIGPROCMASK_PROC_LOCKED | SIGPROCMASK_PS_LOCKED);
	if (SIGISMEMBER(ps->ps_sigreset, sig))
		sigdflt(ps, sig);
}

/*
 * Send a signal caused by a trap to the current thread.  If it will be
 * caught immediately, deliver it with correct code.  Otherwise, post it
 * normally.
 */
void
trapsignal(struct thread *td, ksiginfo_t *ksi)
{
	struct sigacts *ps;
	struct proc *p;
	sigset_t sigmask;
	int code, sig;

	p = td->td_proc;
	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	KASSERT(_SIG_VALID(sig), ("invalid signal"));

	sigfastblock_fetch(td);
	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	sigmask = td->td_sigmask;
	if (td->td_sigblock_val != 0)
		SIGSETOR(sigmask, fastblock_mask);
	if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
	    !SIGISMEMBER(sigmask, sig)) {
#ifdef KTRACE
		if (KTRPOINT(curthread, KTR_PSIG))
			ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
			    &td->td_sigmask, code);
#endif
		(*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)],
		    ksi, &td->td_sigmask);
		postsig_done(sig, td, ps);
		mtx_unlock(&ps->ps_mtx);
	} else {
		/*
		 * Avoid a possible infinite loop if the thread is
		 * masking the signal or the process is ignoring the
		 * signal.
		 */
2072 if (kern_forcesigexit && (SIGISMEMBER(sigmask, sig) ||
2073 ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN)) {
2074 SIGDELSET(td->td_sigmask, sig);
2075 SIGDELSET(ps->ps_sigcatch, sig);
2076 SIGDELSET(ps->ps_sigignore, sig);
2077 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
2078 td->td_pflags &= ~TDP_SIGFASTBLOCK;
2079 td->td_sigblock_val = 0;
2080 }
2081 mtx_unlock(&ps->ps_mtx);
2082 p->p_sig = sig; /* XXX to verify code */
2083 tdsendsignal(p, td, sig, ksi);
2084 }
2085 PROC_UNLOCK(p);
2086 }
2087
2088 static struct thread *
2089 sigtd(struct proc *p, int sig, bool fast_sigblock)
2090 {
2091 struct thread *td, *signal_td;
2092
2093 PROC_LOCK_ASSERT(p, MA_OWNED);
2094 MPASS(!fast_sigblock || p == curproc);
2095
2096 /*
2097 * Check if the current thread can handle the signal without
2098 * switching context to another thread.
2099 */
2100 if (curproc == p && !SIGISMEMBER(curthread->td_sigmask, sig) &&
2101 (!fast_sigblock || curthread->td_sigblock_val == 0))
2102 return (curthread);
2103 signal_td = NULL;
2104 FOREACH_THREAD_IN_PROC(p, td) {
2105 if (!SIGISMEMBER(td->td_sigmask, sig) && (!fast_sigblock ||
2106 td != curthread || td->td_sigblock_val == 0)) {
2107 signal_td = td;
2108 break;
2109 }
2110 }
2111 if (signal_td == NULL)
2112 signal_td = FIRST_THREAD_IN_PROC(p);
2113 return (signal_td);
2114 }
2115
2116 /*
2117 * Send the signal to the process. If the signal has an action, the action
2118 * is usually performed by the target process rather than the caller; we add
2119 * the signal to the set of pending signals for the process.
2120 *
2121 * Exceptions:
2122 * o When a stop signal is sent to a sleeping process that takes the
2123 * default action, the process is stopped without awakening it.
2124 * o SIGCONT restarts stopped processes (or puts them back to sleep)
2125 * regardless of the signal action (e.g., blocked or ignored).
2126 *
2127 * Other ignored signals are discarded immediately.
2128 *
2129 * NB: This function may be entered from the debugger via the "kill" DDB
2130 * command. There is little that can be done to mitigate the possibly messy
2131 * side effects of this unwise possibility.
2132 */
2133 void
2134 kern_psignal(struct proc *p, int sig)
2135 {
2136 ksiginfo_t ksi;
2137
2138 ksiginfo_init(&ksi);
2139 ksi.ksi_signo = sig;
2140 ksi.ksi_code = SI_KERNEL;
2141 (void) tdsendsignal(p, NULL, sig, &ksi);
2142 }
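/*
 * Typical in-kernel caller pattern (a sketch; the locking requirement is
 * the one tdsendsignal() asserts with PROC_LOCK_ASSERT() below):
 *
 *	PROC_LOCK(p);
 *	kern_psignal(p, SIGHUP);
 *	PROC_UNLOCK(p);
 */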
2143
2144 int
2145 pksignal(struct proc *p, int sig, ksiginfo_t *ksi)
2146 {
2147
2148 return (tdsendsignal(p, NULL, sig, ksi));
2149 }
2150
2151 /* Utility function for finding a thread to send signal event to. */
2152 int
2153 sigev_findtd(struct proc *p, struct sigevent *sigev, struct thread **ttd)
2154 {
2155 struct thread *td;
2156
2157 if (sigev->sigev_notify == SIGEV_THREAD_ID) {
2158 td = tdfind(sigev->sigev_notify_thread_id, p->p_pid);
2159 if (td == NULL)
2160 return (ESRCH);
2161 *ttd = td;
2162 } else {
2163 *ttd = NULL;
2164 PROC_LOCK(p);
2165 }
2166 return (0);
2167 }
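/*
 * Caller pattern (a sketch, assuming tdfind() returns the thread with
 * its process locked, as elsewhere in the kernel): both branches above
 * leave the process lock held, so the caller unlocks the same way
 * whichever notification style was requested:
 *
 *	error = sigev_findtd(p, sigev, &td);
 *	if (error != 0)
 *		return (error);
 *	... queue the event to td, or process-wide when td == NULL ...
 *	PROC_UNLOCK(p);
 */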
2168
2169 void
2170 tdsignal(struct thread *td, int sig)
2171 {
2172 ksiginfo_t ksi;
2173
2174 ksiginfo_init(&ksi);
2175 ksi.ksi_signo = sig;
2176 ksi.ksi_code = SI_KERNEL;
2177 (void) tdsendsignal(td->td_proc, td, sig, &ksi);
2178 }
2179
2180 void
2181 tdksignal(struct thread *td, int sig, ksiginfo_t *ksi)
2182 {
2183
2184 (void) tdsendsignal(td->td_proc, td, sig, ksi);
2185 }
2186
2187 static int
2188 sig_sleepq_abort(struct thread *td, int intrval)
2189 {
2190 THREAD_LOCK_ASSERT(td, MA_OWNED);
2191
2192 if (intrval == 0 && (td->td_flags & TDF_SIGWAIT) == 0) {
2193 thread_unlock(td);
2194 return (0);
2195 }
2196 return (sleepq_abort(td, intrval));
2197 }
2198
2199 int
2200 tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
2201 {
2202 sig_t action;
2203 sigqueue_t *sigqueue;
2204 int prop;
2205 struct sigacts *ps;
2206 int intrval;
2207 int ret = 0;
2208 int wakeup_swapper;
2209
2210 MPASS(td == NULL || p == td->td_proc);
2211 PROC_LOCK_ASSERT(p, MA_OWNED);
2212
2213 if (!_SIG_VALID(sig))
2214 panic("%s(): invalid signal %d", __func__, sig);
2215
2216 KASSERT(ksi == NULL || !KSI_ONQ(ksi), ("%s: ksi on queue", __func__));
2217
2218 /*
2219 * IEEE Std 1003.1-2001: return success when killing a zombie.
2220 */
2221 if (p->p_state == PRS_ZOMBIE) {
2222 if (ksi && (ksi->ksi_flags & KSI_INS))
2223 ksiginfo_tryfree(ksi);
2224 return (ret);
2225 }
2226
2227 ps = p->p_sigacts;
2228 KNOTE_LOCKED(p->p_klist, NOTE_SIGNAL | sig);
2229 prop = sigprop(sig);
2230
2231 if (td == NULL) {
2232 td = sigtd(p, sig, false);
2233 sigqueue = &p->p_sigqueue;
2234 } else
2235 sigqueue = &td->td_sigqueue;
2236
2237 SDT_PROBE3(proc, , , signal__send, td, p, sig);
2238
2239 /*
2240 * If the signal is being ignored, then we forget about it
2241 * immediately, except when the target process executes
2242 * sigwait(). (Note: we don't set SIGCONT in ps_sigignore,
2243 * and if it is set to SIG_IGN, action will be SIG_DFL here.)
2244 */
2245 mtx_lock(&ps->ps_mtx);
2246 if (SIGISMEMBER(ps->ps_sigignore, sig)) {
2247 if (kern_sig_discard_ign &&
2248 (p->p_sysent->sv_flags & SV_SIG_DISCIGN) == 0) {
2249 SDT_PROBE3(proc, , , signal__discard, td, p, sig);
2250
2251 mtx_unlock(&ps->ps_mtx);
2252 if (ksi && (ksi->ksi_flags & KSI_INS))
2253 ksiginfo_tryfree(ksi);
2254 return (ret);
2255 } else {
2256 action = SIG_CATCH;
2257 intrval = 0;
2258 }
2259 } else {
2260 if (SIGISMEMBER(td->td_sigmask, sig))
2261 action = SIG_HOLD;
2262 else if (SIGISMEMBER(ps->ps_sigcatch, sig))
2263 action = SIG_CATCH;
2264 else
2265 action = SIG_DFL;
2266 if (SIGISMEMBER(ps->ps_sigintr, sig))
2267 intrval = EINTR;
2268 else
2269 intrval = ERESTART;
2270 }
2271 mtx_unlock(&ps->ps_mtx);
2272
2273 if (prop & SIGPROP_CONT)
2274 sigqueue_delete_stopmask_proc(p);
2275 else if (prop & SIGPROP_STOP) {
2276 /*
2277 * If sending a tty stop signal to a member of an orphaned
2278 * process group, discard the signal here if the action
2279 * is default; don't stop the process below if sleeping,
2280 * and don't clear any pending SIGCONT.
2281 */
2282 if ((prop & SIGPROP_TTYSTOP) != 0 &&
2283 (p->p_pgrp->pg_flags & PGRP_ORPHANED) != 0 &&
2284 action == SIG_DFL) {
2285 if (ksi && (ksi->ksi_flags & KSI_INS))
2286 ksiginfo_tryfree(ksi);
2287 return (ret);
2288 }
2289 sigqueue_delete_proc(p, SIGCONT);
2290 if (p->p_flag & P_CONTINUED) {
2291 p->p_flag &= ~P_CONTINUED;
2292 PROC_LOCK(p->p_pptr);
2293 sigqueue_take(p->p_ksi);
2294 PROC_UNLOCK(p->p_pptr);
2295 }
2296 }
2297
2298 ret = sigqueue_add(sigqueue, sig, ksi);
2299 if (ret != 0)
2300 return (ret);
2301 signotify(td);
2302 /*
2303 * Defer further processing for signals which are held,
2304 * except that stopped processes must be continued by SIGCONT.
2305 */
2306 if (action == SIG_HOLD &&
2307 !((prop & SIGPROP_CONT) && (p->p_flag & P_STOPPED_SIG)))
2308 return (ret);
2309
2310 wakeup_swapper = 0;
2311
2312 /*
2313 * Some signals have a process-wide effect and a per-thread
2314 * component. Most processing occurs when the process next
2315 * tries to cross the user boundary; however, there are some
2316 * times when processing needs to be done immediately, such as
2317 * waking up threads so that they can cross the user boundary.
2318 * We try to do the per-process part here.
2319 */
2320 if (P_SHOULDSTOP(p)) {
2321 KASSERT(!(p->p_flag & P_WEXIT),
2322 ("signal to stopped but exiting process"));
2323 if (sig == SIGKILL) {
2324 /*
2325 * If traced process is already stopped,
2326 * then no further action is necessary.
2327 */
2328 if (p->p_flag & P_TRACED)
2329 goto out;
2330 /*
2331 * SIGKILL sets process running.
2332 * It will die elsewhere.
2333 * All threads must be restarted.
2334 */
2335 p->p_flag &= ~P_STOPPED_SIG;
2336 goto runfast;
2337 }
2338
2339 if (prop & SIGPROP_CONT) {
2340 /*
2341 * If traced process is already stopped,
2342 * then no further action is necessary.
2343 */
2344 if (p->p_flag & P_TRACED)
2345 goto out;
2346 /*
2347 * If SIGCONT is default (or ignored), we continue the
2348 * process but don't leave the signal in sigqueue as
2349 * it has no further action. If SIGCONT is held, we
2350 * continue the process and leave the signal in
2351 * sigqueue. If the process catches SIGCONT, let it
2352 * handle the signal itself. If it isn't waiting on
2353 * an event, it goes back to the run state;
2354 * otherwise, the process goes back to the sleep state.
2355 */
2356 p->p_flag &= ~P_STOPPED_SIG;
2357 PROC_SLOCK(p);
2358 if (p->p_numthreads == p->p_suspcount) {
2359 PROC_SUNLOCK(p);
2360 p->p_flag |= P_CONTINUED;
2361 p->p_xsig = SIGCONT;
2362 PROC_LOCK(p->p_pptr);
2363 childproc_continued(p);
2364 PROC_UNLOCK(p->p_pptr);
2365 PROC_SLOCK(p);
2366 }
2367 if (action == SIG_DFL) {
2368 thread_unsuspend(p);
2369 PROC_SUNLOCK(p);
2370 sigqueue_delete(sigqueue, sig);
2371 goto out_cont;
2372 }
2373 if (action == SIG_CATCH) {
2374 /*
2375 * The process wants to catch it so it needs
2376 * to run at least one thread, but which one?
2377 */
2378 PROC_SUNLOCK(p);
2379 goto runfast;
2380 }
2381 /*
2382 * The signal is not ignored or caught.
2383 */
2384 thread_unsuspend(p);
2385 PROC_SUNLOCK(p);
2386 goto out_cont;
2387 }
2388
2389 if (prop & SIGPROP_STOP) {
2390 /*
2391 * If traced process is already stopped,
2392 * then no further action is necessary.
2393 */
2394 if (p->p_flag & P_TRACED)
2395 goto out;
2396 /*
2397 * Already stopped, don't need to stop again
2398 * (if we did, the shell could get confused).
2399 * Just make sure the signal STOP bit is set.
2400 */
2401 p->p_flag |= P_STOPPED_SIG;
2402 sigqueue_delete(sigqueue, sig);
2403 goto out;
2404 }
2405
2406 /*
2407 * All other kinds of signals:
2408 * If a thread is sleeping interruptibly, simulate a
2409 * wakeup so that when it is continued it will be made
2410 * runnable and can look at the signal. However, don't make
2411 * the PROCESS runnable; leave it stopped.
2412 * It may run a bit until it hits a thread_suspend_check().
2413 */
2414 PROC_SLOCK(p);
2415 thread_lock(td);
2416 if (TD_CAN_ABORT(td))
2417 wakeup_swapper = sig_sleepq_abort(td, intrval);
2418 else
2419 thread_unlock(td);
2420 PROC_SUNLOCK(p);
2421 goto out;
2422 /*
2423 * Mutexes are short lived. Threads waiting on them will
2424 * hit thread_suspend_check() soon.
2425 */
2426 } else if (p->p_state == PRS_NORMAL) {
2427 if (p->p_flag & P_TRACED || action == SIG_CATCH) {
2428 tdsigwakeup(td, sig, action, intrval);
2429 goto out;
2430 }
2431
2432 MPASS(action == SIG_DFL);
2433
2434 if (prop & SIGPROP_STOP) {
2435 if (p->p_flag & (P_PPWAIT|P_WEXIT))
2436 goto out;
2437 p->p_flag |= P_STOPPED_SIG;
2438 p->p_xsig = sig;
2439 PROC_SLOCK(p);
2440 wakeup_swapper = sig_suspend_threads(td, p, 1);
2441 if (p->p_numthreads == p->p_suspcount) {
2442 /*
2443 * Only a thread sending a signal to another
2444 * process can reach here. A thread sending a
2445 * signal to its own process does not suspend
2446 * itself here, so p_numthreads should never
2447 * equal p_suspcount in that case.
2448 */
2449 thread_stopped(p);
2450 PROC_SUNLOCK(p);
2451 sigqueue_delete_proc(p, p->p_xsig);
2452 } else
2453 PROC_SUNLOCK(p);
2454 goto out;
2455 }
2456 } else {
2457 /* Not in "NORMAL" state. Discard the signal. */
2458 sigqueue_delete(sigqueue, sig);
2459 goto out;
2460 }
2461
2462 /*
2463 * The process is not stopped so we need to apply the signal to all the
2464 * running threads.
2465 */
2466 runfast:
2467 tdsigwakeup(td, sig, action, intrval);
2468 PROC_SLOCK(p);
2469 thread_unsuspend(p);
2470 PROC_SUNLOCK(p);
2471 out_cont:
2472 itimer_proc_continue(p);
2473 kqtimer_proc_continue(p);
2474 out:
2475 /* If we jump here, proc slock should not be owned. */
2476 PROC_SLOCK_ASSERT(p, MA_NOTOWNED);
2477 if (wakeup_swapper)
2478 kick_proc0();
2479
2480 return (ret);
2481 }
2482
2483 /*
2484 * The force of a signal has been directed against a single
2485 * thread. We need to see what we can do about knocking it
2486 * out of any sleep it may be in etc.
2487 */
2488 static void
2489 tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
2490 {
2491 struct proc *p = td->td_proc;
2492 int prop, wakeup_swapper;
2493
2494 PROC_LOCK_ASSERT(p, MA_OWNED);
2495 prop = sigprop(sig);
2496
2497 PROC_SLOCK(p);
2498 thread_lock(td);
2499 /*
2500 * Bring the priority of a thread up if we want it to get
2501 * killed in this lifetime. Be careful to avoid bumping the
2502 * priority of the idle thread, since we still allow signalling
2503 * kernel processes.
2504 */
2505 if (action == SIG_DFL && (prop & SIGPROP_KILL) != 0 &&
2506 td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
2507 sched_prio(td, PUSER);
2508 if (TD_ON_SLEEPQ(td)) {
2509 /*
2510 * If the thread is sleeping uninterruptibly
2511 * we can't interrupt the sleep... the signal will
2512 * be noticed when the process returns through
2513 * trap() or syscall().
2514 */
2515 if ((td->td_flags & TDF_SINTR) == 0)
2516 goto out;
2517 /*
2518 * If SIGCONT is default (or ignored) and the process is
2519 * asleep, we are finished; the process should not
2520 * be awakened.
2521 */
2522 if ((prop & SIGPROP_CONT) && action == SIG_DFL) {
2523 thread_unlock(td);
2524 PROC_SUNLOCK(p);
2525 sigqueue_delete(&p->p_sigqueue, sig);
2526 /*
2527 * It may be on either list in this state.
2528 * Remove from both for now.
2529 */
2530 sigqueue_delete(&td->td_sigqueue, sig);
2531 return;
2532 }
2533
2534 /*
2535 * Don't awaken a sleeping thread for SIGSTOP if the
2536 * STOP signal is deferred.
2537 */
2538 if ((prop & SIGPROP_STOP) != 0 && (td->td_flags & (TDF_SBDRY |
2539 TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY)
2540 goto out;
2541
2542 /*
2543 * Give low priority threads a better chance to run.
2544 */
2545 if (td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
2546 sched_prio(td, PUSER);
2547
2548 wakeup_swapper = sig_sleepq_abort(td, intrval);
2549 PROC_SUNLOCK(p);
2550 if (wakeup_swapper)
2551 kick_proc0();
2552 return;
2553 }
2554
2555 /*
2556 * Other states do nothing with the signal immediately,
2557 * other than kicking ourselves if we are running.
2558 * It will either never be noticed, or noticed very soon.
2559 */
2560 #ifdef SMP
2561 if (TD_IS_RUNNING(td) && td != curthread)
2562 forward_signal(td);
2563 #endif
2564
2565 out:
2566 PROC_SUNLOCK(p);
2567 thread_unlock(td);
2568 }
2569
2570 static void
2571 ptrace_coredump(struct thread *td)
2572 {
2573 struct proc *p;
2574 struct thr_coredump_req *tcq;
2575 void *rl_cookie;
2576
2577 MPASS(td == curthread);
2578 p = td->td_proc;
2579 PROC_LOCK_ASSERT(p, MA_OWNED);
2580 if ((td->td_dbgflags & TDB_COREDUMPRQ) == 0)
2581 return;
2582 KASSERT((p->p_flag & P_STOPPED_TRACE) != 0, ("not stopped"));
2583
2584 tcq = td->td_coredump;
2585 KASSERT(tcq != NULL, ("td_coredump is NULL"));
2586
2587 if (p->p_sysent->sv_coredump == NULL) {
2588 tcq->tc_error = ENOSYS;
2589 goto wake;
2590 }
2591
2592 PROC_UNLOCK(p);
2593 rl_cookie = vn_rangelock_wlock(tcq->tc_vp, 0, OFF_MAX);
2594
2595 tcq->tc_error = p->p_sysent->sv_coredump(td, tcq->tc_vp,
2596 tcq->tc_limit, tcq->tc_flags);
2597
2598 vn_rangelock_unlock(tcq->tc_vp, rl_cookie);
2599 PROC_LOCK(p);
2600 wake:
2601 td->td_dbgflags &= ~TDB_COREDUMPRQ;
2602 td->td_coredump = NULL;
2603 wakeup(p);
2604 }
2605
2606 static int
2607 sig_suspend_threads(struct thread *td, struct proc *p, int sending)
2608 {
2609 struct thread *td2;
2610 int wakeup_swapper;
2611
2612 PROC_LOCK_ASSERT(p, MA_OWNED);
2613 PROC_SLOCK_ASSERT(p, MA_OWNED);
2614 MPASS(sending || td == curthread);
2615
2616 wakeup_swapper = 0;
2617 FOREACH_THREAD_IN_PROC(p, td2) {
2618 thread_lock(td2);
2619 td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
2620 if ((TD_IS_SLEEPING(td2) || TD_IS_SWAPPED(td2)) &&
2621 (td2->td_flags & TDF_SINTR)) {
2622 if (td2->td_flags & TDF_SBDRY) {
2623 /*
2624 * Once a thread is asleep with
2625 * TDF_SBDRY and without TDF_SERESTART
2626 * or TDF_SEINTR set, it should never
2627 * become suspended due to this check.
2628 */
2629 KASSERT(!TD_IS_SUSPENDED(td2),
2630 ("thread with deferred stops suspended"));
2631 if (TD_SBDRY_INTR(td2)) {
2632 wakeup_swapper |= sleepq_abort(td2,
2633 TD_SBDRY_ERRNO(td2));
2634 continue;
2635 }
2636 } else if (!TD_IS_SUSPENDED(td2))
2637 thread_suspend_one(td2);
2638 } else if (!TD_IS_SUSPENDED(td2)) {
2639 if (sending || td != td2)
2640 td2->td_flags |= TDF_ASTPENDING;
2641 #ifdef SMP
2642 if (TD_IS_RUNNING(td2) && td2 != td)
2643 forward_signal(td2);
2644 #endif
2645 }
2646 thread_unlock(td2);
2647 }
2648 return (wakeup_swapper);
2649 }
2650
2651 /*
2652 * Stop the process for an event deemed interesting to the debugger. If si is
2653 * non-NULL, this is a signal exchange; the new signal requested by the
2654 * debugger will be returned for handling. If si is NULL, this is some other
2655 * type of interesting event. The debugger may request a signal be delivered in
2656 * that case as well, however it will be deferred until it can be handled.
2657 */
2658 int
2659 ptracestop(struct thread *td, int sig, ksiginfo_t *si)
2660 {
2661 struct proc *p = td->td_proc;
2662 struct thread *td2;
2663 ksiginfo_t ksi;
2664
2665 PROC_LOCK_ASSERT(p, MA_OWNED);
2666 KASSERT(!(p->p_flag & P_WEXIT), ("Stopping exiting process"));
2667 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
2668 &p->p_mtx.lock_object, "Stopping for traced signal");
2669
2670 td->td_xsig = sig;
2671
2672 if (si == NULL || (si->ksi_flags & KSI_PTRACE) == 0) {
2673 td->td_dbgflags |= TDB_XSIG;
2674 CTR4(KTR_PTRACE, "ptracestop: tid %d (pid %d) flags %#x sig %d",
2675 td->td_tid, p->p_pid, td->td_dbgflags, sig);
2676 PROC_SLOCK(p);
2677 while ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_XSIG)) {
2678 if (P_KILLED(p)) {
2679 /*
2680 * Ensure that, if we've been PT_KILLed, the
2681 * exit status reflects that. Another thread
2682 * may also be in ptracestop(), having just
2683 * received the SIGKILL, but this thread was
2684 * unsuspended first.
2685 */
2686 td->td_dbgflags &= ~TDB_XSIG;
2687 td->td_xsig = SIGKILL;
2688 p->p_ptevents = 0;
2689 break;
2690 }
2691 if (p->p_flag & P_SINGLE_EXIT &&
2692 !(td->td_dbgflags & TDB_EXIT)) {
2693 /*
2694 * Ignore ptrace stops except for thread exit
2695 * events when the process exits.
2696 */
2697 td->td_dbgflags &= ~TDB_XSIG;
2698 PROC_SUNLOCK(p);
2699 return (0);
2700 }
2701
2702 /*
2703 * Make wait(2) work. Ensure that right after the
2704 * attach, the thread which was decided to become the
2705 * leader of attach gets reported to the waiter.
2706 * Otherwise, just avoid overwriting another thread's
2707 * assignment to p_xthread. If another thread has
2708 * already set p_xthread, the current thread will get
2709 * a chance to report itself upon the next iteration.
2710 */
2711 if ((td->td_dbgflags & TDB_FSTP) != 0 ||
2712 ((p->p_flag2 & P2_PTRACE_FSTP) == 0 &&
2713 p->p_xthread == NULL)) {
2714 p->p_xsig = sig;
2715 p->p_xthread = td;
2716
2717 /*
2718 * If we are on sleepqueue already,
2719 * let sleepqueue code decide if it
2720 * needs to go sleep after attach.
2721 */
2722 if (td->td_wchan == NULL)
2723 td->td_dbgflags &= ~TDB_FSTP;
2724
2725 p->p_flag2 &= ~P2_PTRACE_FSTP;
2726 p->p_flag |= P_STOPPED_SIG | P_STOPPED_TRACE;
2727 sig_suspend_threads(td, p, 0);
2728 }
2729 if ((td->td_dbgflags & TDB_STOPATFORK) != 0) {
2730 td->td_dbgflags &= ~TDB_STOPATFORK;
2731 }
2732 stopme:
2733 td->td_dbgflags |= TDB_SSWITCH;
2734 thread_suspend_switch(td, p);
2735 td->td_dbgflags &= ~TDB_SSWITCH;
2736 if ((td->td_dbgflags & TDB_COREDUMPRQ) != 0) {
2737 PROC_SUNLOCK(p);
2738 ptrace_coredump(td);
2739 PROC_SLOCK(p);
2740 goto stopme;
2741 }
2742 if (p->p_xthread == td)
2743 p->p_xthread = NULL;
2744 if (!(p->p_flag & P_TRACED))
2745 break;
2746 if (td->td_dbgflags & TDB_SUSPEND) {
2747 if (p->p_flag & P_SINGLE_EXIT)
2748 break;
2749 goto stopme;
2750 }
2751 }
2752 PROC_SUNLOCK(p);
2753 }
2754
2755 if (si != NULL && sig == td->td_xsig) {
2756 /* Parent wants us to take the original signal unchanged. */
2757 si->ksi_flags |= KSI_HEAD;
2758 if (sigqueue_add(&td->td_sigqueue, sig, si) != 0)
2759 si->ksi_signo = 0;
2760 } else if (td->td_xsig != 0) {
2761 /*
2762 * If parent wants us to take a new signal, then it will leave
2763 * it in td->td_xsig; otherwise we just look for signals again.
2764 */
2765 ksiginfo_init(&ksi);
2766 ksi.ksi_signo = td->td_xsig;
2767 ksi.ksi_flags |= KSI_PTRACE;
2768 td2 = sigtd(p, td->td_xsig, false);
2769 tdsendsignal(p, td2, td->td_xsig, &ksi);
2770 if (td != td2)
2771 return (0);
2772 }
2773
2774 return (td->td_xsig);
2775 }
2776
2777 static void
2778 reschedule_signals(struct proc *p, sigset_t block, int flags)
2779 {
2780 struct sigacts *ps;
2781 struct thread *td;
2782 int sig;
2783 bool fastblk, pslocked;
2784
2785 PROC_LOCK_ASSERT(p, MA_OWNED);
2786 ps = p->p_sigacts;
2787 pslocked = (flags & SIGPROCMASK_PS_LOCKED) != 0;
2788 mtx_assert(&ps->ps_mtx, pslocked ? MA_OWNED : MA_NOTOWNED);
2789 if (SIGISEMPTY(p->p_siglist))
2790 return;
2791 SIGSETAND(block, p->p_siglist);
2792 fastblk = (flags & SIGPROCMASK_FASTBLK) != 0;
2793 SIG_FOREACH(sig, &block) {
2794 td = sigtd(p, sig, fastblk);
2795
2796 /*
2797 * If sigtd() selected us even though sigfastblock is
2798 * blocking, do not activate the AST or wake us, to
2799 * avoid a loop in the AST handler.
2800 */
2801 if (fastblk && td == curthread)
2802 continue;
2803
2804 signotify(td);
2805 if (!pslocked)
2806 mtx_lock(&ps->ps_mtx);
2807 if (p->p_flag & P_TRACED ||
2808 (SIGISMEMBER(ps->ps_sigcatch, sig) &&
2809 !SIGISMEMBER(td->td_sigmask, sig))) {
2810 tdsigwakeup(td, sig, SIG_CATCH,
2811 (SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR :
2812 ERESTART));
2813 }
2814 if (!pslocked)
2815 mtx_unlock(&ps->ps_mtx);
2816 }
2817 }
2818
2819 void
2820 tdsigcleanup(struct thread *td)
2821 {
2822 struct proc *p;
2823 sigset_t unblocked;
2824
2825 p = td->td_proc;
2826 PROC_LOCK_ASSERT(p, MA_OWNED);
2827
2828 sigqueue_flush(&td->td_sigqueue);
2829 if (p->p_numthreads == 1)
2830 return;
2831
2832 /*
2833 * Since we cannot handle signals, notify the signal post
2834 * code about this by filling the sigmask.
2835 *
2836 * Also, if needed, wake up thread(s) that do not block the
2837 * same signals as the exiting thread, since the thread might
2838 * have been selected for delivery and woken up.
2839 */
2840 SIGFILLSET(unblocked);
2841 SIGSETNAND(unblocked, td->td_sigmask);
2842 SIGFILLSET(td->td_sigmask);
2843 reschedule_signals(p, unblocked, 0);
2844
2845 }
2846
2847 static int
2848 sigdeferstop_curr_flags(int cflags)
2849 {
2850
2851 MPASS((cflags & (TDF_SEINTR | TDF_SERESTART)) == 0 ||
2852 (cflags & TDF_SBDRY) != 0);
2853 return (cflags & (TDF_SBDRY | TDF_SEINTR | TDF_SERESTART));
2854 }
2855
2856 /*
2857 * Defer the delivery of SIGSTOP for the current thread, according to
2858 * the requested mode. Returns previous flags, which must be restored
2859 * by sigallowstop().
2860 *
2861 * TDF_SBDRY, TDF_SEINTR, and TDF_SERESTART flags are only set and
2862 * cleared by the current thread, which allows the lock-less read-only
2863 * accesses below.
2864 */
2865 int
2866 sigdeferstop_impl(int mode)
2867 {
2868 struct thread *td;
2869 int cflags, nflags;
2870
2871 td = curthread;
2872 cflags = sigdeferstop_curr_flags(td->td_flags);
2873 switch (mode) {
2874 case SIGDEFERSTOP_NOP:
2875 nflags = cflags;
2876 break;
2877 case SIGDEFERSTOP_OFF:
2878 nflags = 0;
2879 break;
2880 case SIGDEFERSTOP_SILENT:
2881 nflags = (cflags | TDF_SBDRY) & ~(TDF_SEINTR | TDF_SERESTART);
2882 break;
2883 case SIGDEFERSTOP_EINTR:
2884 nflags = (cflags | TDF_SBDRY | TDF_SEINTR) & ~TDF_SERESTART;
2885 break;
2886 case SIGDEFERSTOP_ERESTART:
2887 nflags = (cflags | TDF_SBDRY | TDF_SERESTART) & ~TDF_SEINTR;
2888 break;
2889 default:
2890 panic("sigdeferstop: invalid mode %x", mode);
2891 break;
2892 }
2893 if (cflags == nflags)
2894 return (SIGDEFERSTOP_VAL_NCHG);
2895 thread_lock(td);
2896 td->td_flags = (td->td_flags & ~cflags) | nflags;
2897 thread_unlock(td);
2898 return (cflags);
2899 }
2900
2901 /*
2902 * Restores the STOP handling mode, typically permitting the delivery
2903 * of SIGSTOP for the current thread. This does not immediately
2904 * suspend if a stop was posted. Instead, the thread will suspend
2905 * either via ast() or a subsequent interruptible sleep.
2906 */
2907 void
2908 sigallowstop_impl(int prev)
2909 {
2910 struct thread *td;
2911 int cflags;
2912
2913 KASSERT(prev != SIGDEFERSTOP_VAL_NCHG, ("failed sigallowstop"));
2914 KASSERT((prev & ~(TDF_SBDRY | TDF_SEINTR | TDF_SERESTART)) == 0,
2915 ("sigallowstop: incorrect previous mode %x", prev));
2916 td = curthread;
2917 cflags = sigdeferstop_curr_flags(td->td_flags);
2918 if (cflags != prev) {
2919 thread_lock(td);
2920 td->td_flags = (td->td_flags & ~cflags) | prev;
2921 thread_unlock(td);
2922 }
2923 }
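/*
 * Bracket usage (a sketch; the sigdeferstop()/sigallowstop() wrappers
 * that call these _impl functions are assumed to come from
 * <sys/signalvar.h>, and stop_unsafe_operation() is a placeholder):
 *
 *	int ostop;
 *
 *	ostop = sigdeferstop(SIGDEFERSTOP_SILENT);
 *	error = stop_unsafe_operation();
 *	sigallowstop(ostop);
 *
 * The value returned by sigdeferstop() must be handed back unmodified;
 * a SIGDEFERSTOP_VAL_NCHG return makes the restore a no-op.
 */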
2924
2925 enum sigstatus {
2926 SIGSTATUS_HANDLE,
2927 SIGSTATUS_HANDLED,
2928 SIGSTATUS_IGNORE,
2929 SIGSTATUS_SBDRY_STOP,
2930 };
2931
2932 /*
2933 * The thread has signal "sig" pending. Figure out what to do with it:
2934 *
2935 * _HANDLE -> the caller should handle the signal
2936 * _HANDLED -> handled internally, reload pending signal set
2937 * _IGNORE -> ignored, remove from the set of pending signals and try the
2938 * next pending signal
2939 * _SBDRY_STOP -> the signal should stop the thread but this is not
2940 * permitted in the current context
2941 */
2942 static enum sigstatus
2943 sigprocess(struct thread *td, int sig)
2944 {
2945 struct proc *p;
2946 struct sigacts *ps;
2947 struct sigqueue *queue;
2948 ksiginfo_t ksi;
2949 int prop;
2950
2951 KASSERT(_SIG_VALID(sig), ("%s: invalid signal %d", __func__, sig));
2952
2953 p = td->td_proc;
2954 ps = p->p_sigacts;
2955 mtx_assert(&ps->ps_mtx, MA_OWNED);
2956 PROC_LOCK_ASSERT(p, MA_OWNED);
2957
2958 /*
2959 * We should allow pending but ignored signals below
2960 * only if sigwait() is active, or P_TRACED was
2961 * on when they were posted.
2962 */
2963 if (SIGISMEMBER(ps->ps_sigignore, sig) &&
2964 (p->p_flag & P_TRACED) == 0 &&
2965 (td->td_flags & TDF_SIGWAIT) == 0) {
2966 return (SIGSTATUS_IGNORE);
2967 }
2968
2969 if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED) {
2970 /*
2971 * If traced, always stop.
2972 * Remove old signal from queue before the stop.
2973 * XXX shrug off debugger, it causes siginfo to
2974 * be thrown away.
2975 */
2976 queue = &td->td_sigqueue;
2977 ksiginfo_init(&ksi);
2978 if (sigqueue_get(queue, sig, &ksi) == 0) {
2979 queue = &p->p_sigqueue;
2980 sigqueue_get(queue, sig, &ksi);
2981 }
2982 td->td_si = ksi.ksi_info;
2983
2984 mtx_unlock(&ps->ps_mtx);
2985 sig = ptracestop(td, sig, &ksi);
2986 mtx_lock(&ps->ps_mtx);
2987
2988 td->td_si.si_signo = 0;
2989
2990 /*
2991 * Keep looking if the debugger discarded or
2992 * replaced the signal.
2993 */
2994 if (sig == 0)
2995 return (SIGSTATUS_HANDLED);
2996
2997 /*
2998 * If the signal became masked, re-queue it.
2999 */
3000 if (SIGISMEMBER(td->td_sigmask, sig)) {
3001 ksi.ksi_flags |= KSI_HEAD;
3002 sigqueue_add(&p->p_sigqueue, sig, &ksi);
3003 return (SIGSTATUS_HANDLED);
3004 }
3005
3006 /*
3007 * If the traced bit got turned off, requeue the signal and
3008 * reload the set of pending signals. This ensures that p_sig*
3009 * and p_sigact are consistent.
3010 */
3011 if ((p->p_flag & P_TRACED) == 0) {
3012 ksi.ksi_flags |= KSI_HEAD;
3013 sigqueue_add(queue, sig, &ksi);
3014 return (SIGSTATUS_HANDLED);
3015 }
3016 }
3017
3018 /*
3019 * Decide whether the signal should be returned.
3020 * Return the signal's number, or fall through
3021 * to clear it from the pending mask.
3022 */
3023 switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {
3024 case (intptr_t)SIG_DFL:
3025 /*
3026 * Don't take default actions on system processes.
3027 */
3028 if (p->p_pid <= 1) {
3029 #ifdef DIAGNOSTIC
3030 /*
3031 * Are you sure you want to ignore SIGSEGV
3032 * in init? XXX
3033 */
3034 printf("Process (pid %lu) got signal %d\n",
3035 (u_long)p->p_pid, sig);
3036 #endif
3037 return (SIGSTATUS_IGNORE);
3038 }
3039
3040 /*
3041 * If there is a pending stop signal to process with
3042 * default action, stop here, then clear the signal.
3043 * Traced or exiting processes should ignore stops.
3044 * Additionally, a member of an orphaned process group
3045 * should ignore tty stops.
3046 */
3047 prop = sigprop(sig);
3048 if (prop & SIGPROP_STOP) {
3049 mtx_unlock(&ps->ps_mtx);
3050 if ((p->p_flag & (P_TRACED | P_WEXIT |
3051 P_SINGLE_EXIT)) != 0 || ((p->p_pgrp->
3052 pg_flags & PGRP_ORPHANED) != 0 &&
3053 (prop & SIGPROP_TTYSTOP) != 0)) {
3054 mtx_lock(&ps->ps_mtx);
3055 return (SIGSTATUS_IGNORE);
3056 }
3057 if (TD_SBDRY_INTR(td)) {
3058 KASSERT((td->td_flags & TDF_SBDRY) != 0,
3059 ("lost TDF_SBDRY"));
3060 mtx_lock(&ps->ps_mtx);
3061 return (SIGSTATUS_SBDRY_STOP);
3062 }
3063 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
3064 &p->p_mtx.lock_object, "Catching SIGSTOP");
3065 sigqueue_delete(&td->td_sigqueue, sig);
3066 sigqueue_delete(&p->p_sigqueue, sig);
3067 p->p_flag |= P_STOPPED_SIG;
3068 p->p_xsig = sig;
3069 PROC_SLOCK(p);
3070 sig_suspend_threads(td, p, 0);
3071 thread_suspend_switch(td, p);
3072 PROC_SUNLOCK(p);
3073 mtx_lock(&ps->ps_mtx);
3074 return (SIGSTATUS_HANDLED);
3075 } else if ((prop & SIGPROP_IGNORE) != 0 &&
3076 (td->td_flags & TDF_SIGWAIT) == 0) {
3077 /*
3078 * Default action is to ignore; drop it if
3079 * not in kern_sigtimedwait().
3080 */
3081 return (SIGSTATUS_IGNORE);
3082 } else {
3083 return (SIGSTATUS_HANDLE);
3084 }
3085
3086 case (intptr_t)SIG_IGN:
3087 if ((td->td_flags & TDF_SIGWAIT) == 0)
3088 return (SIGSTATUS_IGNORE);
3089 else
3090 return (SIGSTATUS_HANDLE);
3091
3092 default:
3093 /*
3094 * This signal has an action, let postsig() process it.
3095 */
3096 return (SIGSTATUS_HANDLE);
3097 }
3098 }
3099
3100 /*
3101 * If the current process has received a signal (one that should be caught,
3102 * cause termination, or interrupt the current syscall), return its number.
3103 * Stop signals with default action are processed immediately, then cleared;
3104 * they aren't returned. This is checked after each entry to the system for
3105 * a syscall or trap (though this can usually be done without calling
3106 * issignal by checking the pending signal masks in cursig.) The normal call
3107 * sequence is
3108 *
3109 * while (sig = cursig(curthread))
3110 * postsig(sig);
3111 */
3112 static int
3113 issignal(struct thread *td)
3114 {
3115 struct proc *p;
3116 sigset_t sigpending;
3117 int sig;
3118
3119 p = td->td_proc;
3120 PROC_LOCK_ASSERT(p, MA_OWNED);
3121
3122 for (;;) {
3123 sigpending = td->td_sigqueue.sq_signals;
3124 SIGSETOR(sigpending, p->p_sigqueue.sq_signals);
3125 SIGSETNAND(sigpending, td->td_sigmask);
3126
3127 if ((p->p_flag & P_PPWAIT) != 0 || (td->td_flags &
3128 (TDF_SBDRY | TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY)
3129 SIG_STOPSIGMASK(sigpending);
3130 if (SIGISEMPTY(sigpending)) /* no signal to send */
3131 return (0);
3132
3133 /*
3134 * Do fast sigblock if requested by usermode. Since
3135 * we do know that there was a signal pending at this
3136 * point, set FAST_SIGBLOCK_PEND as an indicator for
3137 * usermode to perform a dummy call to
3138 * FAST_SIGBLOCK_UNBLOCK, which causes immediate
3139 * delivery of the postponed pending signal.
3140 */
3141 if ((td->td_pflags & TDP_SIGFASTBLOCK) != 0) {
3142 if (td->td_sigblock_val != 0)
3143 SIGSETNAND(sigpending, fastblock_mask);
3144 if (SIGISEMPTY(sigpending)) {
3145 td->td_pflags |= TDP_SIGFASTPENDING;
3146 return (0);
3147 }
3148 }
3149
3150 if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED &&
3151 (p->p_flag2 & P2_PTRACE_FSTP) != 0 &&
3152 SIGISMEMBER(sigpending, SIGSTOP)) {
3153 /*
3154 * If the debugger just attached, always consume
3155 * SIGSTOP from ptrace(PT_ATTACH) first, to
3156 * execute the debugger attach ritual in
3157 * order.
3158 */
3159 td->td_dbgflags |= TDB_FSTP;
3160 SIGEMPTYSET(sigpending);
3161 SIGADDSET(sigpending, SIGSTOP);
3162 }
3163
3164 SIG_FOREACH(sig, &sigpending) {
3165 switch (sigprocess(td, sig)) {
3166 case SIGSTATUS_HANDLE:
3167 return (sig);
3168 case SIGSTATUS_HANDLED:
3169 goto next;
3170 case SIGSTATUS_IGNORE:
3171 sigqueue_delete(&td->td_sigqueue, sig);
3172 sigqueue_delete(&p->p_sigqueue, sig);
3173 break;
3174 case SIGSTATUS_SBDRY_STOP:
3175 return (-1);
3176 }
3177 }
3178 next:;
3179 }
3180 }
3181
3182 void
3183 thread_stopped(struct proc *p)
3184 {
3185 int n;
3186
3187 PROC_LOCK_ASSERT(p, MA_OWNED);
3188 PROC_SLOCK_ASSERT(p, MA_OWNED);
3189 n = p->p_suspcount;
3190 if (p == curproc)
3191 n++;
3192 if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) {
3193 PROC_SUNLOCK(p);
3194 p->p_flag &= ~P_WAITED;
3195 PROC_LOCK(p->p_pptr);
3196 childproc_stopped(p, (p->p_flag & P_TRACED) ?
3197 CLD_TRAPPED : CLD_STOPPED);
3198 PROC_UNLOCK(p->p_pptr);
3199 PROC_SLOCK(p);
3200 }
3201 }
3202
3203 /*
3204 * Take the action for the specified signal
3205 * from the current set of pending signals.
3206 */
3207 int
3208 postsig(int sig)
3209 {
3210 struct thread *td;
3211 struct proc *p;
3212 struct sigacts *ps;
3213 sig_t action;
3214 ksiginfo_t ksi;
3215 sigset_t returnmask;
3216
3217 KASSERT(sig != 0, ("postsig"));
3218
3219 td = curthread;
3220 p = td->td_proc;
3221 PROC_LOCK_ASSERT(p, MA_OWNED);
3222 ps = p->p_sigacts;
3223 mtx_assert(&ps->ps_mtx, MA_OWNED);
3224 ksiginfo_init(&ksi);
3225 if (sigqueue_get(&td->td_sigqueue, sig, &ksi) == 0 &&
3226 sigqueue_get(&p->p_sigqueue, sig, &ksi) == 0)
3227 return (0);
3228 ksi.ksi_signo = sig;
3229 if (ksi.ksi_code == SI_TIMER)
3230 itimer_accept(p, ksi.ksi_timerid, &ksi);
3231 action = ps->ps_sigact[_SIG_IDX(sig)];
3232 #ifdef KTRACE
3233 if (KTRPOINT(td, KTR_PSIG))
3234 ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ?
3235 &td->td_oldsigmask : &td->td_sigmask, ksi.ksi_code);
3236 #endif
3237
3238 if (action == SIG_DFL) {
3239 /*
3240 * Default action, where the default is to kill
3241 * the process. (Other cases were ignored above.)
3242 */
3243 mtx_unlock(&ps->ps_mtx);
3244 proc_td_siginfo_capture(td, &ksi.ksi_info);
3245 sigexit(td, sig);
3246 /* NOTREACHED */
3247 } else {
3248 /*
3249 * If we get here, the signal must be caught.
3250 */
3251 KASSERT(action != SIG_IGN, ("postsig action %p", action));
3252 KASSERT(!SIGISMEMBER(td->td_sigmask, sig),
3253 ("postsig action: blocked sig %d", sig));
3254
3255 /*
3256 * Set the new mask value and also defer further
3257 * occurrences of this signal.
3258 *
3259 * Special case: user has done a sigsuspend. Here the
3260 * current mask is not of interest, but rather the
3261 * mask from before the sigsuspend is what we want
3262 * restored after the signal processing is completed.
3263 */
3264 if (td->td_pflags & TDP_OLDMASK) {
3265 returnmask = td->td_oldsigmask;
3266 td->td_pflags &= ~TDP_OLDMASK;
3267 } else
3268 returnmask = td->td_sigmask;
3269
3270 if (p->p_sig == sig) {
3271 p->p_sig = 0;
3272 }
3273 (*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask);
3274 postsig_done(sig, td, ps);
3275 }
3276 return (1);
3277 }
3278
3279 int
3280 sig_ast_checksusp(struct thread *td)
3281 {
3282 struct proc *p __diagused;
3283 int ret;
3284
3285 p = td->td_proc;
3286 PROC_LOCK_ASSERT(p, MA_OWNED);
3287
3288 if ((td->td_flags & TDF_NEEDSUSPCHK) == 0)
3289 return (0);
3290
3291 ret = thread_suspend_check(1);
3292 MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
3293 return (ret);
3294 }
3295
3296 int
3297 sig_ast_needsigchk(struct thread *td)
3298 {
3299 struct proc *p;
3300 struct sigacts *ps;
3301 int ret, sig;
3302
3303 p = td->td_proc;
3304 PROC_LOCK_ASSERT(p, MA_OWNED);
3305
3306 if ((td->td_flags & TDF_NEEDSIGCHK) == 0)
3307 return (0);
3308
3309 ps = p->p_sigacts;
3310 mtx_lock(&ps->ps_mtx);
3311 sig = cursig(td);
3312 if (sig == -1) {
3313 mtx_unlock(&ps->ps_mtx);
3314 KASSERT((td->td_flags & TDF_SBDRY) != 0, ("lost TDF_SBDRY"));
3315 KASSERT(TD_SBDRY_INTR(td),
3316 ("lost TDF_SERESTART of TDF_SEINTR"));
3317 KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
3318 (TDF_SEINTR | TDF_SERESTART),
3319 ("both TDF_SEINTR and TDF_SERESTART"));
3320 ret = TD_SBDRY_ERRNO(td);
3321 } else if (sig != 0) {
3322 ret = SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR : ERESTART;
3323 mtx_unlock(&ps->ps_mtx);
3324 } else {
3325 mtx_unlock(&ps->ps_mtx);
3326 ret = 0;
3327 }
3328
3329 /*
3330 * Do not go to sleep if this thread was the ptrace(2)
3331 * attach leader. cursig() consumed SIGSTOP from PT_ATTACH,
3332 * but we usually act on the signal by interrupting sleep, and
3333 * should do that here as well.
3334 */
3335 if ((td->td_dbgflags & TDB_FSTP) != 0) {
3336 if (ret == 0)
3337 ret = EINTR;
3338 td->td_dbgflags &= ~TDB_FSTP;
3339 }
3340
3341 return (ret);
3342 }
3343
3344 int
3345 sig_intr(void)
3346 {
3347 struct thread *td;
3348 struct proc *p;
3349 int ret;
3350
3351 td = curthread;
3352 if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) == 0)
3353 return (0);
3354
3355 p = td->td_proc;
3356
3357 PROC_LOCK(p);
3358 ret = sig_ast_checksusp(td);
3359 if (ret == 0)
3360 ret = sig_ast_needsigchk(td);
3361 PROC_UNLOCK(p);
3362 return (ret);
3363 }
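/*
 * Intended use of sig_intr() (a sketch; do_work() is a placeholder):
 * long-running kernel loops that never sleep can still be broken by a
 * pending signal or suspension request by polling it periodically:
 *
 *	for (;;) {
 *		do_work();
 *		error = sig_intr();
 *		if (error != 0)
 *			return (error);
 *	}
 */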
3364
3365 bool
3366 curproc_sigkilled(void)
3367 {
3368 struct thread *td;
3369 struct proc *p;
3370 struct sigacts *ps;
3371 bool res;
3372
3373 td = curthread;
3374 if ((td->td_flags & TDF_NEEDSIGCHK) == 0)
3375 return (false);
3376
3377 p = td->td_proc;
3378 PROC_LOCK(p);
3379 ps = p->p_sigacts;
3380 mtx_lock(&ps->ps_mtx);
3381 res = SIGISMEMBER(td->td_sigqueue.sq_signals, SIGKILL) ||
3382 SIGISMEMBER(p->p_sigqueue.sq_signals, SIGKILL);
3383 mtx_unlock(&ps->ps_mtx);
3384 PROC_UNLOCK(p);
3385 return (res);
3386 }
3387
3388 void
3389 proc_wkilled(struct proc *p)
3390 {
3391
3392 PROC_LOCK_ASSERT(p, MA_OWNED);
3393 if ((p->p_flag & P_WKILLED) == 0) {
3394 p->p_flag |= P_WKILLED;
3395 /*
3396 * Notify swapper that there is a process to swap in.
3397 * The notification is racy, at worst it would take 10
3398 * seconds for the swapper process to notice.
3399 */
3400 if ((p->p_flag & (P_INMEM | P_SWAPPINGIN)) == 0)
3401 wakeup(&proc0);
3402 }
3403 }
3404
3405 /*
3406 * Kill the specified process for the stated reason.
3407 */
3408 void
3409 killproc(struct proc *p, const char *why)
3410 {
3411
3412 PROC_LOCK_ASSERT(p, MA_OWNED);
3413 CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)", p, p->p_pid,
3414 p->p_comm);
3415 log(LOG_ERR, "pid %d (%s), jid %d, uid %d, was killed: %s\n",
3416 p->p_pid, p->p_comm, p->p_ucred->cr_prison->pr_id,
3417 p->p_ucred->cr_uid, why);
3418 proc_wkilled(p);
3419 kern_psignal(p, SIGKILL);
3420 }
3421
3422 /*
3423 * Force the current process to exit with the specified signal, dumping core
3424 * if appropriate. We bypass the normal tests for masked and caught signals,
3425 * allowing unrecoverable failures to terminate the process without changing
3426 * signal state. Mark the accounting record with the signal termination.
3427 * If dumping core, save the signal number for the debugger. Calls exit and
3428 * does not return.
3429 */
3430 void
3431 sigexit(struct thread *td, int sig)
3432 {
3433 struct proc *p = td->td_proc;
3434
3435 PROC_LOCK_ASSERT(p, MA_OWNED);
3436 p->p_acflag |= AXSIG;
3437 /*
3438 * We must be single-threading to generate a core dump. This
3439 * ensures that the registers in the core file are up-to-date.
3440 * Also, the ELF dump handler assumes that the thread list doesn't
3441 * change out from under it.
3442 *
3443 * XXX If another thread attempts to single-thread before us
3444 * (e.g. via fork()), we won't get a dump at all.
3445 */
3446 if ((sigprop(sig) & SIGPROP_CORE) &&
3447 thread_single(p, SINGLE_NO_EXIT) == 0) {
3448 p->p_sig = sig;
3449 /*
3450 * Log signals which would cause core dumps.
3451 * (Log as LOG_INFO to appease those who don't want
3452 * these messages.)
3453 * XXX: TODO: write out the ruid as well as the euid.
3454 * Note that coredump() drops the proc lock.
3455 */
3456 if (coredump(td) == 0)
3457 sig |= WCOREFLAG;
3458 if (kern_logsigexit)
3459 log(LOG_INFO,
3460 "pid %d (%s), jid %d, uid %d: exited on "
3461 "signal %d%s\n", p->p_pid, p->p_comm,
3462 p->p_ucred->cr_prison->pr_id,
3463 td->td_ucred->cr_uid,
3464 sig &~ WCOREFLAG,
3465 sig & WCOREFLAG ? " (core dumped)" : "");
3466 } else
3467 PROC_UNLOCK(p);
3468 exit1(td, 0, sig);
3469 /* NOTREACHED */
3470 }
3471
3472 /*
3473 * Send queued SIGCHLD to parent when child process's state
3474 * is changed.
3475 */
3476 static void
3477 sigparent(struct proc *p, int reason, int status)
3478 {
3479 PROC_LOCK_ASSERT(p, MA_OWNED);
3480 PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
3481
3482 if (p->p_ksi != NULL) {
3483 p->p_ksi->ksi_signo = SIGCHLD;
3484 p->p_ksi->ksi_code = reason;
3485 p->p_ksi->ksi_status = status;
3486 p->p_ksi->ksi_pid = p->p_pid;
3487 p->p_ksi->ksi_uid = p->p_ucred->cr_ruid;
3488 if (KSI_ONQ(p->p_ksi))
3489 return;
3490 }
3491 pksignal(p->p_pptr, SIGCHLD, p->p_ksi);
3492 }
3493
3494 static void
3495 childproc_jobstate(struct proc *p, int reason, int sig)
3496 {
3497 struct sigacts *ps;
3498
3499 PROC_LOCK_ASSERT(p, MA_OWNED);
3500 PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
3501
3502 /*
3503 * Wake up the parent sleeping in kern_wait(), and also
3504 * send SIGCHLD to the parent; SIGCHLD alone does not
3505 * guarantee that the parent will awake, because the
3506 * parent may have masked the signal.
3507 */
3508 p->p_pptr->p_flag |= P_STATCHILD;
3509 wakeup(p->p_pptr);
3510
3511 ps = p->p_pptr->p_sigacts;
3512 mtx_lock(&ps->ps_mtx);
3513 if ((ps->ps_flag & PS_NOCLDSTOP) == 0) {
3514 mtx_unlock(&ps->ps_mtx);
3515 sigparent(p, reason, sig);
3516 } else
3517 mtx_unlock(&ps->ps_mtx);
3518 }
3519
3520 void
3521 childproc_stopped(struct proc *p, int reason)
3522 {
3523
3524 childproc_jobstate(p, reason, p->p_xsig);
3525 }
3526
3527 void
3528 childproc_continued(struct proc *p)
3529 {
3530 childproc_jobstate(p, CLD_CONTINUED, SIGCONT);
3531 }
3532
3533 void
3534 childproc_exited(struct proc *p)
3535 {
3536 int reason, status;
3537
3538 if (WCOREDUMP(p->p_xsig)) {
3539 reason = CLD_DUMPED;
3540 status = WTERMSIG(p->p_xsig);
3541 } else if (WIFSIGNALED(p->p_xsig)) {
3542 reason = CLD_KILLED;
3543 status = WTERMSIG(p->p_xsig);
3544 } else {
3545 reason = CLD_EXITED;
3546 status = p->p_xexit;
3547 }
3548 /*
3549 * XXX avoid calling wakeup(p->p_pptr), the work is
3550 * done in exit1().
3551 */
3552 sigparent(p, reason, status);
3553 }
3554
3555 #define MAX_NUM_CORE_FILES 100000
3556 #ifndef NUM_CORE_FILES
3557 #define NUM_CORE_FILES 5
3558 #endif
3559 CTASSERT(NUM_CORE_FILES >= 0 && NUM_CORE_FILES <= MAX_NUM_CORE_FILES);
3560 static int num_cores = NUM_CORE_FILES;
3561
3562 static int
3563 sysctl_debug_num_cores_check(SYSCTL_HANDLER_ARGS)
3564 {
3565 int error;
3566 int new_val;
3567
3568 new_val = num_cores;
3569 error = sysctl_handle_int(oidp, &new_val, 0, req);
3570 if (error != 0 || req->newptr == NULL)
3571 return (error);
3572 if (new_val > MAX_NUM_CORE_FILES)
3573 new_val = MAX_NUM_CORE_FILES;
3574 if (new_val < 0)
3575 new_val = 0;
3576 num_cores = new_val;
3577 return (0);
3578 }
3579 SYSCTL_PROC(_debug, OID_AUTO, ncores,
3580 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, sizeof(int),
3581 sysctl_debug_num_cores_check, "I",
3582 "Maximum number of generated process corefiles while using index format");
3583
3584 #define GZIP_SUFFIX ".gz"
3585 #define ZSTD_SUFFIX ".zst"
3586
3587 int compress_user_cores = 0;
3588
3589 static int
3590 sysctl_compress_user_cores(SYSCTL_HANDLER_ARGS)
3591 {
3592 int error, val;
3593
3594 val = compress_user_cores;
3595 error = sysctl_handle_int(oidp, &val, 0, req);
3596 if (error != 0 || req->newptr == NULL)
3597 return (error);
3598 if (val != 0 && !compressor_avail(val))
3599 return (EINVAL);
3600 compress_user_cores = val;
3601 return (error);
3602 }
3603 SYSCTL_PROC(_kern, OID_AUTO, compress_user_cores,
3604 CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, 0, sizeof(int),
3605 sysctl_compress_user_cores, "I",
3606 "Enable compression of user corefiles ("
3607 __XSTRING(COMPRESS_GZIP) " = gzip, "
3608 __XSTRING(COMPRESS_ZSTD) " = zstd)");
3609
3610 int compress_user_cores_level = 6;
3611 SYSCTL_INT(_kern, OID_AUTO, compress_user_cores_level, CTLFLAG_RWTUN,
3612 &compress_user_cores_level, 0,
3613 "Corefile compression level");
3614
3615 /*
3616 * Protect the access to corefilename[] by allproc_lock.
3617 */
3618 #define corefilename_lock allproc_lock
3619
3620 static char corefilename[MAXPATHLEN] = {"%N.core"};
3621 TUNABLE_STR("kern.corefile", corefilename, sizeof(corefilename));
3622
3623 static int
3624 sysctl_kern_corefile(SYSCTL_HANDLER_ARGS)
3625 {
3626 int error;
3627
3628 sx_xlock(&corefilename_lock);
3629 error = sysctl_handle_string(oidp, corefilename, sizeof(corefilename),
3630 req);
3631 sx_xunlock(&corefilename_lock);
3632
3633 return (error);
3634 }
3635 SYSCTL_PROC(_kern, OID_AUTO, corefile, CTLTYPE_STRING | CTLFLAG_RW |
3636 CTLFLAG_MPSAFE, 0, 0, sysctl_kern_corefile, "A",
3637 "Process corefile name format string");
3638
3639 static void
3640 vnode_close_locked(struct thread *td, struct vnode *vp)
3641 {
3642
3643 VOP_UNLOCK(vp);
3644 vn_close(vp, FWRITE, td->td_ucred, td);
3645 }
3646
3647 /*
3648 * If the core format has a %I in it, then we need to check
3649 * for existing corefiles before defining a name.
3650 * To do this we iterate over 0..ncores to find a
3651 * non-existing core file name to use. If all names are
3652 * already in use, we choose the oldest one.
3653 */
3654 static int
3655 corefile_open_last(struct thread *td, char *name, int indexpos,
3656 int indexlen, int ncores, struct vnode **vpp)
3657 {
3658 struct vnode *oldvp, *nextvp, *vp;
3659 struct vattr vattr;
3660 struct nameidata nd;
3661 int error, i, flags, oflags, cmode;
3662 char ch;
3663 struct timespec lasttime;
3664
3665 nextvp = oldvp = NULL;
3666 cmode = S_IRUSR | S_IWUSR;
3667 oflags = VN_OPEN_NOAUDIT | VN_OPEN_NAMECACHE |
3668 (capmode_coredump ? VN_OPEN_NOCAPCHECK : 0);
3669
3670 for (i = 0; i < ncores; i++) {
3671 flags = O_CREAT | FWRITE | O_NOFOLLOW;
3672
3673 ch = name[indexpos + indexlen];
3674 (void)snprintf(name + indexpos, indexlen + 1, "%.*u", indexlen,
3675 i);
3676 name[indexpos + indexlen] = ch;
3677
3678 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td);
3679 error = vn_open_cred(&nd, &flags, cmode, oflags, td->td_ucred,
3680 NULL);
3681 if (error != 0)
3682 break;
3683
3684 vp = nd.ni_vp;
3685 NDFREE(&nd, NDF_ONLY_PNBUF);
3686 if ((flags & O_CREAT) == O_CREAT) {
3687 nextvp = vp;
3688 break;
3689 }
3690
3691 error = VOP_GETATTR(vp, &vattr, td->td_ucred);
3692 if (error != 0) {
3693 vnode_close_locked(td, vp);
3694 break;
3695 }
3696
3697 if (oldvp == NULL ||
3698 lasttime.tv_sec > vattr.va_mtime.tv_sec ||
3699 (lasttime.tv_sec == vattr.va_mtime.tv_sec &&
3700 lasttime.tv_nsec >= vattr.va_mtime.tv_nsec)) {
3701 if (oldvp != NULL)
3702 vn_close(oldvp, FWRITE, td->td_ucred, td);
3703 oldvp = vp;
3704 VOP_UNLOCK(oldvp);
3705 lasttime = vattr.va_mtime;
3706 } else {
3707 vnode_close_locked(td, vp);
3708 }
3709 }
3710
3711 if (oldvp != NULL) {
3712 if (nextvp == NULL) {
3713 if ((td->td_proc->p_flag & P_SUGID) != 0) {
3714 error = EFAULT;
3715 vn_close(oldvp, FWRITE, td->td_ucred, td);
3716 } else {
3717 nextvp = oldvp;
3718 error = vn_lock(nextvp, LK_EXCLUSIVE);
3719 if (error != 0) {
3720 vn_close(nextvp, FWRITE, td->td_ucred,
3721 td);
3722 nextvp = NULL;
3723 }
3724 }
3725 } else {
3726 vn_close(oldvp, FWRITE, td->td_ucred, td);
3727 }
3728 }
3729 if (error != 0) {
3730 if (nextvp != NULL)
3731 vnode_close_locked(td, oldvp);
3732 } else {
3733 *vpp = nextvp;
3734 }
3735
3736 return (error);
3737 }
3738
3739 /*
3740 * corefile_open(comm, uid, pid, td, compress, vpp, namep)
3741 * Expand the name described in corefilename, using name, uid, and pid
3742 * and open/create core file.
3743 * corefilename is a printf-like string, with three format specifiers:
3744 * %N name of process ("name")
3745 * %P process id (pid)
3746 * %U user id (uid)
3747 * For example, "%N.core" is the default; they can be disabled completely
3748 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
3749 * This is controlled by the sysctl variable kern.corefile (see above).
3750 */
3751 static int
3752 corefile_open(const char *comm, uid_t uid, pid_t pid, struct thread *td,
3753 int compress, int signum, struct vnode **vpp, char **namep)
3754 {
3755 struct sbuf sb;
3756 struct nameidata nd;
3757 const char *format;
3758 char *hostname, *name;
3759 int cmode, error, flags, i, indexpos, indexlen, oflags, ncores;
3760
3761 hostname = NULL;
3762 format = corefilename;
3763 name = malloc(MAXPATHLEN, M_TEMP, M_WAITOK | M_ZERO);
3764 indexlen = 0;
3765 indexpos = -1;
3766 ncores = num_cores;
3767 (void)sbuf_new(&sb, name, MAXPATHLEN, SBUF_FIXEDLEN);
3768 sx_slock(&corefilename_lock);
3769 for (i = 0; format[i] != '\0'; i++) {
3770 switch (format[i]) {
3771 case '%': /* Format character */
3772 i++;
3773 switch (format[i]) {
3774 case '%':
3775 sbuf_putc(&sb, '%');
3776 break;
3777 case 'H': /* hostname */
3778 if (hostname == NULL) {
3779 hostname = malloc(MAXHOSTNAMELEN,
3780 M_TEMP, M_WAITOK);
3781 }
3782 getcredhostname(td->td_ucred, hostname,
3783 MAXHOSTNAMELEN);
3784 sbuf_printf(&sb, "%s", hostname);
3785 break;
3786 case 'I': /* autoincrementing index */
3787 if (indexpos != -1) {
3788 sbuf_printf(&sb, "%%I");
3789 break;
3790 }
3791
3792 indexpos = sbuf_len(&sb);
3793 sbuf_printf(&sb, "%u", ncores - 1);
3794 indexlen = sbuf_len(&sb) - indexpos;
3795 break;
3796 case 'N': /* process name */
3797 sbuf_printf(&sb, "%s", comm);
3798 break;
3799 case 'P': /* process id */
3800 sbuf_printf(&sb, "%u", pid);
3801 break;
3802 case 'S': /* signal number */
3803 sbuf_printf(&sb, "%i", signum);
3804 break;
3805 case 'U': /* user id */
3806 sbuf_printf(&sb, "%u", uid);
3807 break;
3808 default:
3809 log(LOG_ERR,
3810 "Unknown format character %c in "
3811 "corename `%s'\n", format[i], format);
3812 break;
3813 }
3814 break;
3815 default:
3816 sbuf_putc(&sb, format[i]);
3817 break;
3818 }
3819 }
3820 sx_sunlock(&corefilename_lock);
3821 free(hostname, M_TEMP);
3822 if (compress == COMPRESS_GZIP)
3823 sbuf_printf(&sb, GZIP_SUFFIX);
3824 else if (compress == COMPRESS_ZSTD)
3825 sbuf_printf(&sb, ZSTD_SUFFIX);
3826 if (sbuf_error(&sb) != 0) {
3827 log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too "
3828 "long\n", (long)pid, comm, (u_long)uid);
3829 sbuf_delete(&sb);
3830 free(name, M_TEMP);
3831 return (ENOMEM);
3832 }
3833 sbuf_finish(&sb);
3834 sbuf_delete(&sb);
3835
3836 if (indexpos != -1) {
3837 error = corefile_open_last(td, name, indexpos, indexlen, ncores,
3838 vpp);
3839 if (error != 0) {
3840 log(LOG_ERR,
3841 "pid %d (%s), uid (%u): Path `%s' failed "
3842 "on initial open test, error = %d\n",
3843 pid, comm, uid, name, error);
3844 }
3845 } else {
3846 cmode = S_IRUSR | S_IWUSR;
3847 oflags = VN_OPEN_NOAUDIT | VN_OPEN_NAMECACHE |
3848 (capmode_coredump ? VN_OPEN_NOCAPCHECK : 0);
3849 flags = O_CREAT | FWRITE | O_NOFOLLOW;
3850 if ((td->td_proc->p_flag & P_SUGID) != 0)
3851 flags |= O_EXCL;
3852
3853 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td);
3854 error = vn_open_cred(&nd, &flags, cmode, oflags, td->td_ucred,
3855 NULL);
3856 if (error == 0) {
3857 *vpp = nd.ni_vp;
3858 NDFREE(&nd, NDF_ONLY_PNBUF);
3859 }
3860 }
3861
3862 if (error != 0) {
3863 #ifdef AUDIT
3864 audit_proc_coredump(td, name, error);
3865 #endif
3866 free(name, M_TEMP);
3867 return (error);
3868 }
3869 *namep = name;
3870 return (0);
3871 }
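/*
 * Worked example (illustrative, not from this file): with
 *
 *	sysctl kern.corefile=/var/coredumps/%U/%N.%P.core
 *
 * a dump of pid 1234 of "httpd" running as uid 80 is written to
 * /var/coredumps/80/httpd.1234.core; when compression is enabled, the
 * GZIP_SUFFIX or ZSTD_SUFFIX defined above is appended to the name.
 */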
3872
3873 /*
3874 * Dump a process' core. The main routine does some
3875 * policy checking, and creates the name of the coredump;
3876 * then it passes on a vnode and a size limit to the process-specific
3877 * coredump routine if there is one; if there _is not_ one, it returns
3878 * ENOSYS; otherwise it returns the error from the process-specific routine.
3879 */
3880
3881 static int
3882 coredump(struct thread *td)
3883 {
3884 struct proc *p = td->td_proc;
3885 struct ucred *cred = td->td_ucred;
3886 struct vnode *vp;
3887 struct flock lf;
3888 struct vattr vattr;
3889 size_t fullpathsize;
3890 int error, error1, locked;
3891 char *name; /* name of corefile */
3892 void *rl_cookie;
3893 off_t limit;
3894 char *fullpath, *freepath = NULL;
3895 struct sbuf *sb;
3896
3897 PROC_LOCK_ASSERT(p, MA_OWNED);
3898 MPASS((p->p_flag & P_HADTHREADS) == 0 || p->p_singlethread == td);
3899
3900 if (!do_coredump || (!sugid_coredump && (p->p_flag & P_SUGID) != 0) ||
3901 (p->p_flag2 & P2_NOTRACE) != 0) {
3902 PROC_UNLOCK(p);
3903 return (EFAULT);
3904 }
3905
3906 /*
3907 * Note that the bulk of limit checking is done after
3908 * the corefile is created. The exception is if the limit
3909 * for corefiles is 0, in which case we don't bother
3910 * creating the corefile at all. This layout means that
3911 * a corefile is truncated instead of not being created
3912 * if it is larger than the limit.
3913 */
3914 limit = (off_t)lim_cur(td, RLIMIT_CORE);
3915 if (limit == 0 || racct_get_available(p, RACCT_CORE) == 0) {
3916 PROC_UNLOCK(p);
3917 return (EFBIG);
3918 }
3919 PROC_UNLOCK(p);
3920
3921 error = corefile_open(p->p_comm, cred->cr_uid, p->p_pid, td,
3922 compress_user_cores, p->p_sig, &vp, &name);
3923 if (error != 0)
3924 return (error);
3925
3926 /*
3927 * Don't dump to non-regular files or files with more than one link.
3928 * Don't dump into system files. The effective user must own the corefile.
3929 */
3930 if (vp->v_type != VREG || VOP_GETATTR(vp, &vattr, cred) != 0 ||
3931 vattr.va_nlink != 1 || (vp->v_vflag & VV_SYSTEM) != 0 ||
3932 vattr.va_uid != cred->cr_uid) {
3933 VOP_UNLOCK(vp);
3934 error = EFAULT;
3935 goto out;
3936 }
3937
3938 VOP_UNLOCK(vp);
3939
3940 /* Postpone other writers, including core dumps of other processes. */
3941 rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
3942
3943 lf.l_whence = SEEK_SET;
3944 lf.l_start = 0;
3945 lf.l_len = 0;
3946 lf.l_type = F_WRLCK;
3947 locked = (VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK) == 0);
3948
3949 VATTR_NULL(&vattr);
3950 vattr.va_size = 0;
3951 if (set_core_nodump_flag)
3952 vattr.va_flags = UF_NODUMP;
3953 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3954 VOP_SETATTR(vp, &vattr, cred);
3955 VOP_UNLOCK(vp);
3956 PROC_LOCK(p);
3957 p->p_acflag |= ACORE;
3958 PROC_UNLOCK(p);
3959
3960 if (p->p_sysent->sv_coredump != NULL) {
3961 error = p->p_sysent->sv_coredump(td, vp, limit, 0);
3962 } else {
3963 error = ENOSYS;
3964 }
3965
3966 if (locked) {
3967 lf.l_type = F_UNLCK;
3968 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
3969 }
3970 vn_rangelock_unlock(vp, rl_cookie);
3971
3972 /*
3973 * Notify the userland helper that a process triggered a core dump.
3974 * This allows the helper to run an automated debugging session.
3975 */
3976 if (error != 0 || coredump_devctl == 0)
3977 goto out;
3978 sb = sbuf_new_auto();
3979 if (vn_fullpath_global(p->p_textvp, &fullpath, &freepath) != 0)
3980 goto out2;
3981 sbuf_printf(sb, "comm=\"");
3982 devctl_safe_quote_sb(sb, fullpath);
3983 free(freepath, M_TEMP);
3984 sbuf_printf(sb, "\" core=\"");
3985
3986 /*
3987 * We can't look up the core file vp directly. When we're replacing a core,
3988 * and at other random times, we flush the name cache, so the lookup fails.
3989 * Instead, if the path of the core is relative, add the current dir in front of it.
3990 */
3991 if (name[0] != '/') {
3992 fullpathsize = MAXPATHLEN;
3993 freepath = malloc(fullpathsize, M_TEMP, M_WAITOK);
3994 if (vn_getcwd(freepath, &fullpath, &fullpathsize) != 0) {
3995 free(freepath, M_TEMP);
3996 goto out2;
3997 }
3998 devctl_safe_quote_sb(sb, fullpath);
3999 free(freepath, M_TEMP);
4000 sbuf_putc(sb, '/');
4001 }
4002 devctl_safe_quote_sb(sb, name);
4003 sbuf_printf(sb, "\"");
4004 if (sbuf_finish(sb) == 0)
4005 devctl_notify("kernel", "signal", "coredump", sbuf_data(sb));
4006 out2:
4007 sbuf_delete(sb);
4008 out:
4009 error1 = vn_close(vp, FWRITE, cred, td);
4010 if (error == 0)
4011 error = error1;
4012 #ifdef AUDIT
4013 audit_proc_coredump(td, name, error);
4014 #endif
4015 free(name, M_TEMP);
4016 return (error);
4017 }
4018
4019 /*
4020 * Nonexistent system call -- signal the process (it may want to handle it).
4021 * Flag an error in case the process won't see the signal immediately (blocked or ignored).
4022 */
4023 #ifndef _SYS_SYSPROTO_H_
4024 struct nosys_args {
4025 int dummy;
4026 };
4027 #endif
4028 /* ARGSUSED */
4029 int
4030 nosys(struct thread *td, struct nosys_args *args)
4031 {
4032 struct proc *p;
4033
4034 p = td->td_proc;
4035
4036 PROC_LOCK(p);
4037 tdsignal(td, SIGSYS);
4038 PROC_UNLOCK(p);
4039 if (kern_lognosys == 1 || kern_lognosys == 3) {
4040 uprintf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm,
4041 td->td_sa.code);
4042 }
4043 if (kern_lognosys == 2 || kern_lognosys == 3 ||
4044 (p->p_pid == 1 && (kern_lognosys & 3) == 0)) {
4045 printf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm,
4046 td->td_sa.code);
4047 }
4048 return (ENOSYS);
4049 }
4050
4051 /*
4052 * Send a SIGIO or SIGURG signal to a process or process group using stored
4053 * credentials rather than those of the current process.
4054 */
4055 void
4056 pgsigio(struct sigio **sigiop, int sig, int checkctty)
4057 {
4058 ksiginfo_t ksi;
4059 struct sigio *sigio;
4060
4061 ksiginfo_init(&ksi);
4062 ksi.ksi_signo = sig;
4063 ksi.ksi_code = SI_KERNEL;
4064
4065 SIGIO_LOCK();
4066 sigio = *sigiop;
4067 if (sigio == NULL) {
4068 SIGIO_UNLOCK();
4069 return;
4070 }
4071 if (sigio->sio_pgid > 0) {
4072 PROC_LOCK(sigio->sio_proc);
4073 if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
4074 kern_psignal(sigio->sio_proc, sig);
4075 PROC_UNLOCK(sigio->sio_proc);
4076 } else if (sigio->sio_pgid < 0) {
4077 struct proc *p;
4078
4079 PGRP_LOCK(sigio->sio_pgrp);
4080 LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
4081 PROC_LOCK(p);
4082 if (p->p_state == PRS_NORMAL &&
4083 CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
4084 (checkctty == 0 || (p->p_flag & P_CONTROLT)))
4085 kern_psignal(p, sig);
4086 PROC_UNLOCK(p);
4087 }
4088 PGRP_UNLOCK(sigio->sio_pgrp);
4089 }
4090 SIGIO_UNLOCK();
4091 }
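/*
 * Illustrative userland sketch, not part of kern_sig.c: the usual way a
 * process arranges to be on the receiving end of pgsigio().  Setting the
 * owner stores the caller's credentials, which the CANSIGIO() check above
 * later consults.  Not every descriptor type supports O_ASYNC.
 */
#if 0
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static void
sigio_handler(int signo __unused)
{
	/* I/O became possible on the owned descriptor. */
}

static int
setup_sigio(int fd)
{
	int flags;

	signal(SIGIO, sigio_handler);
	if (fcntl(fd, F_SETOWN, getpid()) == -1)	/* store owner + creds */
		return (-1);
	flags = fcntl(fd, F_GETFL);
	return (fcntl(fd, F_SETFL, flags | O_ASYNC));	/* enable SIGIO */
}
#endif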
4092
4093 static int
4094 filt_sigattach(struct knote *kn)
4095 {
4096 struct proc *p = curproc;
4097
4098 kn->kn_ptr.p_proc = p;
4099 kn->kn_flags |= EV_CLEAR; /* automatically set */
4100
4101 knlist_add(p->p_klist, kn, 0);
4102
4103 return (0);
4104 }
4105
4106 static void
4107 filt_sigdetach(struct knote *kn)
4108 {
4109 struct proc *p = kn->kn_ptr.p_proc;
4110
4111 knlist_remove(p->p_klist, kn, 0);
4112 }
4113
4114 /*
4115  * Signal knotes are shared with proc knotes, so we apply a mask to
4116 * the hint in order to differentiate them from process hints. This
4117 * could be avoided by using a signal-specific knote list, but probably
4118 * isn't worth the trouble.
4119 */
4120 static int
4121 filt_signal(struct knote *kn, long hint)
4122 {
4123
4124 if (hint & NOTE_SIGNAL) {
4125 hint &= ~NOTE_SIGNAL;
4126
4127 if (kn->kn_id == hint)
4128 kn->kn_data++;
4129 }
4130 return (kn->kn_data != 0);
4131 }
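/*
 * Illustrative userland sketch, not part of kern_sig.c: consuming the
 * EVFILT_SIGNAL filter implemented above.  kevent(2) only counts
 * deliveries (kn_data); it does not intercept the signal, so the example
 * ignores SIGUSR1 to suppress the default action.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	struct kevent kev;
	int kq;

	signal(SIGUSR1, SIG_IGN);
	kq = kqueue();
	EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
	if (kq == -1 || kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		return (1);
	/* Blocks until SIGUSR1 is posted; kev.data counts deliveries. */
	if (kevent(kq, NULL, 0, &kev, 1, NULL) == 1)
		printf("SIGUSR1 delivered %jd time(s)\n", (intmax_t)kev.data);
	return (0);
}
#endif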
4132
4133 struct sigacts *
4134 sigacts_alloc(void)
4135 {
4136 struct sigacts *ps;
4137
4138 ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO);
4139 refcount_init(&ps->ps_refcnt, 1);
4140 mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF);
4141 return (ps);
4142 }
4143
4144 void
4145 sigacts_free(struct sigacts *ps)
4146 {
4147
4148 if (refcount_release(&ps->ps_refcnt) == 0)
4149 return;
4150 mtx_destroy(&ps->ps_mtx);
4151 free(ps, M_SUBPROC);
4152 }
4153
4154 struct sigacts *
4155 sigacts_hold(struct sigacts *ps)
4156 {
4157
4158 refcount_acquire(&ps->ps_refcnt);
4159 return (ps);
4160 }
4161
4162 void
4163 sigacts_copy(struct sigacts *dest, struct sigacts *src)
4164 {
4165
4166 KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest"));
4167 mtx_lock(&src->ps_mtx);
4168 bcopy(src, dest, offsetof(struct sigacts, ps_refcnt));
4169 mtx_unlock(&src->ps_mtx);
4170 }
4171
4172 int
4173 sigacts_shared(struct sigacts *ps)
4174 {
4175
4176 return (ps->ps_refcnt > 1);
4177 }
4178
4179 void
4180 sig_drop_caught(struct proc *p)
4181 {
4182 int sig;
4183 struct sigacts *ps;
4184
4185 ps = p->p_sigacts;
4186 PROC_LOCK_ASSERT(p, MA_OWNED);
4187 mtx_assert(&ps->ps_mtx, MA_OWNED);
4188 SIG_FOREACH(sig, &ps->ps_sigcatch) {
4189 sigdflt(ps, sig);
4190 if ((sigprop(sig) & SIGPROP_IGNORE) != 0)
4191 sigqueue_delete_proc(p, sig);
4192 }
4193 }
4194
4195 static void
4196 sigfastblock_failed(struct thread *td, bool sendsig, bool write)
4197 {
4198 ksiginfo_t ksi;
4199
4200 /*
4201 	 * Prevent further fetches and SIGSEGVs, allowing the thread
4202 	 * to issue syscalls despite the corruption.
4203 */
4204 sigfastblock_clear(td);
4205
4206 if (!sendsig)
4207 return;
4208 ksiginfo_init_trap(&ksi);
4209 ksi.ksi_signo = SIGSEGV;
4210 ksi.ksi_code = write ? SEGV_ACCERR : SEGV_MAPERR;
4211 ksi.ksi_addr = td->td_sigblock_ptr;
4212 trapsignal(td, &ksi);
4213 }
4214
4215 static bool
4216 sigfastblock_fetch_sig(struct thread *td, bool sendsig, uint32_t *valp)
4217 {
4218 uint32_t res;
4219
4220 if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0)
4221 return (true);
4222 if (fueword32((void *)td->td_sigblock_ptr, &res) == -1) {
4223 sigfastblock_failed(td, sendsig, false);
4224 return (false);
4225 }
4226 *valp = res;
4227 td->td_sigblock_val = res & ~SIGFASTBLOCK_FLAGS;
4228 return (true);
4229 }
4230
4231 static void
4232 sigfastblock_resched(struct thread *td, bool resched)
4233 {
4234 struct proc *p;
4235
4236 if (resched) {
4237 p = td->td_proc;
4238 PROC_LOCK(p);
4239 reschedule_signals(p, td->td_sigmask, 0);
4240 PROC_UNLOCK(p);
4241 }
4242 thread_lock(td);
4243 td->td_flags |= TDF_ASTPENDING | TDF_NEEDSIGCHK;
4244 thread_unlock(td);
4245 }
4246
4247 int
4248 sys_sigfastblock(struct thread *td, struct sigfastblock_args *uap)
4249 {
4250 struct proc *p;
4251 int error, res;
4252 uint32_t oldval;
4253
4254 error = 0;
4255 p = td->td_proc;
4256 switch (uap->cmd) {
4257 case SIGFASTBLOCK_SETPTR:
4258 if ((td->td_pflags & TDP_SIGFASTBLOCK) != 0) {
4259 error = EBUSY;
4260 break;
4261 }
4262 if (((uintptr_t)(uap->ptr) & (sizeof(uint32_t) - 1)) != 0) {
4263 error = EINVAL;
4264 break;
4265 }
4266 td->td_pflags |= TDP_SIGFASTBLOCK;
4267 td->td_sigblock_ptr = uap->ptr;
4268 break;
4269
4270 case SIGFASTBLOCK_UNBLOCK:
4271 if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
4272 error = EINVAL;
4273 break;
4274 }
4275
4276 for (;;) {
4277 res = casueword32(td->td_sigblock_ptr,
4278 SIGFASTBLOCK_PEND, &oldval, 0);
4279 if (res == -1) {
4280 error = EFAULT;
4281 sigfastblock_failed(td, false, true);
4282 break;
4283 }
4284 if (res == 0)
4285 break;
4286 MPASS(res == 1);
4287 if (oldval != SIGFASTBLOCK_PEND) {
4288 error = EBUSY;
4289 break;
4290 }
4291 error = thread_check_susp(td, false);
4292 if (error != 0)
4293 break;
4294 }
4295 if (error != 0)
4296 break;
4297
4298 /*
4299 	 * td_sigblock_val is cleared here, but not on syscall
4300 	 * exit. The net effect is that a single interruptible
4301 	 * sleep, entered while the user sigblock word is set,
4302 	 * might return EINTR or ERESTART to usermode without
4303 	 * delivering a signal. All further sleeps, until
4304 	 * userspace clears the word and does
4305 	 * sigfastblock(UNBLOCK), observe the current word and
4306 	 * are no longer interrupted. This is a slight
4307 	 * non-conformance; the alternative would be to read
4308 	 * the sigblock word on each syscall entry.
4309 */
4310 td->td_sigblock_val = 0;
4311
4312 /*
4313 	 * Rely on the normal AST mechanism to deliver pending
4314 	 * signals to the current thread, but notify other
4315 	 * threads about the fake unblock.
4316 */
4317 sigfastblock_resched(td, error == 0 && p->p_numthreads != 1);
4318
4319 break;
4320
4321 case SIGFASTBLOCK_UNSETPTR:
4322 if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
4323 error = EINVAL;
4324 break;
4325 }
4326 if (!sigfastblock_fetch_sig(td, false, &oldval)) {
4327 error = EFAULT;
4328 break;
4329 }
4330 if (oldval != 0 && oldval != SIGFASTBLOCK_PEND) {
4331 error = EBUSY;
4332 break;
4333 }
4334 sigfastblock_clear(td);
4335 break;
4336
4337 default:
4338 error = EINVAL;
4339 break;
4340 }
4341 return (error);
4342 }
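/*
 * Illustrative userland sketch, not part of kern_sig.c, of the protocol
 * sys_sigfastblock() implements.  Assumptions are flagged: the
 * SIGFASTBLOCK_* constants are taken from <sys/signalvar.h> and the raw
 * syscall number from <sys/syscall.h>; real consumers (libthr, rtld) use
 * atomic operations on the word rather than the plain arithmetic shown
 * here.  See sigfastblock(2).
 */
#if 0
#include <sys/types.h>
#include <sys/syscall.h>
#include <sys/signalvar.h>	/* assumed home of SIGFASTBLOCK_* */
#include <stdint.h>
#include <unistd.h>

static uint32_t sigblock_word;	/* must be 32-bit aligned */

static void
fastblock_example(void)
{
	/* Register the word once per thread; a second SETPTR gets EBUSY. */
	syscall(SYS_sigfastblock, SIGFASTBLOCK_SETPTR, &sigblock_word);

	/* Block async signal delivery with a plain store, no syscall. */
	sigblock_word += SIGFASTBLOCK_INC;

	/* ... critical section ... */

	sigblock_word -= SIGFASTBLOCK_INC;
	/* If the kernel flagged a pending signal, ask for delivery. */
	if (sigblock_word == SIGFASTBLOCK_PEND)
		syscall(SYS_sigfastblock, SIGFASTBLOCK_UNBLOCK, NULL);
}
#endif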
4343
4344 void
4345 sigfastblock_clear(struct thread *td)
4346 {
4347 bool resched;
4348
4349 if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0)
4350 return;
4351 td->td_sigblock_val = 0;
4352 resched = (td->td_pflags & TDP_SIGFASTPENDING) != 0 ||
4353 SIGPENDING(td);
4354 td->td_pflags &= ~(TDP_SIGFASTBLOCK | TDP_SIGFASTPENDING);
4355 sigfastblock_resched(td, resched);
4356 }
4357
4358 void
4359 sigfastblock_fetch(struct thread *td)
4360 {
4361 uint32_t val;
4362
4363 (void)sigfastblock_fetch_sig(td, true, &val);
4364 }
4365
4366 static void
4367 sigfastblock_setpend1(struct thread *td)
4368 {
4369 int res;
4370 uint32_t oldval;
4371
4372 if ((td->td_pflags & TDP_SIGFASTPENDING) == 0)
4373 return;
4374 res = fueword32((void *)td->td_sigblock_ptr, &oldval);
4375 if (res == -1) {
4376 sigfastblock_failed(td, true, false);
4377 return;
4378 }
4379 for (;;) {
4380 res = casueword32(td->td_sigblock_ptr, oldval, &oldval,
4381 oldval | SIGFASTBLOCK_PEND);
4382 if (res == -1) {
4383 sigfastblock_failed(td, true, true);
4384 return;
4385 }
4386 if (res == 0) {
4387 td->td_sigblock_val = oldval & ~SIGFASTBLOCK_FLAGS;
4388 td->td_pflags &= ~TDP_SIGFASTPENDING;
4389 break;
4390 }
4391 MPASS(res == 1);
4392 if (thread_check_susp(td, false) != 0)
4393 break;
4394 }
4395 }
4396
4397 void
4398 sigfastblock_setpend(struct thread *td, bool resched)
4399 {
4400 struct proc *p;
4401
4402 sigfastblock_setpend1(td);
4403 if (resched) {
4404 p = td->td_proc;
4405 PROC_LOCK(p);
4406 reschedule_signals(p, fastblock_mask, SIGPROCMASK_FASTBLK);
4407 PROC_UNLOCK(p);
4408 }
4409 }