FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_sig.c
1 /*
2 * Copyright (c) 1982, 1986, 1989, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94
39 * $FreeBSD: releng/5.0/sys/kern/kern_sig.c 118752 2003-08-10 23:17:49Z nectar $
40 */
41
42 #include "opt_compat.h"
43 #include "opt_ktrace.h"
44
45 #include <sys/param.h>
46 #include <sys/kernel.h>
47 #include <sys/sysproto.h>
48 #include <sys/systm.h>
49 #include <sys/signalvar.h>
50 #include <sys/namei.h>
51 #include <sys/vnode.h>
52 #include <sys/event.h>
53 #include <sys/proc.h>
54 #include <sys/pioctl.h>
55 #include <sys/acct.h>
56 #include <sys/fcntl.h>
57 #include <sys/condvar.h>
58 #include <sys/lock.h>
59 #include <sys/mutex.h>
60 #include <sys/wait.h>
61 #include <sys/ktr.h>
62 #include <sys/ktrace.h>
63 #include <sys/resourcevar.h>
64 #include <sys/smp.h>
65 #include <sys/stat.h>
66 #include <sys/sx.h>
67 #include <sys/syscallsubr.h>
68 #include <sys/syslog.h>
69 #include <sys/sysent.h>
70 #include <sys/sysctl.h>
71 #include <sys/malloc.h>
72 #include <sys/unistd.h>
73
74 #include <machine/cpu.h>
75
76 #if !defined(COMPAT_FREEBSD4) && !defined(NO_COMPAT_FREEBSD4)
77 #error "You *really* want COMPAT_FREEBSD4 on -current for a while"
78 #endif
79 #if defined (__alpha__) && !defined(COMPAT_43)
80 #error "You *really* need COMPAT_43 on the alpha for longjmp(3)"
81 #endif
82
83 #define ONSIG 32 /* NSIG for osig* syscalls. XXX. */
84
85 static int coredump(struct thread *);
86 static int do_sigprocmask(struct proc *p, int how, sigset_t *set,
87 sigset_t *oset, int old);
88 static char *expand_name(const char *, uid_t, pid_t);
89 static int killpg1(struct thread *td, int sig, int pgid, int all);
90 static int sig_ffs(sigset_t *set);
91 static int sigprop(int sig);
92 static void stop(struct proc *);
93 static void tdsignal(struct thread *td, int sig, sig_t action);
94 static int filt_sigattach(struct knote *kn);
95 static void filt_sigdetach(struct knote *kn);
96 static int filt_signal(struct knote *kn, long hint);
97
98 struct filterops sig_filtops =
99 { 0, filt_sigattach, filt_sigdetach, filt_signal };
100
101 static int kern_logsigexit = 1;
102 SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
103 &kern_logsigexit, 0,
104 "Log processes quitting on abnormal signals to syslog(3)");
105
106 /*
107 * Policy -- Can ucred cr1 send SIGIO to process cr2?
108 * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
109 * in the right situations.
110 */
111 #define CANSIGIO(cr1, cr2) \
112 ((cr1)->cr_uid == 0 || \
113 (cr1)->cr_ruid == (cr2)->cr_ruid || \
114 (cr1)->cr_uid == (cr2)->cr_ruid || \
115 (cr1)->cr_ruid == (cr2)->cr_uid || \
116 (cr1)->cr_uid == (cr2)->cr_uid)
117
118 int sugid_coredump;
119 SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RW,
120 &sugid_coredump, 0, "Enable coredumping set user/group ID processes");
121
122 static int do_coredump = 1;
123 SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
124 &do_coredump, 0, "Enable/Disable coredumps");
125
126 /*
127 * Signal properties and actions.
128 * The array below categorizes the signals and their default actions
129 * according to the following properties:
130 */
131 #define SA_KILL 0x01 /* terminates process by default */
132 #define SA_CORE 0x02 /* ditto and coredumps */
133 #define SA_STOP 0x04 /* suspend process */
134 #define SA_TTYSTOP 0x08 /* ditto, from tty */
135 #define SA_IGNORE 0x10 /* ignore by default */
136 #define SA_CONT 0x20 /* continue if suspended */
137 #define SA_CANTMASK 0x40 /* non-maskable, catchable */
138
/* Default-action property flags for each signal, indexed via _SIG_IDX(). */
static int sigproptbl[NSIG] = {
        SA_KILL,                /* SIGHUP */
        SA_KILL,                /* SIGINT */
        SA_KILL|SA_CORE,        /* SIGQUIT */
        SA_KILL|SA_CORE,        /* SIGILL */
        SA_KILL|SA_CORE,        /* SIGTRAP */
        SA_KILL|SA_CORE,        /* SIGABRT */
        SA_KILL|SA_CORE,        /* SIGEMT */
        SA_KILL|SA_CORE,        /* SIGFPE */
        SA_KILL,                /* SIGKILL */
        SA_KILL|SA_CORE,        /* SIGBUS */
        SA_KILL|SA_CORE,        /* SIGSEGV */
        SA_KILL|SA_CORE,        /* SIGSYS */
        SA_KILL,                /* SIGPIPE */
        SA_KILL,                /* SIGALRM */
        SA_KILL,                /* SIGTERM */
        SA_IGNORE,              /* SIGURG */
        SA_STOP,                /* SIGSTOP */
        SA_STOP|SA_TTYSTOP,     /* SIGTSTP */
        SA_IGNORE|SA_CONT,      /* SIGCONT */
        SA_IGNORE,              /* SIGCHLD */
        SA_STOP|SA_TTYSTOP,     /* SIGTTIN */
        SA_STOP|SA_TTYSTOP,     /* SIGTTOU */
        SA_IGNORE,              /* SIGIO */
        SA_KILL,                /* SIGXCPU */
        SA_KILL,                /* SIGXFSZ */
        SA_KILL,                /* SIGVTALRM */
        SA_KILL,                /* SIGPROF */
        SA_IGNORE,              /* SIGWINCH */
        SA_IGNORE,              /* SIGINFO */
        SA_KILL,                /* SIGUSR1 */
        SA_KILL,                /* SIGUSR2 */
};
172
173 /*
174 * Determine signal that should be delivered to process p, the current
175 * process, 0 if none. If there is a pending stop signal with default
176 * action, the process stops in issignal().
177 * XXXKSE the check for a pending stop is not done under KSE
178 *
179 * MP SAFE.
180 */
181 int
182 cursig(struct thread *td)
183 {
184 struct proc *p = td->td_proc;
185
186 PROC_LOCK_ASSERT(p, MA_OWNED);
187 mtx_assert(&sched_lock, MA_NOTOWNED);
188 return (SIGPENDING(p) ? issignal(td) : 0);
189 }
190
/*
 * Arrange for ast() to handle unmasked pending signals on return to user
 * mode.  This must be called whenever a signal is added to p_siglist or
 * unmasked in p_sigmask.
 */
void
signotify(struct proc *p)
{
        struct kse *ke;
        struct ksegrp *kg;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        mtx_lock_spin(&sched_lock);
        if (SIGPENDING(p)) {
                /* Request a signal check on the next return to user mode. */
                p->p_sflag |= PS_NEEDSIGCHK;
                /* XXXKSE for now punish all KSEs */
                FOREACH_KSEGRP_IN_PROC(p, kg) {
                        FOREACH_KSE_IN_GROUP(kg, ke) {
                                ke->ke_flags |= KEF_ASTPENDING;
                        }
                }
        }
        mtx_unlock_spin(&sched_lock);
}
215
216 static __inline int
217 sigprop(int sig)
218 {
219
220 if (sig > 0 && sig < NSIG)
221 return (sigproptbl[_SIG_IDX(sig)]);
222 return (0);
223 }
224
225 static __inline int
226 sig_ffs(sigset_t *set)
227 {
228 int i;
229
230 for (i = 0; i < _SIG_WORDS; i++)
231 if (set->__bits[i])
232 return (ffs(set->__bits[i]) + (i * 32));
233 return (0);
234 }
235
/*
 * kern_sigaction: shared back end for
 *      sigaction
 *      freebsd4_sigaction
 *      osigaction
 *
 * Query (oact != NULL) and/or set (act != NULL) the disposition of
 * signal `sig' for thread td's process.  `flags' carries the
 * KSA_OSIGSET / KSA_FREEBSD4 bits marking which compatibility
 * trampoline the handler should use.
 *
 * Returns 0 on success, or EINVAL for an invalid signal number or an
 * attempt to install a non-default handler for SIGKILL/SIGSTOP.
 */
int
kern_sigaction(td, sig, act, oact, flags)
        struct thread *td;
        register int sig;
        struct sigaction *act, *oact;
        int flags;
{
        register struct sigacts *ps;
        struct proc *p = td->td_proc;

        if (!_SIG_VALID(sig))
                return (EINVAL);

        PROC_LOCK(p);
        ps = p->p_sigacts;
        if (oact) {
                /*
                 * Report the current disposition, reconstructing sa_flags
                 * from the per-signal membership sets.
                 */
                oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
                oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
                oact->sa_flags = 0;
                if (SIGISMEMBER(ps->ps_sigonstack, sig))
                        oact->sa_flags |= SA_ONSTACK;
                /* ps_sigintr holds interruptible signals, i.e. no SA_RESTART. */
                if (!SIGISMEMBER(ps->ps_sigintr, sig))
                        oact->sa_flags |= SA_RESTART;
                if (SIGISMEMBER(ps->ps_sigreset, sig))
                        oact->sa_flags |= SA_RESETHAND;
                if (SIGISMEMBER(ps->ps_signodefer, sig))
                        oact->sa_flags |= SA_NODEFER;
                if (SIGISMEMBER(ps->ps_siginfo, sig))
                        oact->sa_flags |= SA_SIGINFO;
                if (sig == SIGCHLD && p->p_procsig->ps_flag & PS_NOCLDSTOP)
                        oact->sa_flags |= SA_NOCLDSTOP;
                if (sig == SIGCHLD && p->p_procsig->ps_flag & PS_NOCLDWAIT)
                        oact->sa_flags |= SA_NOCLDWAIT;
        }
        if (act) {
                /* SIGKILL and SIGSTOP may never be caught or ignored. */
                if ((sig == SIGKILL || sig == SIGSTOP) &&
                    act->sa_handler != SIG_DFL) {
                        PROC_UNLOCK(p);
                        return (EINVAL);
                }

                /*
                 * Change setting atomically.
                 */

                ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
                SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
                if (act->sa_flags & SA_SIGINFO) {
                        ps->ps_sigact[_SIG_IDX(sig)] =
                            (__sighandler_t *)act->sa_sigaction;
                        SIGADDSET(ps->ps_siginfo, sig);
                } else {
                        ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
                        SIGDELSET(ps->ps_siginfo, sig);
                }
                if (!(act->sa_flags & SA_RESTART))
                        SIGADDSET(ps->ps_sigintr, sig);
                else
                        SIGDELSET(ps->ps_sigintr, sig);
                if (act->sa_flags & SA_ONSTACK)
                        SIGADDSET(ps->ps_sigonstack, sig);
                else
                        SIGDELSET(ps->ps_sigonstack, sig);
                if (act->sa_flags & SA_RESETHAND)
                        SIGADDSET(ps->ps_sigreset, sig);
                else
                        SIGDELSET(ps->ps_sigreset, sig);
                if (act->sa_flags & SA_NODEFER)
                        SIGADDSET(ps->ps_signodefer, sig);
                else
                        SIGDELSET(ps->ps_signodefer, sig);
#ifdef COMPAT_SUNOS
                if (act->sa_flags & SA_USERTRAMP)
                        SIGADDSET(ps->ps_usertramp, sig);
                else
                        SIGDELSET(ps->ps_usertramp, sig);
#endif
                if (sig == SIGCHLD) {
                        if (act->sa_flags & SA_NOCLDSTOP)
                                p->p_procsig->ps_flag |= PS_NOCLDSTOP;
                        else
                                p->p_procsig->ps_flag &= ~PS_NOCLDSTOP;
                        if (act->sa_flags & SA_NOCLDWAIT) {
                                /*
                                 * Paranoia: since SA_NOCLDWAIT is implemented
                                 * by reparenting the dying child to PID 1 (and
                                 * trust it to reap the zombie), PID 1 itself
                                 * is forbidden to set SA_NOCLDWAIT.
                                 */
                                if (p->p_pid == 1)
                                        p->p_procsig->ps_flag &= ~PS_NOCLDWAIT;
                                else
                                        p->p_procsig->ps_flag |= PS_NOCLDWAIT;
                        } else
                                p->p_procsig->ps_flag &= ~PS_NOCLDWAIT;
                        if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
                                p->p_procsig->ps_flag |= PS_CLDSIGIGN;
                        else
                                p->p_procsig->ps_flag &= ~PS_CLDSIGIGN;
                }
                /*
                 * Set bit in p_sigignore for signals that are set to SIG_IGN,
                 * and for signals set to SIG_DFL where the default is to
                 * ignore. However, don't put SIGCONT in p_sigignore, as we
                 * have to restart the process.
                 */
                if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
                    (sigprop(sig) & SA_IGNORE &&
                     ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
                        /* never to be seen again */
                        SIGDELSET(p->p_siglist, sig);
                        if (sig != SIGCONT)
                                /* easier in psignal */
                                SIGADDSET(p->p_sigignore, sig);
                        SIGDELSET(p->p_sigcatch, sig);
                } else {
                        SIGDELSET(p->p_sigignore, sig);
                        if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
                                SIGDELSET(p->p_sigcatch, sig);
                        else
                                SIGADDSET(p->p_sigcatch, sig);
                }
#ifdef COMPAT_FREEBSD4
                /* Track which signals use the FreeBSD 4 trampoline. */
                if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
                    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
                    (flags & KSA_FREEBSD4) == 0)
                        SIGDELSET(ps->ps_freebsd4, sig);
                else
                        SIGADDSET(ps->ps_freebsd4, sig);
#endif
#ifdef COMPAT_43
                /* Track which signals use the old (4.3BSD) sigset. */
                if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
                    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
                    (flags & KSA_OSIGSET) == 0)
                        SIGDELSET(ps->ps_osigset, sig);
                else
                        SIGADDSET(ps->ps_osigset, sig);
#endif
        }
        PROC_UNLOCK(p);
        return (0);
}
384
385 #ifndef _SYS_SYSPROTO_H_
386 struct sigaction_args {
387 int sig;
388 struct sigaction *act;
389 struct sigaction *oact;
390 };
391 #endif
392 /*
393 * MPSAFE
394 */
395 /* ARGSUSED */
396 int
397 sigaction(td, uap)
398 struct thread *td;
399 register struct sigaction_args *uap;
400 {
401 struct sigaction act, oact;
402 register struct sigaction *actp, *oactp;
403 int error;
404
405 mtx_lock(&Giant);
406
407 actp = (uap->act != NULL) ? &act : NULL;
408 oactp = (uap->oact != NULL) ? &oact : NULL;
409 if (actp) {
410 error = copyin(uap->act, actp, sizeof(act));
411 if (error)
412 goto done2;
413 }
414 error = kern_sigaction(td, uap->sig, actp, oactp, 0);
415 if (oactp && !error) {
416 error = copyout(oactp, uap->oact, sizeof(oact));
417 }
418 done2:
419 mtx_unlock(&Giant);
420 return (error);
421 }
422
423 #ifdef COMPAT_FREEBSD4
424 #ifndef _SYS_SYSPROTO_H_
425 struct freebsd4_sigaction_args {
426 int sig;
427 struct sigaction *act;
428 struct sigaction *oact;
429 };
430 #endif
431 /*
432 * MPSAFE
433 */
434 /* ARGSUSED */
435 int
436 freebsd4_sigaction(td, uap)
437 struct thread *td;
438 register struct freebsd4_sigaction_args *uap;
439 {
440 struct sigaction act, oact;
441 register struct sigaction *actp, *oactp;
442 int error;
443
444 mtx_lock(&Giant);
445
446 actp = (uap->act != NULL) ? &act : NULL;
447 oactp = (uap->oact != NULL) ? &oact : NULL;
448 if (actp) {
449 error = copyin(uap->act, actp, sizeof(act));
450 if (error)
451 goto done2;
452 }
453 error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4);
454 if (oactp && !error) {
455 error = copyout(oactp, uap->oact, sizeof(oact));
456 }
457 done2:
458 mtx_unlock(&Giant);
459 return (error);
460 }
#endif /* COMPAT_FREEBSD4 */
462
463 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
464 #ifndef _SYS_SYSPROTO_H_
465 struct osigaction_args {
466 int signum;
467 struct osigaction *nsa;
468 struct osigaction *osa;
469 };
470 #endif
471 /*
472 * MPSAFE
473 */
474 /* ARGSUSED */
475 int
476 osigaction(td, uap)
477 struct thread *td;
478 register struct osigaction_args *uap;
479 {
480 struct osigaction sa;
481 struct sigaction nsa, osa;
482 register struct sigaction *nsap, *osap;
483 int error;
484
485 if (uap->signum <= 0 || uap->signum >= ONSIG)
486 return (EINVAL);
487
488 nsap = (uap->nsa != NULL) ? &nsa : NULL;
489 osap = (uap->osa != NULL) ? &osa : NULL;
490
491 mtx_lock(&Giant);
492
493 if (nsap) {
494 error = copyin(uap->nsa, &sa, sizeof(sa));
495 if (error)
496 goto done2;
497 nsap->sa_handler = sa.sa_handler;
498 nsap->sa_flags = sa.sa_flags;
499 OSIG2SIG(sa.sa_mask, nsap->sa_mask);
500 }
501 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
502 if (osap && !error) {
503 sa.sa_handler = osap->sa_handler;
504 sa.sa_flags = osap->sa_flags;
505 SIG2OSIG(osap->sa_mask, sa.sa_mask);
506 error = copyout(&sa, uap->osa, sizeof(sa));
507 }
508 done2:
509 mtx_unlock(&Giant);
510 return (error);
511 }
512
513 #if !defined(__i386__) && !defined(__alpha__)
514 /* Avoid replicating the same stub everywhere */
int
osigreturn(td, uap)
        struct thread *td;
        struct osigreturn_args *uap;
{

        /* Not implemented on this architecture: fail as an unknown syscall. */
        return (nosys(td, (struct nosys_args *)uap));
}
523 #endif
524 #endif /* COMPAT_43 */
525
526 /*
527 * Initialize signal state for process 0;
528 * set to ignore signals that are ignored by default.
529 */
530 void
531 siginit(p)
532 struct proc *p;
533 {
534 register int i;
535
536 PROC_LOCK(p);
537 for (i = 1; i <= NSIG; i++)
538 if (sigprop(i) & SA_IGNORE && i != SIGCONT)
539 SIGADDSET(p->p_sigignore, i);
540 PROC_UNLOCK(p);
541 }
542
/*
 * Reset signals for an exec of the specified process: caught signals
 * revert to SIG_DFL (becoming ignored again where that is the default),
 * the alternate signal stack is disabled, and the SA_NOCLDWAIT /
 * SIGCHLD-ignored state is cleared.
 */
void
execsigs(p)
        register struct proc *p;
{
        register struct sigacts *ps;
        register int sig;

        /*
         * Reset caught signals.  Held signals remain held
         * through p_sigmask (unless they were caught,
         * and are now ignored by default).
         */
        PROC_LOCK_ASSERT(p, MA_OWNED);
        ps = p->p_sigacts;
        while (SIGNOTEMPTY(p->p_sigcatch)) {
                sig = sig_ffs(&p->p_sigcatch);
                SIGDELSET(p->p_sigcatch, sig);
                if (sigprop(sig) & SA_IGNORE) {
                        if (sig != SIGCONT)
                                SIGADDSET(p->p_sigignore, sig);
                        /* Discard any pending instance too. */
                        SIGDELSET(p->p_siglist, sig);
                }
                ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
        }
        /*
         * Reset stack state to the user stack.
         * Clear set of signals caught on the signal stack.
         */
        p->p_sigstk.ss_flags = SS_DISABLE;
        p->p_sigstk.ss_size = 0;
        p->p_sigstk.ss_sp = 0;
        p->p_flag &= ~P_ALTSTACK;
        /*
         * Reset no zombies if child dies flag as Solaris does.
         */
        p->p_procsig->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
        if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
                ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
}
585
586 /*
587 * do_sigprocmask()
588 *
589 * Manipulate signal mask.
590 */
591 static int
592 do_sigprocmask(p, how, set, oset, old)
593 struct proc *p;
594 int how;
595 sigset_t *set, *oset;
596 int old;
597 {
598 int error;
599
600 PROC_LOCK(p);
601 if (oset != NULL)
602 *oset = p->p_sigmask;
603
604 error = 0;
605 if (set != NULL) {
606 switch (how) {
607 case SIG_BLOCK:
608 SIG_CANTMASK(*set);
609 SIGSETOR(p->p_sigmask, *set);
610 break;
611 case SIG_UNBLOCK:
612 SIGSETNAND(p->p_sigmask, *set);
613 signotify(p);
614 break;
615 case SIG_SETMASK:
616 SIG_CANTMASK(*set);
617 if (old)
618 SIGSETLO(p->p_sigmask, *set);
619 else
620 p->p_sigmask = *set;
621 signotify(p);
622 break;
623 default:
624 error = EINVAL;
625 break;
626 }
627 }
628 PROC_UNLOCK(p);
629 return (error);
630 }
631
632 /*
633 * sigprocmask() - MP SAFE (XXXKSE not under KSE it isn't)
634 */
635
636 #ifndef _SYS_SYSPROTO_H_
637 struct sigprocmask_args {
638 int how;
639 const sigset_t *set;
640 sigset_t *oset;
641 };
642 #endif
643 int
644 sigprocmask(td, uap)
645 register struct thread *td;
646 struct sigprocmask_args *uap;
647 {
648 struct proc *p = td->td_proc;
649 sigset_t set, oset;
650 sigset_t *setp, *osetp;
651 int error;
652
653 setp = (uap->set != NULL) ? &set : NULL;
654 osetp = (uap->oset != NULL) ? &oset : NULL;
655 if (setp) {
656 error = copyin(uap->set, setp, sizeof(set));
657 if (error)
658 return (error);
659 }
660 error = do_sigprocmask(p, uap->how, setp, osetp, 0);
661 if (osetp && !error) {
662 error = copyout(osetp, uap->oset, sizeof(oset));
663 }
664 return (error);
665 }
666
667 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
668 /*
669 * osigprocmask() - MP SAFE
670 */
671 #ifndef _SYS_SYSPROTO_H_
672 struct osigprocmask_args {
673 int how;
674 osigset_t mask;
675 };
676 #endif
677 int
678 osigprocmask(td, uap)
679 register struct thread *td;
680 struct osigprocmask_args *uap;
681 {
682 struct proc *p = td->td_proc;
683 sigset_t set, oset;
684 int error;
685
686 OSIG2SIG(uap->mask, set);
687 error = do_sigprocmask(p, uap->how, &set, &oset, 1);
688 SIG2OSIG(oset, td->td_retval[0]);
689 return (error);
690 }
691 #endif /* COMPAT_43 */
692
693 #ifndef _SYS_SYSPROTO_H_
694 struct sigpending_args {
695 sigset_t *set;
696 };
697 #endif
698 /*
699 * MPSAFE
700 */
701 /* ARGSUSED */
702 int
703 sigpending(td, uap)
704 struct thread *td;
705 struct sigpending_args *uap;
706 {
707 struct proc *p = td->td_proc;
708 sigset_t siglist;
709 int error;
710
711 mtx_lock(&Giant);
712 PROC_LOCK(p);
713 siglist = p->p_siglist;
714 PROC_UNLOCK(p);
715 mtx_unlock(&Giant);
716 error = copyout(&siglist, uap->set, sizeof(sigset_t));
717 return(error);
718 }
719
720 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
721 #ifndef _SYS_SYSPROTO_H_
722 struct osigpending_args {
723 int dummy;
724 };
725 #endif
726 /*
727 * MPSAFE
728 */
729 /* ARGSUSED */
730 int
731 osigpending(td, uap)
732 struct thread *td;
733 struct osigpending_args *uap;
734 {
735 struct proc *p = td->td_proc;
736
737 mtx_lock(&Giant);
738 PROC_LOCK(p);
739 SIG2OSIG(p->p_siglist, td->td_retval[0]);
740 PROC_UNLOCK(p);
741 mtx_unlock(&Giant);
742 return (0);
743 }
744 #endif /* COMPAT_43 */
745
746 #if defined(COMPAT_43) || defined(COMPAT_SUNOS)
747 /*
748 * Generalized interface signal handler, 4.3-compatible.
749 */
750 #ifndef _SYS_SYSPROTO_H_
751 struct osigvec_args {
752 int signum;
753 struct sigvec *nsv;
754 struct sigvec *osv;
755 };
756 #endif
757 /*
758 * MPSAFE
759 */
760 /* ARGSUSED */
761 int
762 osigvec(td, uap)
763 struct thread *td;
764 register struct osigvec_args *uap;
765 {
766 struct sigvec vec;
767 struct sigaction nsa, osa;
768 register struct sigaction *nsap, *osap;
769 int error;
770
771 if (uap->signum <= 0 || uap->signum >= ONSIG)
772 return (EINVAL);
773 nsap = (uap->nsv != NULL) ? &nsa : NULL;
774 osap = (uap->osv != NULL) ? &osa : NULL;
775 if (nsap) {
776 error = copyin(uap->nsv, &vec, sizeof(vec));
777 if (error)
778 return (error);
779 nsap->sa_handler = vec.sv_handler;
780 OSIG2SIG(vec.sv_mask, nsap->sa_mask);
781 nsap->sa_flags = vec.sv_flags;
782 nsap->sa_flags ^= SA_RESTART; /* opposite of SV_INTERRUPT */
783 #ifdef COMPAT_SUNOS
784 nsap->sa_flags |= SA_USERTRAMP;
785 #endif
786 }
787 mtx_lock(&Giant);
788 error = kern_sigaction(td, uap->signum, nsap, osap, 1);
789 mtx_unlock(&Giant);
790 if (osap && !error) {
791 vec.sv_handler = osap->sa_handler;
792 SIG2OSIG(osap->sa_mask, vec.sv_mask);
793 vec.sv_flags = osap->sa_flags;
794 vec.sv_flags &= ~SA_NOCLDWAIT;
795 vec.sv_flags ^= SA_RESTART;
796 #ifdef COMPAT_SUNOS
797 vec.sv_flags &= ~SA_NOCLDSTOP;
798 #endif
799 error = copyout(&vec, uap->osv, sizeof(vec));
800 }
801 return (error);
802 }
803
804 #ifndef _SYS_SYSPROTO_H_
805 struct osigblock_args {
806 int mask;
807 };
808 #endif
809 /*
810 * MPSAFE
811 */
812 int
813 osigblock(td, uap)
814 register struct thread *td;
815 struct osigblock_args *uap;
816 {
817 struct proc *p = td->td_proc;
818 sigset_t set;
819
820 OSIG2SIG(uap->mask, set);
821 SIG_CANTMASK(set);
822 mtx_lock(&Giant);
823 PROC_LOCK(p);
824 SIG2OSIG(p->p_sigmask, td->td_retval[0]);
825 SIGSETOR(p->p_sigmask, set);
826 PROC_UNLOCK(p);
827 mtx_unlock(&Giant);
828 return (0);
829 }
830
831 #ifndef _SYS_SYSPROTO_H_
832 struct osigsetmask_args {
833 int mask;
834 };
835 #endif
836 /*
837 * MPSAFE
838 */
839 int
840 osigsetmask(td, uap)
841 struct thread *td;
842 struct osigsetmask_args *uap;
843 {
844 struct proc *p = td->td_proc;
845 sigset_t set;
846
847 OSIG2SIG(uap->mask, set);
848 SIG_CANTMASK(set);
849 mtx_lock(&Giant);
850 PROC_LOCK(p);
851 SIG2OSIG(p->p_sigmask, td->td_retval[0]);
852 SIGSETLO(p->p_sigmask, set);
853 signotify(p);
854 PROC_UNLOCK(p);
855 mtx_unlock(&Giant);
856 return (0);
857 }
858 #endif /* COMPAT_43 || COMPAT_SUNOS */
859
860 /*
861 * Suspend process until signal, providing mask to be set
862 * in the meantime. Note nonstandard calling convention:
863 * libc stub passes mask, not pointer, to save a copyin.
864 ***** XXXKSE this doesn't make sense under KSE.
865 ***** Do we suspend the thread or all threads in the process?
866 ***** How do we suspend threads running NOW on another processor?
867 */
868 #ifndef _SYS_SYSPROTO_H_
869 struct sigsuspend_args {
870 const sigset_t *sigmask;
871 };
872 #endif
873 /*
874 * MPSAFE
875 */
876 /* ARGSUSED */
877 int
878 sigsuspend(td, uap)
879 struct thread *td;
880 struct sigsuspend_args *uap;
881 {
882 sigset_t mask;
883 int error;
884
885 error = copyin(uap->sigmask, &mask, sizeof(mask));
886 if (error)
887 return (error);
888 return (kern_sigsuspend(td, mask));
889 }
890
/*
 * kern_sigsuspend: install `mask' as the temporary signal mask and
 * sleep until a signal interrupts the sleep.  The previous mask is
 * saved in p_oldsigmask and P_OLDMASK is set so it can be restored
 * after the signal handler runs.  Always returns EINTR.
 */
int
kern_sigsuspend(struct thread *td, sigset_t mask)
{
        struct proc *p = td->td_proc;
        register struct sigacts *ps;

        /*
         * When returning from sigsuspend, we want
         * the old mask to be restored after the
         * signal handler has finished.  Thus, we
         * save it here and mark the sigacts structure
         * to indicate this.
         */
        mtx_lock(&Giant);
        PROC_LOCK(p);
        ps = p->p_sigacts;
        p->p_oldsigmask = p->p_sigmask;
        p->p_flag |= P_OLDMASK;

        SIG_CANTMASK(mask);
        p->p_sigmask = mask;
        signotify(p);
        /* Sleep until PCATCH makes msleep() return non-zero on a signal. */
        while (msleep(ps, &p->p_mtx, PPAUSE|PCATCH, "pause", 0) == 0)
                /* void */;
        PROC_UNLOCK(p);
        mtx_unlock(&Giant);
        /* always return EINTR rather than ERESTART... */
        return (EINTR);
}
920
921 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
922 #ifndef _SYS_SYSPROTO_H_
923 struct osigsuspend_args {
924 osigset_t mask;
925 };
926 #endif
927 /*
928 * MPSAFE
929 */
930 /* ARGSUSED */
/*
 * osigsuspend: old-mask variant of sigsuspend(2).  The mask arrives by
 * value as an osigset_t instead of via copyin, and only the low
 * (old-style) part of the process mask is replaced.  Always returns
 * EINTR.
 */
int
osigsuspend(td, uap)
        struct thread *td;
        struct osigsuspend_args *uap;
{
        struct proc *p = td->td_proc;
        sigset_t mask;
        register struct sigacts *ps;

        mtx_lock(&Giant);
        PROC_LOCK(p);
        ps = p->p_sigacts;
        /* Save the old mask; P_OLDMASK restores it after handler return. */
        p->p_oldsigmask = p->p_sigmask;
        p->p_flag |= P_OLDMASK;
        OSIG2SIG(uap->mask, mask);
        SIG_CANTMASK(mask);
        SIGSETLO(p->p_sigmask, mask);
        signotify(p);
        while (msleep(ps, &p->p_mtx, PPAUSE|PCATCH, "opause", 0) == 0)
                /* void */;
        PROC_UNLOCK(p);
        mtx_unlock(&Giant);
        /* always return EINTR rather than ERESTART... */
        return (EINTR);
}
956 #endif /* COMPAT_43 */
957
958 #if defined(COMPAT_43) || defined(COMPAT_SUNOS)
959 #ifndef _SYS_SYSPROTO_H_
960 struct osigstack_args {
961 struct sigstack *nss;
962 struct sigstack *oss;
963 };
964 #endif
965 /*
966 * MPSAFE
967 */
968 /* ARGSUSED */
/*
 * osigstack: old 4.3BSD sigstack(2) compatibility.  Reports and/or sets
 * the per-process signal stack pointer.  The old interface carries no
 * size, so ss_size is reset to 0 when a new stack is installed.
 */
int
osigstack(td, uap)
        struct thread *td;
        register struct osigstack_args *uap;
{
        struct proc *p = td->td_proc;
        struct sigstack ss;
        int error = 0;

        mtx_lock(&Giant);

        if (uap->oss != NULL) {
                PROC_LOCK(p);
                ss.ss_sp = p->p_sigstk.ss_sp;
                ss.ss_onstack = sigonstack(cpu_getstack(td));
                PROC_UNLOCK(p);
                error = copyout(&ss, uap->oss, sizeof(struct sigstack));
                if (error)
                        goto done2;
        }

        if (uap->nss != NULL) {
                if ((error = copyin(uap->nss, &ss, sizeof(ss))) != 0)
                        goto done2;
                PROC_LOCK(p);
                p->p_sigstk.ss_sp = ss.ss_sp;
                /* No size information in the old interface. */
                p->p_sigstk.ss_size = 0;
                p->p_sigstk.ss_flags |= ss.ss_onstack & SS_ONSTACK;
                p->p_flag |= P_ALTSTACK;
                PROC_UNLOCK(p);
        }
done2:
        mtx_unlock(&Giant);
        return (error);
}
1004 #endif /* COMPAT_43 || COMPAT_SUNOS */
1005
1006 #ifndef _SYS_SYSPROTO_H_
1007 struct sigaltstack_args {
1008 stack_t *ss;
1009 stack_t *oss;
1010 };
1011 #endif
1012 /*
1013 * MPSAFE
1014 */
1015 /* ARGSUSED */
1016 int
1017 sigaltstack(td, uap)
1018 struct thread *td;
1019 register struct sigaltstack_args *uap;
1020 {
1021 stack_t ss, oss;
1022 int error;
1023
1024 if (uap->ss != NULL) {
1025 error = copyin(uap->ss, &ss, sizeof(ss));
1026 if (error)
1027 return (error);
1028 }
1029 error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
1030 (uap->oss != NULL) ? &oss : NULL);
1031 if (error)
1032 return (error);
1033 if (uap->oss != NULL)
1034 error = copyout(&oss, uap->oss, sizeof(stack_t));
1035 return (error);
1036 }
1037
/*
 * kern_sigaltstack: back end for sigaltstack(2).  Reports the current
 * alternate stack via *oss and/or installs *ss.  Fails with EPERM when
 * the thread is currently executing on the alternate stack, EINVAL for
 * unsupported ss_flags, and ENOMEM for an undersized stack.
 */
int
kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
{
        struct proc *p = td->td_proc;
        int oonstack;
        int error = 0;

        mtx_lock(&Giant);

        oonstack = sigonstack(cpu_getstack(td));

        if (oss != NULL) {
                PROC_LOCK(p);
                *oss = p->p_sigstk;
                /* Synthesize ss_flags: SS_DISABLE when no altstack is set. */
                oss->ss_flags = (p->p_flag & P_ALTSTACK)
                    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
                PROC_UNLOCK(p);
        }

        if (ss != NULL) {
                if (oonstack) {
                        /* Cannot change the stack while running on it. */
                        error = EPERM;
                        goto done2;
                }
                if ((ss->ss_flags & ~SS_DISABLE) != 0) {
                        error = EINVAL;
                        goto done2;
                }
                if (!(ss->ss_flags & SS_DISABLE)) {
                        if (ss->ss_size < p->p_sysent->sv_minsigstksz) {
                                error = ENOMEM;
                                goto done2;
                        }
                        PROC_LOCK(p);
                        p->p_sigstk = *ss;
                        p->p_flag |= P_ALTSTACK;
                        PROC_UNLOCK(p);
                } else {
                        PROC_LOCK(p);
                        p->p_flag &= ~P_ALTSTACK;
                        PROC_UNLOCK(p);
                }
        }
done2:
        mtx_unlock(&Giant);
        return (error);
}
1085
/*
 * Common code for kill process group/broadcast kill.
 * cp is calling process.
 *
 * If `all' is set, signal every process except system processes, init
 * and the caller; otherwise signal process group `pgid' (0 meaning the
 * caller's own group).  sig == 0 performs permission checks only.
 * Returns 0 if at least one eligible process was found, else ESRCH.
 */
static int
killpg1(td, sig, pgid, all)
        register struct thread *td;
        int sig, pgid, all;
{
        register struct proc *p;
        struct pgrp *pgrp;
        int nfound = 0;

        if (all) {
                /*
                 * broadcast
                 */
                sx_slock(&allproc_lock);
                LIST_FOREACH(p, &allproc, p_list) {
                        PROC_LOCK(p);
                        if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
                            p == td->td_proc) {
                                PROC_UNLOCK(p);
                                continue;
                        }
                        if (p_cansignal(td, p, sig) == 0) {
                                nfound++;
                                if (sig)
                                        psignal(p, sig);
                        }
                        PROC_UNLOCK(p);
                }
                sx_sunlock(&allproc_lock);
        } else {
                sx_slock(&proctree_lock);
                if (pgid == 0) {
                        /*
                         * zero pgid means send to my process group.
                         */
                        pgrp = td->td_proc->p_pgrp;
                        PGRP_LOCK(pgrp);
                } else {
                        /*
                         * NOTE(review): pgfind() appears to return the
                         * group already locked (both branches end with
                         * the group locked; see PGRP_UNLOCK below) —
                         * confirm against pgfind()'s contract.
                         */
                        pgrp = pgfind(pgid);
                        if (pgrp == NULL) {
                                sx_sunlock(&proctree_lock);
                                return (ESRCH);
                        }
                }
                sx_sunlock(&proctree_lock);
                LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
                        PROC_LOCK(p);
                        if (p->p_pid <= 1 || p->p_flag & P_SYSTEM) {
                                PROC_UNLOCK(p);
                                continue;
                        }
                        if (p->p_state == PRS_ZOMBIE) {
                                PROC_UNLOCK(p);
                                continue;
                        }
                        if (p_cansignal(td, p, sig) == 0) {
                                nfound++;
                                if (sig)
                                        psignal(p, sig);
                        }
                        PROC_UNLOCK(p);
                }
                PGRP_UNLOCK(pgrp);
        }
        return (nfound ? 0 : ESRCH);
}
1156
1157 #ifndef _SYS_SYSPROTO_H_
1158 struct kill_args {
1159 int pid;
1160 int signum;
1161 };
1162 #endif
1163 /*
1164 * MPSAFE
1165 */
1166 /* ARGSUSED */
/*
 * kill(2): pid > 0 signals that process; pid == 0 the caller's process
 * group; pid == -1 broadcasts; pid < -1 signals process group -pid.
 */
int
kill(td, uap)
        register struct thread *td;
        register struct kill_args *uap;
{
        register struct proc *p;
        int error = 0;

        if ((u_int)uap->signum > _SIG_MAXSIG)
                return (EINVAL);

        mtx_lock(&Giant);
        if (uap->pid > 0) {
                /* kill single process */
                /*
                 * NOTE(review): pfind() appears to return the process
                 * locked (every exit path below does PROC_UNLOCK) —
                 * confirm against pfind()'s contract.
                 */
                if ((p = pfind(uap->pid)) == NULL) {
                        error = ESRCH;
                } else if ((error = p_cansignal(td, p, uap->signum)) != 0) {
                        PROC_UNLOCK(p);
                } else {
                        if (uap->signum)
                                psignal(p, uap->signum);
                        PROC_UNLOCK(p);
                        error = 0;
                }
        } else {
                switch (uap->pid) {
                case -1:                /* broadcast signal */
                        error = killpg1(td, uap->signum, 0, 1);
                        break;
                case 0:                 /* signal own process group */
                        error = killpg1(td, uap->signum, 0, 0);
                        break;
                default:                /* negative explicit process group */
                        error = killpg1(td, uap->signum, -uap->pid, 0);
                        break;
                }
        }
        mtx_unlock(&Giant);
        return(error);
}
1207
1208 #if defined(COMPAT_43) || defined(COMPAT_SUNOS)
1209 #ifndef _SYS_SYSPROTO_H_
1210 struct okillpg_args {
1211 int pgid;
1212 int signum;
1213 };
1214 #endif
1215 /*
1216 * MPSAFE
1217 */
1218 /* ARGSUSED */
1219 int
1220 okillpg(td, uap)
1221 struct thread *td;
1222 register struct okillpg_args *uap;
1223 {
1224 int error;
1225
1226 if ((u_int)uap->signum > _SIG_MAXSIG)
1227 return (EINVAL);
1228 mtx_lock(&Giant);
1229 error = killpg1(td, uap->signum, uap->pgid, 0);
1230 mtx_unlock(&Giant);
1231 return (error);
1232 }
1233 #endif /* COMPAT_43 || COMPAT_SUNOS */
1234
/*
 * Send a signal to a process group.
 */
void
gsignal(pgid, sig)
        int pgid, sig;
{
        struct pgrp *pgrp;

        if (pgid != 0) {
                sx_slock(&proctree_lock);
                pgrp = pgfind(pgid);
                sx_sunlock(&proctree_lock);
                if (pgrp != NULL) {
                        /*
                         * NOTE(review): pgfind() appears to return the
                         * group locked (hence the PGRP_UNLOCK here, and
                         * pgsignal() asserts the lock is held) — confirm
                         * against pgfind()'s contract.
                         */
                        pgsignal(pgrp, sig, 0);
                        PGRP_UNLOCK(pgrp);
                }
        }
}
1254
1255 /*
1256 * Send a signal to a process group. If checktty is 1,
1257 * limit to members which have a controlling terminal.
1258 */
1259 void
1260 pgsignal(pgrp, sig, checkctty)
1261 struct pgrp *pgrp;
1262 int sig, checkctty;
1263 {
1264 register struct proc *p;
1265
1266 if (pgrp) {
1267 PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
1268 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1269 PROC_LOCK(p);
1270 if (checkctty == 0 || p->p_flag & P_CONTROLT)
1271 psignal(p, sig);
1272 PROC_UNLOCK(p);
1273 }
1274 }
1275 }
1276
/*
 * Send a signal caused by a trap to the current process.
 * If it will be caught immediately, deliver it with correct code.
 * Otherwise, post it normally.
 *
 * MPSAFE
 */
void
trapsignal(p, sig, code)
	struct proc *p;
	register int sig;
	u_long code;
{
	register struct sigacts *ps = p->p_sigacts;

	PROC_LOCK(p);
	/*
	 * Deliver directly only when the signal is caught, unmasked and
	 * the process is not traced; otherwise fall back to psignal().
	 */
	if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(p->p_sigcatch, sig) &&
	    !SIGISMEMBER(p->p_sigmask, sig)) {
		p->p_stats->p_ru.ru_nsignals++;
#ifdef KTRACE
		if (KTRPOINT(curthread, KTR_PSIG))
			ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
			    &p->p_sigmask, code);
#endif
		(*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)], sig,
		    &p->p_sigmask, code);
		/*
		 * Block the handler's catchmask, plus the signal itself
		 * unless SA_NODEFER was requested for it.
		 */
		SIGSETOR(p->p_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
		if (!SIGISMEMBER(ps->ps_signodefer, sig))
			SIGADDSET(p->p_sigmask, sig);
		if (SIGISMEMBER(ps->ps_sigreset, sig)) {
			/*
			 * See kern_sigaction() for origin of this code.
			 * SA_RESETHAND: revert the disposition to default.
			 */
			SIGDELSET(p->p_sigcatch, sig);
			if (sig != SIGCONT &&
			    sigprop(sig) & SA_IGNORE)
				SIGADDSET(p->p_sigignore, sig);
			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
		}
	} else {
		p->p_code = code;	/* XXX for core dump/debugger */
		p->p_sig = sig;		/* XXX to verify code */
		psignal(p, sig);
	}
	PROC_UNLOCK(p);
}
1323
/*
 * Send the signal to the process.  If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *   o When a stop signal is sent to a sleeping process that takes the
 *     default action, the process is stopped without awakening it.
 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 *     regardless of the signal action (eg, blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 *
 * The target process must be locked by the caller.
 */
void
psignal(p, sig)
	register struct proc *p;
	register int sig;
{
	register sig_t action;
	struct thread *td;
	register int prop;

	if (!_SIG_VALID(sig))
		panic("psignal(): invalid signal");

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KNOTE(&p->p_klist, NOTE_SIGNAL | sig);

	prop = sigprop(sig);
	/*
	 * If proc is traced, always give parent a chance;
	 * if signal event is tracked by procfs, give *that*
	 * a chance, as well.
	 */
	if ((p->p_flag & P_TRACED) || (p->p_stops & S_SIG)) {
		action = SIG_DFL;
	} else {
		/*
		 * If the signal is being ignored,
		 * then we forget about it immediately.
		 * (Note: we don't set SIGCONT in p_sigignore,
		 * and if it is set to SIG_IGN,
		 * action will be SIG_DFL here.)
		 */
		if (SIGISMEMBER(p->p_sigignore, sig) || (p->p_flag & P_WEXIT))
			return;
		if (SIGISMEMBER(p->p_sigmask, sig))
			action = SIG_HOLD;
		else if (SIGISMEMBER(p->p_sigcatch, sig))
			action = SIG_CATCH;
		else
			action = SIG_DFL;
	}

	/* A continue signal cancels any pending stop signals... */
	if (prop & SA_CONT)
		SIG_STOPSIGMASK(p->p_siglist);

	if (prop & SA_STOP) {
		/*
		 * If sending a tty stop signal to a member of an orphaned
		 * process group, discard the signal here if the action
		 * is default; don't stop the process below if sleeping,
		 * and don't clear any pending SIGCONT.
		 */
		if ((prop & SA_TTYSTOP) &&
		    (p->p_pgrp->pg_jobc == 0) &&
		    (action == SIG_DFL))
			return;
		/* ...and a stop signal cancels any pending SIGCONT. */
		SIG_CONTSIGMASK(p->p_siglist);
		p->p_flag &= ~P_CONTINUED;
	}
	SIGADDSET(p->p_siglist, sig);
	signotify(p);			/* uses schedlock */

	/*
	 * Some signals have a process-wide effect and a per-thread
	 * component.  Most processing occurs when the process next
	 * tries to cross the user boundary, however there are some
	 * times when processing needs to be done immediatly, such as
	 * waking up threads so that they can cross the user boundary.
	 * We try do the per-process part here.
	 */
	if (P_SHOULDSTOP(p)) {
		/*
		 * The process is in stopped mode. All the threads should be
		 * either winding down or already on the suspended queue.
		 */
		if (p->p_flag & P_TRACED) {
			/*
			 * The traced process is already stopped,
			 * so no further action is necessary.
			 * No signal can restart us.
			 */
			goto out;
		}

		if (sig == SIGKILL) {
			/*
			 * SIGKILL sets process running.
			 * It will die elsewhere.
			 * All threads must be restarted.
			 */
			p->p_flag &= ~P_STOPPED;
			goto runfast;
		}

		if (prop & SA_CONT) {
			/*
			 * If SIGCONT is default (or ignored), we continue the
			 * process but don't leave the signal in p_siglist as
			 * it has no further action.  If SIGCONT is held, we
			 * continue the process and leave the signal in
			 * p_siglist.  If the process catches SIGCONT, let it
			 * handle the signal itself.  If it isn't waiting on
			 * an event, it goes back to run state.
			 * Otherwise, process goes back to sleep state.
			 */
			p->p_flag &= ~P_STOPPED_SIG;
			p->p_flag |= P_CONTINUED;
			if (action == SIG_DFL) {
				SIGDELSET(p->p_siglist, sig);
			} else if (action == SIG_CATCH) {
				/*
				 * The process wants to catch it so it needs
				 * to run at least one thread, but which one?
				 * It would seem that the answer would be to
				 * run an upcall in the next KSE to run, and
				 * deliver the signal that way. In a NON KSE
				 * process, we need to make sure that the
				 * single thread is runnable asap.
				 * XXXKSE for now however, make them all run.
				 */
				goto runfast;
			}
			/*
			 * The signal is not ignored or caught.
			 */
			mtx_lock_spin(&sched_lock);
			thread_unsuspend(p);
			mtx_unlock_spin(&sched_lock);
			goto out;
		}

		if (prop & SA_STOP) {
			/*
			 * Already stopped, don't need to stop again
			 * (If we did the shell could get confused).
			 * Just make sure the signal STOP bit set.
			 */
			p->p_flag |= P_STOPPED_SIG;
			SIGDELSET(p->p_siglist, sig);
			goto out;
		}

		/*
		 * All other kinds of signals:
		 * If a thread is sleeping interruptibly, simulate a
		 * wakeup so that when it is continued it will be made
		 * runnable and can look at the signal.  However, don't make
		 * the PROCESS runnable, leave it stopped.
		 * It may run a bit until it hits a thread_suspend_check().
		 */
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td) {
			if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR)) {
				if (td->td_flags & TDF_CVWAITQ)
					cv_abort(td);
				else
					abortsleep(td);
			}
		}
		mtx_unlock_spin(&sched_lock);
		goto out;
		/*
		 * XXXKSE  What about threads that are waiting on mutexes?
		 * Shouldn't they abort too?
		 * No, hopefully mutexes are short lived.. They'll
		 * eventually hit thread_suspend_check().
		 */
	} else if (p->p_state == PRS_NORMAL) {
		if (prop & SA_CONT) {
			/*
			 * Already active, don't need to start again.
			 */
			SIGDELSET(p->p_siglist, sig);
			goto out;
		}
		if ((p->p_flag & P_TRACED) || (action != SIG_DFL) ||
		    !(prop & SA_STOP)) {
			/* Not a default-action stop: notify every thread. */
			mtx_lock_spin(&sched_lock);
			FOREACH_THREAD_IN_PROC(p, td)
				tdsignal(td, sig, action);
			mtx_unlock_spin(&sched_lock);
			goto out;
		}
		if (prop & SA_STOP) {
			/* Don't job-stop a child the parent is vforking on. */
			if (p->p_flag & P_PPWAIT)
				goto out;
			mtx_lock_spin(&sched_lock);
			FOREACH_THREAD_IN_PROC(p, td) {
				if (TD_IS_SLEEPING(td) &&
				    (td->td_flags & TDF_SINTR))
					thread_suspend_one(td);
			}
			if (p->p_suspcount == p->p_numthreads) {
				/*
				 * Every thread is now suspended: mark the
				 * process stopped and tell the parent,
				 * unless it asked for no SIGCHLD on stops.
				 */
				mtx_unlock_spin(&sched_lock);
				stop(p);
				p->p_xstat = sig;
				SIGDELSET(p->p_siglist, sig);
				PROC_LOCK(p->p_pptr);
				if ((p->p_pptr->p_procsig->ps_flag &
				    PS_NOCLDSTOP) == 0) {
					psignal(p->p_pptr, SIGCHLD);
				}
				PROC_UNLOCK(p->p_pptr);
			} else {
				mtx_unlock_spin(&sched_lock);
			}
			goto out;
		}
		else
			goto runfast;
		/* NOTREACHED */
	} else {
		/* Not in "NORMAL" state. discard the signal. */
		SIGDELSET(p->p_siglist, sig);
		goto out;
	}

	/*
	 * The process is not stopped so we need to apply the signal to all the
	 * running threads.
	 */

runfast:
	mtx_lock_spin(&sched_lock);
	FOREACH_THREAD_IN_PROC(p, td)
		tdsignal(td, sig, action);
	thread_unsuspend(p);
	mtx_unlock_spin(&sched_lock);
out:
	/* If we jump here, sched_lock should not be owned. */
	mtx_assert(&sched_lock, MA_NOTOWNED);
}
1568
/*
 * The force of a signal has been directed against a single
 * thread.  We need to see what we can do about knocking it
 * out of any sleep it may be in etc.
 *
 * Called with sched_lock held.  'action' is the process-level
 * disposition (SIG_DFL, SIG_CATCH or SIG_HOLD) chosen by psignal().
 */
static void
tdsignal(struct thread *td, int sig, sig_t action)
{
	struct proc *p = td->td_proc;
	register int prop;

	mtx_assert(&sched_lock, MA_OWNED);
	prop = sigprop(sig);
	/*
	 * Bring the priority of a thread up if we want it to get
	 * killed in this lifetime.
	 */
	if ((action == SIG_DFL) && (prop & SA_KILL)) {
		if (td->td_priority > PUSER) {
			td->td_priority = PUSER;
		}
	}

	/*
	 * Defer further processing for signals which are held,
	 * except that stopped processes must be continued by SIGCONT.
	 */
	if (action == SIG_HOLD) {
		return;
	}
	if (TD_IS_SLEEPING(td)) {
		/*
		 * If thread is sleeping uninterruptibly
		 * we can't interrupt the sleep... the signal will
		 * be noticed when the process returns through
		 * trap() or syscall().
		 */
		if ((td->td_flags & TDF_SINTR) == 0) {
			return;
		}
		/*
		 * Process is sleeping and traced.  Make it runnable
		 * so it can discover the signal in issignal() and stop
		 * for its parent.
		 */
		if (p->p_flag & P_TRACED) {
			p->p_flag &= ~P_STOPPED_TRACE;
		} else {

			/*
			 * If SIGCONT is default (or ignored) and process is
			 * asleep, we are finished; the process should not
			 * be awakened.
			 */
			if ((prop & SA_CONT) && action == SIG_DFL) {
				SIGDELSET(p->p_siglist, sig);
				return;
			}

			/*
			 * Raise priority to at least PUSER.
			 */
			if (td->td_priority > PUSER) {
				td->td_priority = PUSER;
			}
		}
		/* Knock the thread out of its interruptible sleep. */
		if (td->td_flags & TDF_CVWAITQ)
			cv_abort(td);
		else
			abortsleep(td);
	}
#ifdef SMP
	else {
		/*
		 * Other states do nothing with the signal immediatly,
		 * other than kicking ourselves if we are running.
		 * It will either never be noticed, or noticed very soon.
		 */
		if (TD_IS_RUNNING(td) && td != curthread) {
			forward_signal(td);
		}
	}
#endif
}
1653
/*
 * If the current process has received a signal (should be caught or cause
 * termination, should interrupt current syscall), return the signal number.
 * Stop signals with default action are processed immediately, then cleared;
 * they aren't returned.  This is checked after each entry to the system for
 * a syscall or trap (though this can usually be done without calling issignal
 * by checking the pending signal masks in cursig.) The normal call
 * sequence is
 *
 *	while (sig = cursig(curthread))
 *		postsig(sig);
 *
 * Must be called with the process of td locked.  The lock may be dropped
 * and re-acquired while stopping for a tracing parent or for job control.
 */
int
issignal(td)
	struct thread *td;
{
	struct proc *p;
	sigset_t mask;
	register int sig, prop;

	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	WITNESS_SLEEP(1, &p->p_mtx.mtx_object);
	for (;;) {
		int traced = (p->p_flag & P_TRACED) || (p->p_stops & S_SIG);

		/* Compute the set of pending, unmasked signals. */
		mask = p->p_siglist;
		SIGSETNAND(mask, p->p_sigmask);
		if (p->p_flag & P_PPWAIT)
			SIG_STOPSIGMASK(mask);
		if (SIGISEMPTY(mask))		/* no signal to send */
			return (0);
		sig = sig_ffs(&mask);
		prop = sigprop(sig);

		_STOPEVENT(p, S_SIG, sig);

		/*
		 * We should see pending but ignored signals
		 * only if P_TRACED was on when they were posted.
		 */
		if (SIGISMEMBER(p->p_sigignore, sig) && (traced == 0)) {
			SIGDELSET(p->p_siglist, sig);
			continue;
		}
		if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
			/*
			 * If traced, always stop.
			 */
			p->p_xstat = sig;
			PROC_LOCK(p->p_pptr);
			psignal(p->p_pptr, SIGCHLD);
			PROC_UNLOCK(p->p_pptr);
			mtx_lock_spin(&sched_lock);
			stop(p);	/* uses schedlock too eventually */
			thread_suspend_one(td);
			PROC_UNLOCK(p);
			DROP_GIANT();
			p->p_stats->p_ru.ru_nivcsw++;
			mi_switch();
			mtx_unlock_spin(&sched_lock);
			PICKUP_GIANT();
			PROC_LOCK(p);

			/*
			 * If the traced bit got turned off, go back up
			 * to the top to rescan signals.  This ensures
			 * that p_sig* and ps_sigact are consistent.
			 */
			if ((p->p_flag & P_TRACED) == 0)
				continue;

			/*
			 * If parent wants us to take the signal,
			 * then it will leave it in p->p_xstat;
			 * otherwise we just look for signals again.
			 */
			SIGDELSET(p->p_siglist, sig);	/* clear old signal */
			sig = p->p_xstat;
			if (sig == 0)
				continue;

			/*
			 * Put the new signal into p_siglist.  If the
			 * signal is being masked, look for other signals.
			 */
			SIGADDSET(p->p_siglist, sig);
			if (SIGISMEMBER(p->p_sigmask, sig))
				continue;
			signotify(p);
		}

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */
		switch ((int)(intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {

		case (int)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (p->p_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				printf("Process (pid %lu) got signal %d\n",
				    (u_long)p->p_pid, sig);
#endif
				break;		/* == ignore */
			}
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal.  However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				if (p->p_flag & P_TRACED ||
				    (p->p_pgrp->pg_jobc == 0 &&
				     prop & SA_TTYSTOP))
					break;	/* == ignore */
				p->p_xstat = sig;
				mtx_lock_spin(&sched_lock);
				/*
				 * If we are the last thread to suspend,
				 * notify the parent (unless it set
				 * PS_NOCLDSTOP) before stopping.
				 */
				if (p->p_suspcount+1 == p->p_numthreads) {
					mtx_unlock_spin(&sched_lock);
					PROC_LOCK(p->p_pptr);
					if ((p->p_pptr->p_procsig->ps_flag &
					    PS_NOCLDSTOP) == 0) {
						psignal(p->p_pptr, SIGCHLD);
					}
					PROC_UNLOCK(p->p_pptr);
					mtx_lock_spin(&sched_lock);
				}
				stop(p);
				thread_suspend_one(td);
				PROC_UNLOCK(p);
				DROP_GIANT();
				p->p_stats->p_ru.ru_nivcsw++;
				mi_switch();
				mtx_unlock_spin(&sched_lock);
				PICKUP_GIANT();
				PROC_LOCK(p);
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else
				return (sig);
			/*NOTREACHED*/

		case (int)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
			if ((prop & SA_CONT) == 0 &&
			    (p->p_flag & P_TRACED) == 0)
				printf("issignal\n");
			break;		/* == ignore */

		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			return (sig);
		}
		SIGDELSET(p->p_siglist, sig);		/* take the signal! */
	}
	/* NOTREACHED */
}
1834
1835 /*
1836 * Put the argument process into the stopped state and notify the parent
1837 * via wakeup. Signals are handled elsewhere. The process must not be
1838 * on the run queue. Must be called with the proc p locked and the scheduler
1839 * lock held.
1840 */
1841 static void
1842 stop(p)
1843 register struct proc *p;
1844 {
1845
1846 PROC_LOCK_ASSERT(p, MA_OWNED);
1847 p->p_flag |= P_STOPPED_SIG;
1848 p->p_flag &= ~P_WAITED;
1849 wakeup(p->p_pptr);
1850 }
1851
/*
 * Take the action for the specified signal
 * from the current set of pending signals.
 *
 * Must be called with the process of curthread locked.  Does not return
 * if the default action applies (sigexit() terminates the process).
 */
void
postsig(sig)
	register int sig;
{
	struct thread *td = curthread;
	register struct proc *p = td->td_proc;
	struct sigacts *ps;
	sig_t action;
	sigset_t returnmask;
	int code;

	KASSERT(sig != 0, ("postsig"));

	PROC_LOCK_ASSERT(p, MA_OWNED);
	ps = p->p_sigacts;
	SIGDELSET(p->p_siglist, sig);
	action = ps->ps_sigact[_SIG_IDX(sig)];
#ifdef KTRACE
	if (KTRPOINT(td, KTR_PSIG))
		ktrpsig(sig, action, p->p_flag & P_OLDMASK ?
		    &p->p_oldsigmask : &p->p_sigmask, 0);
#endif
	_STOPEVENT(p, S_SIG, sig);

	if (action == SIG_DFL) {
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		sigexit(td, sig);
		/* NOTREACHED */
	} else {
		/*
		 * If we get here, the signal must be caught.
		 */
		KASSERT(action != SIG_IGN && !SIGISMEMBER(p->p_sigmask, sig),
		    ("postsig action"));
		/*
		 * Set the new mask value and also defer further
		 * occurrences of this signal.
		 *
		 * Special case: user has done a sigsuspend.  Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigsuspend is what we want
		 * restored after the signal processing is completed.
		 */
		if (p->p_flag & P_OLDMASK) {
			returnmask = p->p_oldsigmask;
			p->p_flag &= ~P_OLDMASK;
		} else
			returnmask = p->p_sigmask;

		SIGSETOR(p->p_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
		if (!SIGISMEMBER(ps->ps_signodefer, sig))
			SIGADDSET(p->p_sigmask, sig);

		if (SIGISMEMBER(ps->ps_sigreset, sig)) {
			/*
			 * See kern_sigaction() for origin of this code.
			 * SA_RESETHAND: revert disposition to default.
			 */
			SIGDELSET(p->p_sigcatch, sig);
			if (sig != SIGCONT &&
			    sigprop(sig) & SA_IGNORE)
				SIGADDSET(p->p_sigignore, sig);
			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
		}
		p->p_stats->p_ru.ru_nsignals++;
		/*
		 * Pass the trap code to the handler only if it belongs
		 * to this signal; otherwise deliver 0.
		 */
		if (p->p_sig != sig) {
			code = 0;
		} else {
			code = p->p_code;
			p->p_code = 0;
			p->p_sig = 0;
		}
		if (p->p_flag & P_KSES)
			if (signal_upcall(p, sig))
				return;
		(*p->p_sysent->sv_sendsig)(action, sig, &returnmask, code);
	}
}
1936
1937 /*
1938 * Kill the current process for stated reason.
1939 */
1940 void
1941 killproc(p, why)
1942 struct proc *p;
1943 char *why;
1944 {
1945
1946 PROC_LOCK_ASSERT(p, MA_OWNED);
1947 CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)",
1948 p, p->p_pid, p->p_comm);
1949 log(LOG_ERR, "pid %d (%s), uid %d, was killed: %s\n", p->p_pid, p->p_comm,
1950 p->p_ucred ? p->p_ucred->cr_uid : -1, why);
1951 psignal(p, SIGKILL);
1952 }
1953
/*
 * Force the current process to exit with the specified signal, dumping core
 * if appropriate.  We bypass the normal tests for masked and caught signals,
 * allowing unrecoverable failures to terminate the process without changing
 * signal state.  Mark the accounting record with the signal termination.
 * If dumping core, save the signal number for the debugger.  Calls exit and
 * does not return.
 *
 * Entered with the process locked; the lock is dropped (and Giant taken)
 * before the process exits.
 */
void
sigexit(td, sig)
	struct thread *td;
	int sig;
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_acflag |= AXSIG;
	if (sigprop(sig) & SA_CORE) {
		p->p_sig = sig;
		/*
		 * Log signals which would cause core dumps
		 * (Log as LOG_INFO to appease those who don't want
		 * these messages.)
		 * XXX : Todo, as well as euid, write out ruid too
		 */
		PROC_UNLOCK(p);
		if (!mtx_owned(&Giant))
			mtx_lock(&Giant);
		/* On a successful dump, report "core dumped" in the status. */
		if (coredump(td) == 0)
			sig |= WCOREFLAG;
		if (kern_logsigexit)
			log(LOG_INFO,
			    "pid %d (%s), uid %d: exited on signal %d%s\n",
			    p->p_pid, p->p_comm,
			    td->td_ucred ? td->td_ucred->cr_uid : -1,
			    sig &~ WCOREFLAG,
			    sig & WCOREFLAG ? " (core dumped)" : "");
	} else {
		PROC_UNLOCK(p);
		if (!mtx_owned(&Giant))
			mtx_lock(&Giant);
	}
	exit1(td, W_EXITCODE(0, sig));
	/* NOTREACHED */
}
1999
/* Core dump file name template; adjustable at run time via kern.corefile. */
static char corefilename[MAXPATHLEN+1] = {"%N.core"};
SYSCTL_STRING(_kern, OID_AUTO, corefile, CTLFLAG_RW, corefilename,
	      sizeof(corefilename), "process corefile name format string");
2003
2004 /*
2005 * expand_name(name, uid, pid)
2006 * Expand the name described in corefilename, using name, uid, and pid.
2007 * corefilename is a printf-like string, with three format specifiers:
2008 * %N name of process ("name")
2009 * %P process id (pid)
2010 * %U user id (uid)
2011 * For example, "%N.core" is the default; they can be disabled completely
2012 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
2013 * This is controlled by the sysctl variable kern.corefile (see above).
2014 */
2015
2016 static char *
2017 expand_name(name, uid, pid)
2018 const char *name;
2019 uid_t uid;
2020 pid_t pid;
2021 {
2022 const char *format, *appendstr;
2023 char *temp;
2024 char buf[11]; /* Buffer for pid/uid -- max 4B */
2025 size_t i, l, n;
2026
2027 format = corefilename;
2028 temp = malloc(MAXPATHLEN, M_TEMP, M_NOWAIT | M_ZERO);
2029 if (temp == NULL)
2030 return (NULL);
2031 for (i = 0, n = 0; n < MAXPATHLEN && format[i]; i++) {
2032 switch (format[i]) {
2033 case '%': /* Format character */
2034 i++;
2035 switch (format[i]) {
2036 case '%':
2037 appendstr = "%";
2038 break;
2039 case 'N': /* process name */
2040 appendstr = name;
2041 break;
2042 case 'P': /* process id */
2043 sprintf(buf, "%u", pid);
2044 appendstr = buf;
2045 break;
2046 case 'U': /* user id */
2047 sprintf(buf, "%u", uid);
2048 appendstr = buf;
2049 break;
2050 default:
2051 appendstr = "";
2052 log(LOG_ERR,
2053 "Unknown format character %c in `%s'\n",
2054 format[i], format);
2055 }
2056 l = strlen(appendstr);
2057 if ((n + l) >= MAXPATHLEN)
2058 goto toolong;
2059 memcpy(temp + n, appendstr, l);
2060 n += l;
2061 break;
2062 default:
2063 temp[n++] = format[i];
2064 }
2065 }
2066 if (format[i] != '\0')
2067 goto toolong;
2068 return (temp);
2069 toolong:
2070 log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too long\n",
2071 (long)pid, name, (u_long)uid);
2072 free(temp, M_TEMP);
2073 return (NULL);
2074 }
2075
/*
 * Dump a process' core.  The main routine does some
 * policy checking, and creates the name of the coredump;
 * then it passes on a vnode and a size limit to the process-specific
 * coredump routine if there is one; if there _is not_ one, it returns
 * ENOSYS; otherwise it returns the error from the process-specific routine.
 *
 * Returns 0 on success; the caller holds Giant.
 */

static int
coredump(struct thread *td)
{
	struct proc *p = td->td_proc;
	register struct vnode *vp;
	register struct ucred *cred = td->td_ucred;
	struct flock lf;
	struct nameidata nd;
	struct vattr vattr;
	int error, error1, flags;
	struct mount *mp;
	char *name;			/* name of corefile */
	off_t limit;

	PROC_LOCK(p);
	_STOPEVENT(p, S_CORE, 0);

	/* Policy: no dumps for set-id processes unless explicitly enabled. */
	if (((sugid_coredump == 0) && p->p_flag & P_SUGID) || do_coredump == 0) {
		PROC_UNLOCK(p);
		return (EFAULT);
	}

	/*
	 * Note that the bulk of limit checking is done after
	 * the corefile is created.  The exception is if the limit
	 * for corefiles is 0, in which case we don't bother
	 * creating the corefile at all.  This layout means that
	 * a corefile is truncated instead of not being created,
	 * if it is larger than the limit.
	 */
	limit = p->p_rlimit[RLIMIT_CORE].rlim_cur;
	if (limit == 0) {
		PROC_UNLOCK(p);
		return 0;
	}
	PROC_UNLOCK(p);

restart:
	name = expand_name(p->p_comm, td->td_ucred->cr_uid, p->p_pid);
	if (name == NULL)
		return (EINVAL);
	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td); /* XXXKSE */
	flags = O_CREAT | FWRITE | O_NOFOLLOW;
	error = vn_open(&nd, &flags, S_IRUSR | S_IWUSR);
	free(name, M_TEMP);
	if (error)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vp = nd.ni_vp;

	/* Don't dump to non-regular files or files with links. */
	if (vp->v_type != VREG ||
	    VOP_GETATTR(vp, &vattr, cred, td) || vattr.va_nlink != 1) {
		VOP_UNLOCK(vp, 0, td);
		error = EFAULT;
		goto out2;
	}

	VOP_UNLOCK(vp, 0, td);
	/* Take an advisory write lock over the whole file. */
	lf.l_whence = SEEK_SET;
	lf.l_start = 0;
	lf.l_len = 0;
	lf.l_type = F_WRLCK;
	error = VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK);
	if (error)
		goto out2;

	/*
	 * If the filesystem is being suspended, undo everything and
	 * retry from the top once writes are permitted again.
	 */
	if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
		lf.l_type = F_UNLCK;
		VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
		if ((error = vn_close(vp, FWRITE, cred, td)) != 0)
			return (error);
		if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
			return (error);
		goto restart;
	}

	/* Truncate any pre-existing file before writing the image. */
	VATTR_NULL(&vattr);
	vattr.va_size = 0;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	VOP_LEASE(vp, td, cred, LEASE_WRITE);
	VOP_SETATTR(vp, &vattr, cred, td);
	VOP_UNLOCK(vp, 0, td);
	PROC_LOCK(p);
	p->p_acflag |= ACORE;
	PROC_UNLOCK(p);

	/* Hand off to the ABI-specific dumper, if the sysent has one. */
	error = p->p_sysent->sv_coredump ?
	  p->p_sysent->sv_coredump(td, vp, limit) :
	  ENOSYS;

	lf.l_type = F_UNLCK;
	VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
	vn_finished_write(mp);
out2:
	error1 = vn_close(vp, FWRITE, cred, td);
	if (error == 0)
		error = error1;
	return (error);
}
2184
2185 /*
2186 * Nonexistent system call-- signal process (may want to handle it).
2187 * Flag error in case process won't see signal immediately (blocked or ignored).
2188 */
2189 #ifndef _SYS_SYSPROTO_H_
2190 struct nosys_args {
2191 int dummy;
2192 };
2193 #endif
2194 /*
2195 * MPSAFE
2196 */
2197 /* ARGSUSED */
2198 int
2199 nosys(td, args)
2200 struct thread *td;
2201 struct nosys_args *args;
2202 {
2203 struct proc *p = td->td_proc;
2204
2205 mtx_lock(&Giant);
2206 PROC_LOCK(p);
2207 psignal(p, SIGSYS);
2208 PROC_UNLOCK(p);
2209 mtx_unlock(&Giant);
2210 return (ENOSYS);
2211 }
2212
/*
 * Send a SIGIO or SIGURG signal to a process or process group using
 * stored credentials rather than those of the current process.
 *
 * sio_pgid > 0 names a single process and sio_pgid < 0 a process group
 * (the fcntl(F_SETOWN) encoding).  When checkctty is set, only group
 * members with a controlling terminal receive the signal.
 */
void
pgsigio(sigiop, sig, checkctty)
	struct sigio **sigiop;
	int sig, checkctty;
{
	struct sigio *sigio;

	SIGIO_LOCK();
	sigio = *sigiop;
	if (sigio == NULL) {
		SIGIO_UNLOCK();
		return;
	}
	if (sigio->sio_pgid > 0) {
		PROC_LOCK(sigio->sio_proc);
		/* Check the stored credentials against the target's. */
		if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
			psignal(sigio->sio_proc, sig);
		PROC_UNLOCK(sigio->sio_proc);
	} else if (sigio->sio_pgid < 0) {
		struct proc *p;

		PGRP_LOCK(sigio->sio_pgrp);
		LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
			    (checkctty == 0 || (p->p_flag & P_CONTROLT)))
				psignal(p, sig);
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(sigio->sio_pgrp);
	}
	SIGIO_UNLOCK();
}
2250
2251 static int
2252 filt_sigattach(struct knote *kn)
2253 {
2254 struct proc *p = curproc;
2255
2256 kn->kn_ptr.p_proc = p;
2257 kn->kn_flags |= EV_CLEAR; /* automatically set */
2258
2259 PROC_LOCK(p);
2260 SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
2261 PROC_UNLOCK(p);
2262
2263 return (0);
2264 }
2265
2266 static void
2267 filt_sigdetach(struct knote *kn)
2268 {
2269 struct proc *p = kn->kn_ptr.p_proc;
2270
2271 PROC_LOCK(p);
2272 SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
2273 PROC_UNLOCK(p);
2274 }
2275
2276 /*
2277 * signal knotes are shared with proc knotes, so we apply a mask to
2278 * the hint in order to differentiate them from process hints. This
2279 * could be avoided by using a signal-specific knote list, but probably
2280 * isn't worth the trouble.
2281 */
2282 static int
2283 filt_signal(struct knote *kn, long hint)
2284 {
2285
2286 if (hint & NOTE_SIGNAL) {
2287 hint &= ~NOTE_SIGNAL;
2288
2289 if (kn->kn_id == hint)
2290 kn->kn_data++;
2291 }
2292 return (kn->kn_data != 0);
2293 }
Cache object: bc0ab861f7e20c37cbb926c75f08ff41
|