FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_sig.c

    1 /*
    2  * Copyright (c) 1982, 1986, 1989, 1991, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  * (c) UNIX System Laboratories, Inc.
    5  * All or some portions of this file are derived from material licensed
    6  * to the University of California by American Telephone and Telegraph
    7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
    8  * the permission of UNIX System Laboratories, Inc.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  * 3. Neither the name of the University nor the names of its contributors
   19  *    may be used to endorse or promote products derived from this software
   20  *    without specific prior written permission.
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   32  * SUCH DAMAGE.
   33  *
   34  *      @(#)kern_sig.c  8.7 (Berkeley) 4/18/94
   35  * $FreeBSD: src/sys/kern/kern_sig.c,v 1.72.2.17 2003/05/16 16:34:34 obrien Exp $
   36  */
   37 
   38 #include "opt_ktrace.h"
   39 
   40 #include <sys/param.h>
   41 #include <sys/systm.h>
   42 #include <sys/kernel.h>
   43 #include <sys/sysproto.h>
   44 #include <sys/signalvar.h>
   45 #include <sys/resourcevar.h>
   46 #include <sys/vnode.h>
   47 #include <sys/event.h>
   48 #include <sys/proc.h>
   49 #include <sys/nlookup.h>
   50 #include <sys/pioctl.h>
   51 #include <sys/acct.h>
   52 #include <sys/fcntl.h>
   53 #include <sys/lock.h>
   54 #include <sys/wait.h>
   55 #include <sys/ktrace.h>
   56 #include <sys/syslog.h>
   57 #include <sys/stat.h>
   58 #include <sys/sysent.h>
   59 #include <sys/sysctl.h>
   60 #include <sys/malloc.h>
   61 #include <sys/interrupt.h>
   62 #include <sys/unistd.h>
   63 #include <sys/kern_syscall.h>
   64 #include <sys/vkernel.h>
   65 
   66 #include <sys/signal2.h>
   67 #include <sys/thread2.h>
   68 #include <sys/spinlock2.h>
   69 
   70 #include <machine/cpu.h>
   71 #include <machine/smp.h>
   72 
   73 static int      coredump(struct lwp *, int);
   74 static char     *expand_name(const char *, uid_t, pid_t);
   75 static int      dokillpg(int sig, int pgid, int all);
   76 static int      sig_ffs(sigset_t *set);
   77 static int      sigprop(int sig);
   78 static void     lwp_signotify(struct lwp *lp);
   79 static void     lwp_signotify_remote(void *arg);
   80 static int      kern_sigtimedwait(sigset_t set, siginfo_t *info,
   81                     struct timespec *timeout);
   82 
   83 static int      filt_sigattach(struct knote *kn);
   84 static void     filt_sigdetach(struct knote *kn);
   85 static int      filt_signal(struct knote *kn, long hint);
   86 
   87 struct filterops sig_filtops =
   88         { 0, filt_sigattach, filt_sigdetach, filt_signal };
   89 
   90 static int      kern_logsigexit = 1;
   91 SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW, 
   92     &kern_logsigexit, 0, 
   93     "Log processes quitting on abnormal signals to syslog(3)");
   94 
   95 /*
    96  * Can the current process (curproc) send the signal sig to process q?
   97  */
   98 #define CANSIGNAL(q, sig) \
   99         (!p_trespass(curproc->p_ucred, (q)->p_ucred) || \
  100         ((sig) == SIGCONT && (q)->p_session == curproc->p_session))
  101 
  102 /*
  103  * Policy -- Can real uid ruid with ucred uc send a signal to process q?
  104  */
  105 #define CANSIGIO(ruid, uc, q) \
  106         ((uc)->cr_uid == 0 || \
  107             (ruid) == (q)->p_ucred->cr_ruid || \
  108             (uc)->cr_uid == (q)->p_ucred->cr_ruid || \
  109             (ruid) == (q)->p_ucred->cr_uid || \
  110             (uc)->cr_uid == (q)->p_ucred->cr_uid)
  111 
  112 int sugid_coredump;
  113 SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RW, 
  114         &sugid_coredump, 0, "Enable coredumping set user/group ID processes");
  115 
  116 static int      do_coredump = 1;
  117 SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
  118         &do_coredump, 0, "Enable/Disable coredumps");
  119 
  120 /*
  121  * Signal properties and actions.
  122  * The array below categorizes the signals and their default actions
  123  * according to the following properties:
  124  */
  125 #define SA_KILL         0x01            /* terminates process by default */
  126 #define SA_CORE         0x02            /* ditto and coredumps */
  127 #define SA_STOP         0x04            /* suspend process */
  128 #define SA_TTYSTOP      0x08            /* ditto, from tty */
  129 #define SA_IGNORE       0x10            /* ignore by default */
  130 #define SA_CONT         0x20            /* continue if suspended */
  131 #define SA_CANTMASK     0x40            /* non-maskable, catchable */
  132 #define SA_CKPT         0x80            /* checkpoint process */
  133 
  134 
  135 static int sigproptbl[NSIG] = {
  136         SA_KILL,                /* SIGHUP */
  137         SA_KILL,                /* SIGINT */
  138         SA_KILL|SA_CORE,        /* SIGQUIT */
  139         SA_KILL|SA_CORE,        /* SIGILL */
  140         SA_KILL|SA_CORE,        /* SIGTRAP */
  141         SA_KILL|SA_CORE,        /* SIGABRT */
  142         SA_KILL|SA_CORE,        /* SIGEMT */
  143         SA_KILL|SA_CORE,        /* SIGFPE */
  144         SA_KILL,                /* SIGKILL */
  145         SA_KILL|SA_CORE,        /* SIGBUS */
  146         SA_KILL|SA_CORE,        /* SIGSEGV */
  147         SA_KILL|SA_CORE,        /* SIGSYS */
  148         SA_KILL,                /* SIGPIPE */
  149         SA_KILL,                /* SIGALRM */
  150         SA_KILL,                /* SIGTERM */
  151         SA_IGNORE,              /* SIGURG */
  152         SA_STOP,                /* SIGSTOP */
  153         SA_STOP|SA_TTYSTOP,     /* SIGTSTP */
  154         SA_IGNORE|SA_CONT,      /* SIGCONT */
  155         SA_IGNORE,              /* SIGCHLD */
  156         SA_STOP|SA_TTYSTOP,     /* SIGTTIN */
  157         SA_STOP|SA_TTYSTOP,     /* SIGTTOU */
  158         SA_IGNORE,              /* SIGIO */
  159         SA_KILL,                /* SIGXCPU */
  160         SA_KILL,                /* SIGXFSZ */
  161         SA_KILL,                /* SIGVTALRM */
  162         SA_KILL,                /* SIGPROF */
  163         SA_IGNORE,              /* SIGWINCH  */
  164         SA_IGNORE,              /* SIGINFO */
  165         SA_KILL,                /* SIGUSR1 */
  166         SA_KILL,                /* SIGUSR2 */
  167         SA_IGNORE,              /* SIGTHR */
  168         SA_CKPT,                /* SIGCKPT */ 
  169         SA_KILL|SA_CKPT,        /* SIGCKPTEXIT */  
  170         SA_IGNORE,
  171         SA_IGNORE,
  172         SA_IGNORE,
  173         SA_IGNORE,
  174         SA_IGNORE,
  175         SA_IGNORE,
  176         SA_IGNORE,
  177         SA_IGNORE,
  178         SA_IGNORE,
  179         SA_IGNORE,
  180         SA_IGNORE,
  181         SA_IGNORE,
  182         SA_IGNORE,
  183         SA_IGNORE,
  184         SA_IGNORE,
  185         SA_IGNORE,
  186         SA_IGNORE,
  187         SA_IGNORE,
  188         SA_IGNORE,
  189         SA_IGNORE,
  190         SA_IGNORE,
  191         SA_IGNORE,
  192         SA_IGNORE,
  193         SA_IGNORE,
  194         SA_IGNORE,
  195         SA_IGNORE,
  196         SA_IGNORE,
  197         SA_IGNORE,
  198         SA_IGNORE,
  199         SA_IGNORE,
  200 
  201 };
  202 
  203 static __inline int
  204 sigprop(int sig)
  205 {
  206 
  207         if (sig > 0 && sig < NSIG)
  208                 return (sigproptbl[_SIG_IDX(sig)]);
  209         return (0);
  210 }
  211 
  212 static __inline int
  213 sig_ffs(sigset_t *set)
  214 {
  215         int i;
  216 
  217         for (i = 0; i < _SIG_WORDS; i++)
  218                 if (set->__bits[i])
  219                         return (ffs(set->__bits[i]) + (i * 32));
  220         return (0);
  221 }
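
/*
 * Illustrative sketch, not part of the original file: how sig_ffs() and
 * sigprop() are meant to be combined.  Signal sig occupies bit (sig - 1)
 * of the set, so a set whose first word is 0x2 holds only SIGINT (2) and
 * sig_ffs() returns 2.  The hypothetical helper below is kept under
 * "#if 0" because it is an example, not kernel code.
 */
#if 0
static void
example_lowest_pending(sigset_t *pending)
{
        int sig = sig_ffs(pending);     /* lowest pending signal, 0 if none */

        if (sig && (sigprop(sig) & SA_CORE))
                kprintf("signal %d dumps core by default\n", sig);
}
#endif
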
  222 
  223 /* 
  224  * No requirements. 
  225  */
  226 int
  227 kern_sigaction(int sig, struct sigaction *act, struct sigaction *oact)
  228 {
  229         struct thread *td = curthread;
  230         struct proc *p = td->td_proc;
  231         struct lwp *lp;
  232         struct sigacts *ps = p->p_sigacts;
  233 
  234         if (sig <= 0 || sig > _SIG_MAXSIG)
  235                 return (EINVAL);
  236 
  237         lwkt_gettoken(&p->p_token);
  238 
  239         if (oact) {
  240                 oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
  241                 oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
  242                 oact->sa_flags = 0;
  243                 if (SIGISMEMBER(ps->ps_sigonstack, sig))
  244                         oact->sa_flags |= SA_ONSTACK;
  245                 if (!SIGISMEMBER(ps->ps_sigintr, sig))
  246                         oact->sa_flags |= SA_RESTART;
  247                 if (SIGISMEMBER(ps->ps_sigreset, sig))
  248                         oact->sa_flags |= SA_RESETHAND;
  249                 if (SIGISMEMBER(ps->ps_signodefer, sig))
  250                         oact->sa_flags |= SA_NODEFER;
  251                 if (SIGISMEMBER(ps->ps_siginfo, sig))
  252                         oact->sa_flags |= SA_SIGINFO;
  253                 if (sig == SIGCHLD && p->p_sigacts->ps_flag & PS_NOCLDSTOP)
  254                         oact->sa_flags |= SA_NOCLDSTOP;
  255                 if (sig == SIGCHLD && p->p_sigacts->ps_flag & PS_NOCLDWAIT)
  256                         oact->sa_flags |= SA_NOCLDWAIT;
  257         }
  258         if (act) {
  259                 /*
  260                  * Check for invalid requests.  KILL and STOP cannot be
  261                  * caught.
  262                  */
  263                 if (sig == SIGKILL || sig == SIGSTOP) {
  264                         if (act->sa_handler != SIG_DFL) {
  265                                 lwkt_reltoken(&p->p_token);
  266                                 return (EINVAL);
  267                         }
  268                 }
  269 
  270                 /*
  271                  * Change setting atomically.
  272                  */
  273                 ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
  274                 SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
  275                 if (act->sa_flags & SA_SIGINFO) {
  276                         ps->ps_sigact[_SIG_IDX(sig)] =
  277                             (__sighandler_t *)act->sa_sigaction;
  278                         SIGADDSET(ps->ps_siginfo, sig);
  279                 } else {
  280                         ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
  281                         SIGDELSET(ps->ps_siginfo, sig);
  282                 }
  283                 if (!(act->sa_flags & SA_RESTART))
  284                         SIGADDSET(ps->ps_sigintr, sig);
  285                 else
  286                         SIGDELSET(ps->ps_sigintr, sig);
  287                 if (act->sa_flags & SA_ONSTACK)
  288                         SIGADDSET(ps->ps_sigonstack, sig);
  289                 else
  290                         SIGDELSET(ps->ps_sigonstack, sig);
  291                 if (act->sa_flags & SA_RESETHAND)
  292                         SIGADDSET(ps->ps_sigreset, sig);
  293                 else
  294                         SIGDELSET(ps->ps_sigreset, sig);
  295                 if (act->sa_flags & SA_NODEFER)
  296                         SIGADDSET(ps->ps_signodefer, sig);
  297                 else
  298                         SIGDELSET(ps->ps_signodefer, sig);
  299                 if (sig == SIGCHLD) {
  300                         if (act->sa_flags & SA_NOCLDSTOP)
  301                                 p->p_sigacts->ps_flag |= PS_NOCLDSTOP;
  302                         else
  303                                 p->p_sigacts->ps_flag &= ~PS_NOCLDSTOP;
  304                         if (act->sa_flags & SA_NOCLDWAIT) {
  305                                 /*
  306                                  * Paranoia: since SA_NOCLDWAIT is implemented
  307                                  * by reparenting the dying child to PID 1 (and
  308                                  * trust it to reap the zombie), PID 1 itself
  309                                  * is forbidden to set SA_NOCLDWAIT.
  310                                  */
  311                                 if (p->p_pid == 1)
  312                                         p->p_sigacts->ps_flag &= ~PS_NOCLDWAIT;
  313                                 else
  314                                         p->p_sigacts->ps_flag |= PS_NOCLDWAIT;
  315                         } else {
  316                                 p->p_sigacts->ps_flag &= ~PS_NOCLDWAIT;
  317                         }
  318                         if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
  319                                 ps->ps_flag |= PS_CLDSIGIGN;
  320                         else
  321                                 ps->ps_flag &= ~PS_CLDSIGIGN;
  322                 }
  323                 /*
  324                  * Set bit in p_sigignore for signals that are set to SIG_IGN,
  325                  * and for signals set to SIG_DFL where the default is to
  326                  * ignore. However, don't put SIGCONT in p_sigignore, as we
  327                  * have to restart the process.
  328                  *
  329                  * Also remove the signal from the process and lwp signal
  330                  * list.
  331                  */
  332                 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
  333                     (sigprop(sig) & SA_IGNORE &&
  334                      ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
  335                         SIGDELSET(p->p_siglist, sig);
  336                         FOREACH_LWP_IN_PROC(lp, p) {
  337                                 spin_lock(&lp->lwp_spin);
  338                                 SIGDELSET(lp->lwp_siglist, sig);
  339                                 spin_unlock(&lp->lwp_spin);
  340                         }
  341                         if (sig != SIGCONT) {
  342                                 /* easier in ksignal */
  343                                 SIGADDSET(p->p_sigignore, sig);
  344                         }
  345                         SIGDELSET(p->p_sigcatch, sig);
  346                 } else {
  347                         SIGDELSET(p->p_sigignore, sig);
  348                         if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
  349                                 SIGDELSET(p->p_sigcatch, sig);
  350                         else
  351                                 SIGADDSET(p->p_sigcatch, sig);
  352                 }
  353         }
  354         lwkt_reltoken(&p->p_token);
  355         return (0);
  356 }
  357 
  358 int
  359 sys_sigaction(struct sigaction_args *uap)
  360 {
  361         struct sigaction act, oact;
  362         struct sigaction *actp, *oactp;
  363         int error;
  364 
  365         actp = (uap->act != NULL) ? &act : NULL;
  366         oactp = (uap->oact != NULL) ? &oact : NULL;
  367         if (actp) {
  368                 error = copyin(uap->act, actp, sizeof(act));
  369                 if (error)
  370                         return (error);
  371         }
  372         error = kern_sigaction(uap->sig, actp, oactp);
  373         if (oactp && !error) {
  374                 error = copyout(oactp, uap->oact, sizeof(oact));
  375         }
  376         return (error);
  377 }
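
/*
 * Userland usage sketch (not part of this file): the SA_* flags decoded by
 * kern_sigaction() above are the flags a program passes to sigaction(2).
 * Hypothetical example installing a SIGINT handler with SA_SIGINFO and
 * SA_RESTART, which end up recorded in ps_siginfo and cleared from
 * ps_sigintr respectively.
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void
on_sigint(int sig, siginfo_t *si, void *ctx)
{
        (void)ctx;
        /* illustration only; printf is not async-signal-safe */
        printf("caught signal %d from pid %ld\n", sig, (long)si->si_pid);
}

int
main(void)
{
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = on_sigint;
        sa.sa_flags = SA_SIGINFO | SA_RESTART;
        sigemptyset(&sa.sa_mask);
        if (sigaction(SIGINT, &sa, NULL) == -1) {
                perror("sigaction");
                return (1);
        }
        for (;;)
                pause();
}
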
  378 
  379 /*
  380  * Initialize signal state for process 0;
  381  * set to ignore signals that are ignored by default.
  382  */
  383 void
  384 siginit(struct proc *p)
  385 {
  386         int i;
  387 
  388         for (i = 1; i <= NSIG; i++)
  389                 if (sigprop(i) & SA_IGNORE && i != SIGCONT)
  390                         SIGADDSET(p->p_sigignore, i);
  391 }
  392 
  393 /*
  394  * Reset signals for an exec of the specified process.
  395  */
  396 void
  397 execsigs(struct proc *p)
  398 {
  399         struct sigacts *ps = p->p_sigacts;
  400         struct lwp *lp;
  401         int sig;
  402 
  403         lp = ONLY_LWP_IN_PROC(p);
  404 
  405         /*
  406          * Reset caught signals.  Held signals remain held
  407          * through p_sigmask (unless they were caught,
  408          * and are now ignored by default).
  409          */
  410         while (SIGNOTEMPTY(p->p_sigcatch)) {
  411                 sig = sig_ffs(&p->p_sigcatch);
  412                 SIGDELSET(p->p_sigcatch, sig);
  413                 if (sigprop(sig) & SA_IGNORE) {
  414                         if (sig != SIGCONT)
  415                                 SIGADDSET(p->p_sigignore, sig);
  416                         SIGDELSET(p->p_siglist, sig);
  417                         /* don't need spinlock */
  418                         SIGDELSET(lp->lwp_siglist, sig);
  419                 }
  420                 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
  421         }
  422 
  423         /*
  424          * Reset stack state to the user stack.
  425          * Clear set of signals caught on the signal stack.
  426          */
  427         lp->lwp_sigstk.ss_flags = SS_DISABLE;
  428         lp->lwp_sigstk.ss_size = 0;
  429         lp->lwp_sigstk.ss_sp = NULL;
  430         lp->lwp_flags &= ~LWP_ALTSTACK;
  431         /*
   432          * Reset the "no zombies when a child dies" flag, as Solaris does.
  433          */
  434         p->p_sigacts->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
  435         if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
  436                 ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
  437 }
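
/*
 * Userland sketch (not part of this file) of the behaviour execsigs()
 * implements: across exec, ignored dispositions (SIG_IGN) survive, while
 * caught handlers are reset to SIG_DFL.  The hypothetical program below
 * re-executes itself once and reports the dispositions it inherited.
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void handler(int sig) { (void)sig; }

int
main(int argc, char **argv)
{
        struct sigaction sa;

        if (argc > 1 && strcmp(argv[1], "child") == 0) {
                sigaction(SIGUSR1, NULL, &sa);
                printf("SIGUSR1 %s across exec\n",
                    sa.sa_handler == SIG_IGN ? "stayed ignored" : "changed");
                sigaction(SIGUSR2, NULL, &sa);
                printf("SIGUSR2 handler %s\n",
                    sa.sa_handler == SIG_DFL ? "was reset to SIG_DFL" : "survived?");
                return (0);
        }

        signal(SIGUSR1, SIG_IGN);       /* preserved by exec */
        signal(SIGUSR2, handler);       /* reset to SIG_DFL by exec */
        execl(argv[0], argv[0], "child", (char *)NULL);
        perror("execl");
        return (1);
}
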
  438 
  439 /*
  440  * kern_sigprocmask() - MP SAFE ONLY IF p == curproc
  441  *
  442  *      Manipulate signal mask.  This routine is MP SAFE *ONLY* if
  443  *      p == curproc.
  444  */
  445 int
  446 kern_sigprocmask(int how, sigset_t *set, sigset_t *oset)
  447 {
  448         struct thread *td = curthread;
  449         struct lwp *lp = td->td_lwp;
  450         struct proc *p = td->td_proc;
  451         int error;
  452 
  453         lwkt_gettoken(&p->p_token);
  454 
  455         if (oset != NULL)
  456                 *oset = lp->lwp_sigmask;
  457 
  458         error = 0;
  459         if (set != NULL) {
  460                 switch (how) {
  461                 case SIG_BLOCK:
  462                         SIG_CANTMASK(*set);
  463                         SIGSETOR(lp->lwp_sigmask, *set);
  464                         break;
  465                 case SIG_UNBLOCK:
  466                         SIGSETNAND(lp->lwp_sigmask, *set);
  467                         break;
  468                 case SIG_SETMASK:
  469                         SIG_CANTMASK(*set);
  470                         lp->lwp_sigmask = *set;
  471                         break;
  472                 default:
  473                         error = EINVAL;
  474                         break;
  475                 }
  476         }
  477 
  478         lwkt_reltoken(&p->p_token);
  479 
  480         return (error);
  481 }
  482 
  483 /*
  484  * sigprocmask()
  485  *
  486  * MPSAFE
  487  */
  488 int
  489 sys_sigprocmask(struct sigprocmask_args *uap)
  490 {
  491         sigset_t set, oset;
  492         sigset_t *setp, *osetp;
  493         int error;
  494 
  495         setp = (uap->set != NULL) ? &set : NULL;
  496         osetp = (uap->oset != NULL) ? &oset : NULL;
  497         if (setp) {
  498                 error = copyin(uap->set, setp, sizeof(set));
  499                 if (error)
  500                         return (error);
  501         }
  502         error = kern_sigprocmask(uap->how, setp, osetp);
  503         if (osetp && !error) {
  504                 error = copyout(osetp, uap->oset, sizeof(oset));
  505         }
  506         return (error);
  507 }
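
/*
 * Userland usage sketch (not part of this file): the three operations
 * handled by kern_sigprocmask() above.  Hypothetical example blocking
 * SIGTERM around a critical region and restoring the previous mask.
 */
#include <signal.h>
#include <stdio.h>

int
main(void)
{
        sigset_t block, old;

        sigemptyset(&block);
        sigaddset(&block, SIGTERM);
        if (sigprocmask(SIG_BLOCK, &block, &old) == -1) {
                perror("sigprocmask");
                return (1);
        }

        /* critical region: a SIGTERM sent now stays pending */

        if (sigprocmask(SIG_SETMASK, &old, NULL) == -1) {
                perror("sigprocmask");
                return (1);
        }
        return (0);
}
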
  508 
  509 /*
  510  * MPSAFE
  511  */
  512 int
  513 kern_sigpending(struct __sigset *set)
  514 {
  515         struct lwp *lp = curthread->td_lwp;
  516 
  517         *set = lwp_sigpend(lp);
  518 
  519         return (0);
  520 }
  521 
  522 /*
  523  * MPSAFE
  524  */
  525 int
  526 sys_sigpending(struct sigpending_args *uap)
  527 {
  528         sigset_t set;
  529         int error;
  530 
  531         error = kern_sigpending(&set);
  532 
  533         if (error == 0)
  534                 error = copyout(&set, uap->set, sizeof(set));
  535         return (error);
  536 }
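
/*
 * Userland usage sketch (not part of this file): kern_sigpending() returns
 * the union of process- and thread-pending signals.  Hypothetical example:
 * a blocked SIGINT sent with raise(3) shows up in sigpending(2) until it
 * is unblocked.
 */
#include <signal.h>
#include <stdio.h>

int
main(void)
{
        sigset_t block, pending;

        sigemptyset(&block);
        sigaddset(&block, SIGINT);
        sigprocmask(SIG_BLOCK, &block, NULL);

        raise(SIGINT);                  /* stays pending while blocked */

        sigpending(&pending);
        if (sigismember(&pending, SIGINT))
                printf("SIGINT is pending\n");

        /* unblocking here would deliver it and terminate the process */
        return (0);
}
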
  537 
  538 /*
  539  * Suspend process until signal, providing mask to be set
  540  * in the meantime.
  541  *
  542  * MPSAFE
  543  */
  544 int
  545 kern_sigsuspend(struct __sigset *set)
  546 {
  547         struct thread *td = curthread;
  548         struct lwp *lp = td->td_lwp;
  549         struct proc *p = td->td_proc;
  550         struct sigacts *ps = p->p_sigacts;
  551 
  552         /*
  553          * When returning from sigsuspend, we want
  554          * the old mask to be restored after the
  555          * signal handler has finished.  Thus, we
  556          * save it here and mark the sigacts structure
  557          * to indicate this.
  558          */
  559         lp->lwp_oldsigmask = lp->lwp_sigmask;
  560         lp->lwp_flags |= LWP_OLDMASK;
  561 
  562         SIG_CANTMASK(*set);
  563         lp->lwp_sigmask = *set;
  564         while (tsleep(ps, PCATCH, "pause", 0) == 0)
  565                 /* void */;
  566         /* always return EINTR rather than ERESTART... */
  567         return (EINTR);
  568 }
  569 
  570 /*
  571  * Note nonstandard calling convention: libc stub passes mask, not
  572  * pointer, to save a copyin.
  573  *
  574  * MPSAFE
  575  */
  576 int
  577 sys_sigsuspend(struct sigsuspend_args *uap)
  578 {
  579         sigset_t mask;
  580         int error;
  581 
  582         error = copyin(uap->sigmask, &mask, sizeof(mask));
  583         if (error)
  584                 return (error);
  585 
  586         error = kern_sigsuspend(&mask);
  587 
  588         return (error);
  589 }
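
/*
 * Userland sketch (not part of this file): the canonical race-free wait
 * that kern_sigsuspend() makes possible.  SIGUSR1 is blocked before the
 * flag is tested; sigsuspend(2) then installs the old mask and sleeps
 * atomically, so a signal arriving between the test and the sleep cannot
 * be lost.
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>

static volatile sig_atomic_t got_usr1;

static void
on_usr1(int sig)
{
        (void)sig;
        got_usr1 = 1;
}

int
main(void)
{
        struct sigaction sa;
        sigset_t block, waitmask;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = on_usr1;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, NULL);

        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);
        sigprocmask(SIG_BLOCK, &block, &waitmask);      /* waitmask = old mask */

        while (!got_usr1)
                sigsuspend(&waitmask);  /* returns -1 with errno EINTR */

        printf("got SIGUSR1\n");
        return (0);
}
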
  590 
  591 /*
  592  * MPSAFE
  593  */
  594 int
  595 kern_sigaltstack(struct sigaltstack *ss, struct sigaltstack *oss)
  596 {
  597         struct thread *td = curthread;
  598         struct lwp *lp = td->td_lwp;
  599         struct proc *p = td->td_proc;
  600 
  601         if ((lp->lwp_flags & LWP_ALTSTACK) == 0)
  602                 lp->lwp_sigstk.ss_flags |= SS_DISABLE;
  603 
  604         if (oss)
  605                 *oss = lp->lwp_sigstk;
  606 
  607         if (ss) {
  608                 if (ss->ss_flags & ~SS_DISABLE)
  609                         return (EINVAL);
  610                 if (ss->ss_flags & SS_DISABLE) {
  611                         if (lp->lwp_sigstk.ss_flags & SS_ONSTACK)
  612                                 return (EINVAL);
  613                         lp->lwp_flags &= ~LWP_ALTSTACK;
  614                         lp->lwp_sigstk.ss_flags = ss->ss_flags;
  615                 } else {
  616                         if (ss->ss_size < p->p_sysent->sv_minsigstksz)
  617                                 return (ENOMEM);
  618                         lp->lwp_flags |= LWP_ALTSTACK;
  619                         lp->lwp_sigstk = *ss;
  620                 }
  621         }
  622 
  623         return (0);
  624 }
  625 
  626 /*
  627  * MPSAFE
  628  */
  629 int
  630 sys_sigaltstack(struct sigaltstack_args *uap)
  631 {
  632         stack_t ss, oss;
  633         int error;
  634 
  635         if (uap->ss) {
  636                 error = copyin(uap->ss, &ss, sizeof(ss));
  637                 if (error)
  638                         return (error);
  639         }
  640 
  641         error = kern_sigaltstack(uap->ss ? &ss : NULL,
  642             uap->oss ? &oss : NULL);
  643 
  644         if (error == 0 && uap->oss)
  645                 error = copyout(&oss, uap->oss, sizeof(*uap->oss));
  646         return (error);
  647 }
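
/*
 * Userland usage sketch (not part of this file): kern_sigaltstack() backs
 * sigaltstack(2).  Hypothetical example arranging for a SIGSEGV handler to
 * run on an alternate stack via SA_ONSTACK, which is how a program can
 * survive handling a stack-overflow fault.
 */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void
on_segv(int sig)
{
        (void)sig;
        /* running on the alternate stack; keep the work minimal */
        _exit(2);
}

int
main(void)
{
        stack_t ss;
        struct sigaction sa;

        ss.ss_sp = malloc(SIGSTKSZ);
        ss.ss_size = SIGSTKSZ;
        ss.ss_flags = 0;
        if (ss.ss_sp == NULL || sigaltstack(&ss, NULL) == -1) {
                perror("sigaltstack");
                return (1);
        }

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = on_segv;
        sa.sa_flags = SA_ONSTACK;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGSEGV, &sa, NULL);

        /* a later stack overflow would now be handled on ss.ss_sp */
        return (0);
}
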
  648 
  649 /*
  650  * Common code for kill process group/broadcast kill.
  651  * cp is calling process.
  652  */
  653 struct killpg_info {
  654         int nfound;
  655         int sig;
  656 };
  657 
  658 static int killpg_all_callback(struct proc *p, void *data);
  659 
  660 static int
  661 dokillpg(int sig, int pgid, int all)
  662 {
  663         struct killpg_info info;
  664         struct proc *cp = curproc;
  665         struct proc *p;
  666         struct pgrp *pgrp;
  667 
  668         info.nfound = 0;
  669         info.sig = sig;
  670 
  671         if (all) {
  672                 /*
  673                  * broadcast
  674                  */
  675                 allproc_scan(killpg_all_callback, &info);
  676         } else {
  677                 if (pgid == 0) {
  678                         /*
  679                          * zero pgid means send to my process group.
  680                          */
  681                         pgrp = cp->p_pgrp;
  682                         pgref(pgrp);
  683                 } else {
  684                         pgrp = pgfind(pgid);
  685                         if (pgrp == NULL)
  686                                 return (ESRCH);
  687                 }
  688 
  689                 /*
  690                  * Must interlock all signals against fork
  691                  */
  692                 lockmgr(&pgrp->pg_lock, LK_EXCLUSIVE);
  693                 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
  694                         if (p->p_pid <= 1 || 
  695                             p->p_stat == SZOMB ||
  696                             (p->p_flags & P_SYSTEM) ||
  697                             !CANSIGNAL(p, sig)) {
  698                                 continue;
  699                         }
  700                         ++info.nfound;
  701                         if (sig)
  702                                 ksignal(p, sig);
  703                 }
  704                 lockmgr(&pgrp->pg_lock, LK_RELEASE);
  705                 pgrel(pgrp);
  706         }
  707         return (info.nfound ? 0 : ESRCH);
  708 }
  709 
  710 static int
  711 killpg_all_callback(struct proc *p, void *data)
  712 {
  713         struct killpg_info *info = data;
  714 
  715         if (p->p_pid <= 1 || (p->p_flags & P_SYSTEM) ||
  716             p == curproc || !CANSIGNAL(p, info->sig)) {
  717                 return (0);
  718         }
  719         ++info->nfound;
  720         if (info->sig)
  721                 ksignal(p, info->sig);
  722         return(0);
  723 }
  724 
  725 /*
  726  * Send a general signal to a process or LWPs within that process.
  727  *
  728  * Note that new signals cannot be sent if a process is exiting or already
  729  * a zombie, but we return success anyway as userland is likely to not handle
  730  * the race properly.
  731  * 
  732  * No requirements.
  733  */
  734 int
  735 kern_kill(int sig, pid_t pid, lwpid_t tid)
  736 {
  737         int t;
  738 
  739         if ((u_int)sig > _SIG_MAXSIG)
  740                 return (EINVAL);
  741 
  742         if (pid > 0) {
  743                 struct proc *p;
  744                 struct lwp *lp = NULL;
  745 
  746                 /*
  747                  * Send a signal to a single process.  If the kill() is
  748                  * racing an exiting process which has not yet been reaped
  749                  * act as though the signal was delivered successfully but
  750                  * don't actually try to deliver the signal.
  751                  */
  752                 if ((p = pfind(pid)) == NULL) {
  753                         if ((p = zpfind(pid)) == NULL)
  754                                 return (ESRCH);
  755                         PRELE(p);
  756                         return (0);
  757                 }
  758                 lwkt_gettoken(&p->p_token);
  759                 if (!CANSIGNAL(p, sig)) {
  760                         lwkt_reltoken(&p->p_token);
  761                         PRELE(p);
  762                         return (EPERM);
  763                 }
  764 
  765                 /*
  766                  * NOP if the process is exiting.  Note that lwpsignal() is
  767                  * called directly with P_WEXIT set to kill individual LWPs
  768                  * during exit, which is allowed.
  769                  */
  770                 if (p->p_flags & P_WEXIT) {
  771                         lwkt_reltoken(&p->p_token);
  772                         PRELE(p);
  773                         return (0);
  774                 }
  775                 if (tid != -1) {
  776                         lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, tid);
  777                         if (lp == NULL) {
  778                                 lwkt_reltoken(&p->p_token);
  779                                 PRELE(p);
  780                                 return (ESRCH);
  781                         }
  782                 }
  783                 if (sig)
  784                         lwpsignal(p, lp, sig);
  785                 lwkt_reltoken(&p->p_token);
  786                 PRELE(p);
  787 
  788                 return (0);
  789         }
  790 
  791         /*
  792          * If we come here, pid is a special broadcast pid.
  793          * This doesn't mix with a tid.
  794          */
  795         if (tid != -1)
  796                 return (EINVAL);
  797 
  798         switch (pid) {
  799         case -1:                /* broadcast signal */
  800                 t = (dokillpg(sig, 0, 1));
  801                 break;
  802         case 0:                 /* signal own process group */
  803                 t = (dokillpg(sig, 0, 0));
  804                 break;
  805         default:                /* negative explicit process group */
  806                 t = (dokillpg(sig, -pid, 0));
  807                 break;
  808         }
  809         return t;
  810 }
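
/*
 * Userland usage sketch (not part of this file): the pid encodings that
 * kern_kill() decodes.  pid > 0 names one process, pid == 0 the caller's
 * own process group, pid == -1 is a broadcast, and pid < -1 names process
 * group -pid.  The destructive cases are left commented out.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int
main(void)
{
        pid_t child = fork();

        if (child == 0) {               /* child just waits to be signalled */
                for (;;)
                        pause();
        }

        kill(child, SIGTERM);           /* pid > 0: one specific process */
        kill(0, 0);                     /* pid == 0: own group, signal 0 probes permission */
        /* kill(-1, SIGTERM);              pid == -1: every process we may signal */
        /* kill(-pgid, SIGTERM);           pid < -1: the process group pgid */

        waitpid(child, NULL, 0);
        return (0);
}
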
  811 
  812 int
  813 sys_kill(struct kill_args *uap)
  814 {
  815         int error;
  816 
  817         error = kern_kill(uap->signum, uap->pid, -1);
  818         return (error);
  819 }
  820 
  821 int
  822 sys_lwp_kill(struct lwp_kill_args *uap)
  823 {
  824         int error;
  825         pid_t pid = uap->pid;
  826 
  827         /*
  828          * A tid is mandatory for lwp_kill(), otherwise
  829          * you could simply use kill().
  830          */
  831         if (uap->tid == -1)
  832                 return (EINVAL);
  833 
  834         /*
  835          * To save on a getpid() function call for intra-process
  836          * signals, pid == -1 means current process.
  837          */
  838         if (pid == -1)
  839                 pid = curproc->p_pid;
  840 
  841         error = kern_kill(uap->signum, pid, uap->tid);
  842         return (error);
  843 }
  844 
  845 /*
  846  * Send a signal to a process group.
  847  */
  848 void
  849 gsignal(int pgid, int sig)
  850 {
  851         struct pgrp *pgrp;
  852 
  853         if (pgid && (pgrp = pgfind(pgid)))
  854                 pgsignal(pgrp, sig, 0);
  855 }
  856 
  857 /*
  858  * Send a signal to a process group.  If checktty is 1,
  859  * limit to members which have a controlling terminal.
  860  *
  861  * pg_lock interlocks against a fork that might be in progress, to
  862  * ensure that the new child process picks up the signal.
  863  */
  864 void
  865 pgsignal(struct pgrp *pgrp, int sig, int checkctty)
  866 {
  867         struct proc *p;
  868 
  869         /*
  870          * Must interlock all signals against fork
  871          */
  872         if (pgrp) {
  873                 pgref(pgrp);
  874                 lockmgr(&pgrp->pg_lock, LK_EXCLUSIVE);
  875                 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
  876                         if (checkctty == 0 || p->p_flags & P_CONTROLT)
  877                                 ksignal(p, sig);
  878                 }
  879                 lockmgr(&pgrp->pg_lock, LK_RELEASE);
  880                 pgrel(pgrp);
  881         }
  882 }
  883 
  884 /*
  885  * Send a signal caused by a trap to the current lwp.  If it will be caught
  886  * immediately, deliver it with correct code.  Otherwise, post it normally.
  887  *
  888  * These signals may ONLY be delivered to the specified lwp and may never
  889  * be delivered to the process generically.
  890  */
  891 void
  892 trapsignal(struct lwp *lp, int sig, u_long code)
  893 {
  894         struct proc *p = lp->lwp_proc;
  895         struct sigacts *ps = p->p_sigacts;
  896 
  897         /*
  898          * If we are a virtual kernel running an emulated user process
  899          * context, switch back to the virtual kernel context before
  900          * trying to post the signal.
  901          */
  902         if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
  903                 struct trapframe *tf = lp->lwp_md.md_regs;
  904                 tf->tf_trapno = 0;
  905                 vkernel_trap(lp, tf);
  906         }
  907 
  908 
  909         if ((p->p_flags & P_TRACED) == 0 && SIGISMEMBER(p->p_sigcatch, sig) &&
  910             !SIGISMEMBER(lp->lwp_sigmask, sig)) {
  911                 lp->lwp_ru.ru_nsignals++;
  912 #ifdef KTRACE
  913                 if (KTRPOINT(lp->lwp_thread, KTR_PSIG))
  914                         ktrpsig(lp, sig, ps->ps_sigact[_SIG_IDX(sig)],
  915                                 &lp->lwp_sigmask, code);
  916 #endif
  917                 (*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)], sig,
  918                                                 &lp->lwp_sigmask, code);
  919                 SIGSETOR(lp->lwp_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
  920                 if (!SIGISMEMBER(ps->ps_signodefer, sig))
  921                         SIGADDSET(lp->lwp_sigmask, sig);
  922                 if (SIGISMEMBER(ps->ps_sigreset, sig)) {
  923                         /*
  924                          * See kern_sigaction() for origin of this code.
  925                          */
  926                         SIGDELSET(p->p_sigcatch, sig);
  927                         if (sig != SIGCONT &&
  928                             sigprop(sig) & SA_IGNORE)
  929                                 SIGADDSET(p->p_sigignore, sig);
  930                         ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
  931                 }
  932         } else {
  933                 lp->lwp_code = code;    /* XXX for core dump/debugger */
  934                 lp->lwp_sig = sig;      /* XXX to verify code */
  935                 lwpsignal(p, lp, sig);
  936         }
  937 }
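
/*
 * Userland sketch (not part of this file): trapsignal() is what carries the
 * machine-dependent fault code to an SA_SIGINFO handler.  Hypothetical
 * example printing si_addr for a SIGSEGV; on typical hardware the NULL
 * dereference below faults and the kernel fills in the siginfo.
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void
on_segv(int sig, siginfo_t *si, void *ctx)
{
        (void)sig; (void)ctx;
        /* illustration only; fprintf is not async-signal-safe */
        fprintf(stderr, "fault at address %p\n", si->si_addr);
        _exit(1);               /* don't return to the faulting instruction */
}

int
main(void)
{
        struct sigaction sa;
        volatile int *bad = NULL;

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = on_segv;
        sa.sa_flags = SA_SIGINFO;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGSEGV, &sa, NULL);

        return (*bad);          /* expected to deliver SIGSEGV via trapsignal() */
}
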
  938 
  939 /*
  940  * Find a suitable lwp to deliver the signal to.  Returns NULL if all
  941  * lwps hold the signal blocked.
  942  *
  943  * Caller must hold p->p_token.
  944  *
  945  * Returns a lp or NULL.  If non-NULL the lp is held and its token is
  946  * acquired.
  947  */
  948 static struct lwp *
  949 find_lwp_for_signal(struct proc *p, int sig)
  950 {
  951         struct lwp *lp;
  952         struct lwp *run, *sleep, *stop;
  953 
  954         /*
  955          * If the running/preempted thread belongs to the proc to which
  956          * the signal is being delivered and this thread does not block
  957          * the signal, then we can avoid a context switch by delivering
  958          * the signal to this thread, because it will return to userland
  959          * soon anyways.
  960          */
  961         lp = lwkt_preempted_proc();
  962         if (lp != NULL && lp->lwp_proc == p) {
  963                 LWPHOLD(lp);
  964                 lwkt_gettoken(&lp->lwp_token);
  965                 if (!SIGISMEMBER(lp->lwp_sigmask, sig)) {
  966                         /* return w/ token held */
  967                         return (lp);
  968                 }
  969                 lwkt_reltoken(&lp->lwp_token);
  970                 LWPRELE(lp);
  971         }
  972 
  973         run = sleep = stop = NULL;
  974         FOREACH_LWP_IN_PROC(lp, p) {
  975                 /*
  976                  * If the signal is being blocked by the lwp, then this
  977                  * lwp is not eligible for receiving the signal.
  978                  */
  979                 LWPHOLD(lp);
  980                 lwkt_gettoken(&lp->lwp_token);
  981 
  982                 if (SIGISMEMBER(lp->lwp_sigmask, sig)) {
  983                         lwkt_reltoken(&lp->lwp_token);
  984                         LWPRELE(lp);
  985                         continue;
  986                 }
  987 
  988                 switch (lp->lwp_stat) {
  989                 case LSRUN:
  990                         if (sleep) {
  991                                 lwkt_token_swap();
  992                                 lwkt_reltoken(&sleep->lwp_token);
  993                                 LWPRELE(sleep);
  994                                 sleep = NULL;
  995                                 run = lp;
  996                         } else if (stop) {
  997                                 lwkt_token_swap();
  998                                 lwkt_reltoken(&stop->lwp_token);
  999                                 LWPRELE(stop);
 1000                                 stop = NULL;
 1001                                 run = lp;
 1002                         } else {
 1003                                 run = lp;
 1004                         }
 1005                         break;
 1006                 case LSSLEEP:
 1007                         if (lp->lwp_flags & LWP_SINTR) {
 1008                                 if (sleep) {
 1009                                         lwkt_reltoken(&lp->lwp_token);
 1010                                         LWPRELE(lp);
 1011                                 } else if (stop) {
 1012                                         lwkt_token_swap();
 1013                                         lwkt_reltoken(&stop->lwp_token);
 1014                                         LWPRELE(stop);
 1015                                         stop = NULL;
 1016                                         sleep = lp;
 1017                                 } else {
 1018                                         sleep = lp;
 1019                                 }
 1020                         } else {
 1021                                 lwkt_reltoken(&lp->lwp_token);
 1022                                 LWPRELE(lp);
 1023                         }
 1024                         break;
 1025                 case LSSTOP:
 1026                         if (sleep) {
 1027                                 lwkt_reltoken(&lp->lwp_token);
 1028                                 LWPRELE(lp);
 1029                         } else if (stop) {
 1030                                 lwkt_reltoken(&lp->lwp_token);
 1031                                 LWPRELE(lp);
 1032                         } else {
 1033                                 stop = lp;
 1034                         }
 1035                         break;
 1036                 }
 1037                 if (run)
 1038                         break;
 1039         }
 1040 
 1041         if (run != NULL)
 1042                 return (run);
 1043         else if (sleep != NULL)
 1044                 return (sleep);
 1045         else
 1046                 return (stop);
 1047 }
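
/*
 * Userland sketch (not part of this file): the thread-selection logic in
 * find_lwp_for_signal() means a process-directed signal goes to a thread
 * that does not block it; if every thread blocks it, the signal stays
 * pending on the process until collected, e.g. by sigwait(3).  The common
 * dedicated signal-handling-thread pattern (compile with -pthread) relies
 * on exactly that.
 */
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static sigset_t set;

static void *
sig_thread(void *arg)
{
        int sig;

        (void)arg;
        for (;;) {
                if (sigwait(&set, &sig) == 0)
                        printf("signal %d taken by dedicated thread\n", sig);
        }
        return (NULL);
}

int
main(void)
{
        pthread_t tid;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        pthread_sigmask(SIG_BLOCK, &set, NULL); /* inherited by new threads */

        pthread_create(&tid, NULL, sig_thread, NULL);
        for (;;)
                pause();        /* send SIGUSR1 to this pid to exercise it */
}
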
 1048 
 1049 /*
 1050  * Send the signal to the process.  If the signal has an action, the action
 1051  * is usually performed by the target process rather than the caller; we add
 1052  * the signal to the set of pending signals for the process.
 1053  *
 1054  * Exceptions:
 1055  *   o When a stop signal is sent to a sleeping process that takes the
 1056  *     default action, the process is stopped without awakening it.
 1057  *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 1058  *     regardless of the signal action (eg, blocked or ignored).
 1059  *
 1060  * Other ignored signals are discarded immediately.
 1061  *
 1062  * If the caller wishes to call this function from a hard code section the
 1063  * caller must already hold p->p_token (see kern_clock.c).
 1064  *
 1065  * No requirements.
 1066  */
 1067 void
 1068 ksignal(struct proc *p, int sig)
 1069 {
 1070         lwpsignal(p, NULL, sig);
 1071 }
 1072 
 1073 /*
 1074  * The core for ksignal.  lp may be NULL, then a suitable thread
 1075  * will be chosen.  If not, lp MUST be a member of p.
 1076  *
 1077  * If the caller wishes to call this function from a hard code section the
 1078  * caller must already hold p->p_token.
 1079  *
 1080  * No requirements.
 1081  */
 1082 void
 1083 lwpsignal(struct proc *p, struct lwp *lp, int sig)
 1084 {
 1085         struct proc *q;
 1086         sig_t action;
 1087         int prop;
 1088 
 1089         if (sig > _SIG_MAXSIG || sig <= 0) {
 1090                 kprintf("lwpsignal: signal %d\n", sig);
 1091                 panic("lwpsignal signal number");
 1092         }
 1093 
 1094         KKASSERT(lp == NULL || lp->lwp_proc == p);
 1095 
 1096         /*
 1097          * We don't want to race... well, all sorts of things.  Get appropriate
 1098          * tokens.
 1099          *
 1100          * Don't try to deliver a generic signal to an exiting process,
 1101          * the signal structures could be in flux.  We check the LWP later
 1102          * on.
 1103          */
 1104         PHOLD(p);
 1105         lwkt_gettoken(&p->p_token);
 1106         if (lp) {
 1107                 LWPHOLD(lp);
 1108                 lwkt_gettoken(&lp->lwp_token);
 1109         } else if (p->p_flags & P_WEXIT) {
 1110                 goto out;
 1111         }
 1112 
 1113         prop = sigprop(sig);
 1114 
 1115         /*
 1116          * If proc is traced, always give parent a chance;
 1117          * if signal event is tracked by procfs, give *that*
 1118          * a chance, as well.
 1119          */
 1120         if ((p->p_flags & P_TRACED) || (p->p_stops & S_SIG)) {
 1121                 action = SIG_DFL;
 1122         } else {
 1123                 /*
 1124                  * Do not try to deliver signals to an exiting lwp.  Note
 1125                  * that we must still deliver the signal if P_WEXIT is set
 1126                  * in the process flags.
 1127                  */
 1128                 if (lp && (lp->lwp_mpflags & LWP_MP_WEXIT)) {
 1129                         if (lp) {
 1130                                 lwkt_reltoken(&lp->lwp_token);
 1131                                 LWPRELE(lp);
 1132                         }
 1133                         lwkt_reltoken(&p->p_token);
 1134                         PRELE(p);
 1135                         return;
 1136                 }
 1137 
 1138                 /*
 1139                  * If the signal is being ignored, then we forget about
 1140                  * it immediately.  NOTE: We don't set SIGCONT in p_sigignore,
 1141                  * and if it is set to SIG_IGN, action will be SIG_DFL here.
 1142                  */
 1143                 if (SIGISMEMBER(p->p_sigignore, sig)) {
 1144                         /*
 1145                          * Even if a signal is set SIG_IGN, it may still be
 1146                          * lurking in a kqueue.
 1147                          */
 1148                         KNOTE(&p->p_klist, NOTE_SIGNAL | sig);
 1149                         if (lp) {
 1150                                 lwkt_reltoken(&lp->lwp_token);
 1151                                 LWPRELE(lp);
 1152                         }
 1153                         lwkt_reltoken(&p->p_token);
 1154                         PRELE(p);
 1155                         return;
 1156                 }
 1157                 if (SIGISMEMBER(p->p_sigcatch, sig))
 1158                         action = SIG_CATCH;
 1159                 else
 1160                         action = SIG_DFL;
 1161         }
 1162 
 1163         /*
 1164          * If continuing, clear any pending STOP signals.
 1165          */
 1166         if (prop & SA_CONT)
 1167                 SIG_STOPSIGMASK(p->p_siglist);
 1168         
 1169         if (prop & SA_STOP) {
 1170                 /*
 1171                  * If sending a tty stop signal to a member of an orphaned
 1172                  * process group, discard the signal here if the action
 1173                  * is default; don't stop the process below if sleeping,
 1174                  * and don't clear any pending SIGCONT.
 1175                  */
 1176                 if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0 &&
 1177                     action == SIG_DFL) {
 1178                         if (lp) {
 1179                                 lwkt_reltoken(&lp->lwp_token);
 1180                                 LWPRELE(lp);
 1181                         }
 1182                         lwkt_reltoken(&p->p_token);
 1183                         PRELE(p);
 1184                         return;
 1185                 }
 1186                 SIG_CONTSIGMASK(p->p_siglist);
 1187                 p->p_flags &= ~P_CONTINUED;
 1188         }
 1189 
 1190         if (p->p_stat == SSTOP) {
 1191                 /*
 1192                  * Nobody can handle this signal, add it to the lwp or
 1193                  * process pending list 
 1194                  */
 1195                 if (lp) {
 1196                         spin_lock(&lp->lwp_spin);
 1197                         SIGADDSET(lp->lwp_siglist, sig);
 1198                         spin_unlock(&lp->lwp_spin);
 1199                 } else {
 1200                         SIGADDSET(p->p_siglist, sig);
 1201                 }
 1202 
 1203                 /*
 1204                  * If the process is stopped and is being traced, then no
 1205                  * further action is necessary.
 1206                  */
 1207                 if (p->p_flags & P_TRACED)
 1208                         goto out;
 1209 
 1210                 /*
 1211                  * If the process is stopped and receives a KILL signal,
 1212                  * make the process runnable.
 1213                  */
 1214                 if (sig == SIGKILL) {
 1215                         proc_unstop(p);
 1216                         goto active_process;
 1217                 }
 1218 
 1219                 /*
 1220                  * If the process is stopped and receives a CONT signal,
 1221                  * then try to make the process runnable again.
 1222                  */
 1223                 if (prop & SA_CONT) {
 1224                         /*
 1225                          * If SIGCONT is default (or ignored), we continue the
 1226                          * process but don't leave the signal in p_siglist, as
 1227                          * it has no further action.  If SIGCONT is held, we
 1228                          * continue the process and leave the signal in
 1229                          * p_siglist.  If the process catches SIGCONT, let it
 1230                          * handle the signal itself.
 1231                          *
 1232                          * XXX what if the signal is being held blocked?
 1233                          *
 1234                          * Token required to interlock kern_wait().
 1235                          * Reparenting can also cause a race so we have to
 1236                          * hold (q).
 1237                          */
 1238                         q = p->p_pptr;
 1239                         PHOLD(q);
 1240                         lwkt_gettoken(&q->p_token);
 1241                         p->p_flags |= P_CONTINUED;
 1242                         wakeup(q);
 1243                         if (action == SIG_DFL)
 1244                                 SIGDELSET(p->p_siglist, sig);
 1245                         proc_unstop(p);
 1246                         lwkt_reltoken(&q->p_token);
 1247                         PRELE(q);
 1248                         if (action == SIG_CATCH)
 1249                                 goto active_process;
 1250                         goto out;
 1251                 }
 1252 
 1253                 /*
 1254                  * If the process is stopped and receives another STOP
 1255                  * signal, we do not need to stop it again.  If we did
 1256                  * the shell could get confused.
 1257                  *
 1258                  * However, if the current/preempted lwp is part of the
 1259                  * process receiving the signal, we need to keep it,
 1260                  * so that this lwp can stop in issignal() later, as
 1261                  * we don't want to wait until it reaches userret!
 1262                  */
 1263                 if (prop & SA_STOP) {
 1264                         if (lwkt_preempted_proc() == NULL ||
 1265                             lwkt_preempted_proc()->lwp_proc != p)
 1266                                 SIGDELSET(p->p_siglist, sig);
 1267                 }
 1268 
 1269                 /*
 1270                  * Otherwise the process is stopped and it received some
 1271                  * signal, which does not change its stopped state.  When
 1272                  * the process is continued a wakeup(p) will be issued which
 1273                  * will wakeup any threads sleeping in tstop().
 1274                  */
 1275                 if (lp == NULL) {
 1276                         /* NOTE: returns lp w/ token held */
 1277                         lp = find_lwp_for_signal(p, sig);
 1278                 }
 1279                 goto out;
 1280 
 1281                 /* NOTREACHED */
 1282         }
 1283         /* else not stopped */
 1284 active_process:
 1285 
 1286         /*
 1287          * Never deliver a lwp-specific signal to a random lwp.
 1288          */
 1289         if (lp == NULL) {
 1290                 /* NOTE: returns lp w/ token held */
 1291                 lp = find_lwp_for_signal(p, sig);
 1292                 if (lp) {
 1293                         if (SIGISMEMBER(lp->lwp_sigmask, sig)) {
 1294                                 lwkt_reltoken(&lp->lwp_token);
 1295                                 LWPRELE(lp);
 1296                                 lp = NULL;
 1297                         }
 1298                 }
 1299         }
 1300 
 1301         /*
 1302          * Deliver to the process generically if (1) the signal is being
 1303          * sent to any thread or (2) we could not find a thread to deliver
 1304          * it to.
 1305          */
 1306         if (lp == NULL) {
 1307                 SIGADDSET(p->p_siglist, sig);
 1308                 goto out;
 1309         }
 1310 
 1311         /*
 1312          * Deliver to a specific LWP whether it masks it or not.  It will
 1313          * not be dispatched if masked but we must still deliver it.
 1314          */
 1315         if (p->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
 1316             (p->p_flags & P_TRACED) == 0) {
 1317                 p->p_nice = NZERO;
 1318         }
 1319 
 1320         /*
 1321          * If the process receives a STOP signal which indeed needs to
 1322          * stop the process, do so.  If the process chose to catch the
 1323          * signal, it will be treated like any other signal.
 1324          */
 1325         if ((prop & SA_STOP) && action == SIG_DFL) {
 1326                 /*
 1327                  * If a child holding parent blocked, stopping
 1328                  * could cause deadlock.  Take no action at this
 1329                  * time.
 1330                  */
 1331                 if (p->p_flags & P_PPWAIT) {
 1332                         SIGADDSET(p->p_siglist, sig);
 1333                         goto out;
 1334                 }
 1335 
 1336                 /*
 1337                  * Do not actually try to manipulate the process, but simply
 1338                  * stop it.  Lwps will stop as soon as they safely can.
 1339                  *
 1340                  * Ignore stop if the process is exiting.
 1341                  */
 1342                 if ((p->p_flags & P_WEXIT) == 0) {
 1343                         p->p_xstat = sig;
 1344                         proc_stop(p);
 1345                 }
 1346                 goto out;
 1347         }
 1348 
 1349         /*
 1350          * If it is a CONT signal with default action, just ignore it.
 1351          */
 1352         if ((prop & SA_CONT) && action == SIG_DFL)
 1353                 goto out;
 1354 
 1355         /*
 1356          * Mark signal pending at this specific thread.
 1357          */
 1358         spin_lock(&lp->lwp_spin);
 1359         SIGADDSET(lp->lwp_siglist, sig);
 1360         spin_unlock(&lp->lwp_spin);
 1361 
 1362         lwp_signotify(lp);
 1363 
 1364 out:
 1365         if (lp) {
 1366                 lwkt_reltoken(&lp->lwp_token);
 1367                 LWPRELE(lp);
 1368         }
 1369         lwkt_reltoken(&p->p_token);
 1370         PRELE(p);
 1371 }
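
/*
 * Userland sketch (not part of this file) of the stop/continue handling
 * above as seen by a parent: SIGSTOP stops the child regardless of its
 * dispositions, and SIGCONT makes it runnable again and sets P_CONTINUED,
 * which waitpid(2) reports when called with WCONTINUED.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int
main(void)
{
        int status;
        pid_t child = fork();

        if (child == 0) {
                for (;;)
                        pause();
        }

        kill(child, SIGSTOP);
        waitpid(child, &status, WUNTRACED);
        if (WIFSTOPPED(status))
                printf("child stopped by signal %d\n", WSTOPSIG(status));

        kill(child, SIGCONT);
        waitpid(child, &status, WCONTINUED);
        if (WIFCONTINUED(status))
                printf("child continued\n");

        kill(child, SIGKILL);
        waitpid(child, &status, 0);
        return (0);
}
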
 1372 
 1373 /*
 1374  * Notify the LWP that a signal has arrived.  The LWP does not have to be
 1375  * sleeping on the current cpu.
 1376  *
 1377  * p->p_token and lp->lwp_token must be held on call.
 1378  *
 1379  * We can only safely schedule the thread on its current cpu and only if
 1380  * one of the SINTR flags is set.  If an SINTR flag is set AND we are on
 1381  * the correct cpu we are properly interlocked, otherwise we could be
 1382  * racing other thread transition states (or the lwp is on the user scheduler
 1383  * runq but not scheduled) and must not do anything.
 1384  *
 1385  * Since we hold the lwp token we know the lwp cannot be ripped out from
 1386  * under us so we can safely hold it to prevent it from being ripped out
 1387  * from under us if we are forced to IPI another cpu to make the local
 1388  * checks there.
 1389  *
 1390  * Adjustment of lp->lwp_stat can only occur when we hold the lwp_token,
 1391  * which we won't in an IPI so any fixups have to be done here, effectively
 1392  * replicating part of what setrunnable() does.
 1393  */
 1394 static void
 1395 lwp_signotify(struct lwp *lp)
 1396 {
 1397         ASSERT_LWKT_TOKEN_HELD(&lp->lwp_proc->p_token);
 1398 
 1399         crit_enter();
 1400         if (lp == lwkt_preempted_proc()) {
 1401                 /*
 1402                  * lwp is on the current cpu AND it is currently running
 1403                  * (we preempted it).
 1404                  */
 1405                 signotify();
 1406         } else if (lp->lwp_flags & LWP_SINTR) {
 1407                 /*
 1408                  * lwp is sitting in tsleep() with PCATCH set
 1409                  */
 1410                 if (lp->lwp_thread->td_gd == mycpu) {
 1411                         setrunnable(lp);
 1412                 } else {
 1413                         /*
 1414                          * We can only adjust lwp_stat while we hold the
 1415                          * lwp_token, and we won't in the IPI function.
 1416                          */
 1417                         LWPHOLD(lp);
 1418                         if (lp->lwp_stat == LSSTOP)
 1419                                 lp->lwp_stat = LSSLEEP;
 1420                         lwkt_send_ipiq(lp->lwp_thread->td_gd,
 1421                                        lwp_signotify_remote, lp);
 1422                 }
 1423         } else if (lp->lwp_thread->td_flags & TDF_SINTR) {
 1424                 /*
 1425                  * lwp is sitting in lwkt_sleep() with PCATCH set.
 1426                  */
 1427                 if (lp->lwp_thread->td_gd == mycpu) {
 1428                         setrunnable(lp);
 1429                 } else {
 1430                         /*
 1431                          * We can only adjust lwp_stat while we hold the
 1432                          * lwp_token, and we won't in the IPI function.
 1433                          */
 1434                         LWPHOLD(lp);
 1435                         if (lp->lwp_stat == LSSTOP)
 1436                                 lp->lwp_stat = LSSLEEP;
 1437                         lwkt_send_ipiq(lp->lwp_thread->td_gd,
 1438                                        lwp_signotify_remote, lp);
 1439                 }
 1440         } else {
 1441                 /*
 1442                  * Otherwise the lwp is either in some uninterruptible state
 1443                  * or it is on the userland scheduler's runqueue waiting to
 1444                  * be scheduled to a cpu.
 1445                  */
 1446         }
 1447         crit_exit();
 1448 }
 1449 
 1450 /*
 1451  * This function is called via an IPI so we cannot call setrunnable() here
 1452  * (because while we hold the lp we don't own its token, and can't get it
 1453  * from an IPI).
 1454  *
 1455  * We are interlocked by virtue of being on the same cpu as the target.  If
 1456  * we still are and LWP_SINTR or TDF_SINTR is set we can safely schedule
 1457  * the target thread.
 1458  */
 1459 static void
 1460 lwp_signotify_remote(void *arg)
 1461 {
 1462         struct lwp *lp = arg;
 1463         thread_t td = lp->lwp_thread;
 1464 
 1465         if (lp == lwkt_preempted_proc()) {
 1466                 signotify();
 1467                 LWPRELE(lp);
 1468         } else if (td->td_gd == mycpu) {
 1469                 if ((lp->lwp_flags & LWP_SINTR) ||
 1470                     (td->td_flags & TDF_SINTR)) {
 1471                         lwkt_schedule(td);
 1472                 }
 1473                 LWPRELE(lp);
 1474         } else {
 1475                 lwkt_send_ipiq(td->td_gd, lwp_signotify_remote, lp);
 1476                 /* LWPHOLD() is forwarded to the target cpu */
 1477         }
 1478 }
 1479 
 1480 /*
 1481  * Caller must hold p->p_token
 1482  */
 1483 void
 1484 proc_stop(struct proc *p)
 1485 {
 1486         struct proc *q;
 1487         struct lwp *lp;
 1488 
 1489         ASSERT_LWKT_TOKEN_HELD(&p->p_token);
 1490 
 1491         /* If somebody raced us, be happy with it */
 1492         if (p->p_stat == SSTOP || p->p_stat == SZOMB) {
 1493                 return;
 1494         }
 1495         p->p_stat = SSTOP;
 1496 
 1497         FOREACH_LWP_IN_PROC(lp, p) {
 1498                 LWPHOLD(lp);
 1499                 lwkt_gettoken(&lp->lwp_token);
 1500 
 1501                 switch (lp->lwp_stat) {
 1502                 case LSSTOP:
 1503                         /*
 1504                          * Do nothing, we are already counted in
 1505                          * p_nstopped.
 1506                          */
 1507                         break;
 1508 
 1509                 case LSSLEEP:
 1510                         /*
 1511                          * We're sleeping, but we will stop before
 1512                          * returning to userspace, so count us
 1513                          * as stopped as well.  We set LWP_MP_WSTOP
 1514                          * to signal the lwp that it should not
 1515                          * increase p_nstopped when reaching tstop().
 1516                          *
 1517                          * LWP_MP_WSTOP is protected by lp->lwp_token.
 1518                          */
 1519                         if ((lp->lwp_mpflags & LWP_MP_WSTOP) == 0) {
 1520                                 atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
 1521                                 ++p->p_nstopped;
 1522                         }
 1523                         break;
 1524 
 1525                 case LSRUN:
 1526                         /*
 1527                          * We might notify ourself, but that's not
 1528                          * a problem.
 1529                          */
 1530                         lwp_signotify(lp);
 1531                         break;
 1532                 }
 1533                 lwkt_reltoken(&lp->lwp_token);
 1534                 LWPRELE(lp);
 1535         }
 1536 
 1537         if (p->p_nstopped == p->p_nthreads) {
 1538                 /*
 1539                  * Token required to interlock kern_wait().  Reparenting can
 1540                  * also cause a race so we have to hold (q).
 1541                  */
 1542                 q = p->p_pptr;
 1543                 PHOLD(q);
 1544                 lwkt_gettoken(&q->p_token);
 1545                 p->p_flags &= ~P_WAITED;
 1546                 wakeup(q);
 1547                 if ((q->p_sigacts->ps_flag & PS_NOCLDSTOP) == 0)
 1548                         ksignal(p->p_pptr, SIGCHLD);
 1549                 lwkt_reltoken(&q->p_token);
 1550                 PRELE(q);
 1551         }
 1552 }
 1553 
 1554 /*
 1555  * Caller must hold p_token
 1556  */
 1557 void
 1558 proc_unstop(struct proc *p)
 1559 {
 1560         struct lwp *lp;
 1561 
 1562         ASSERT_LWKT_TOKEN_HELD(&p->p_token);
 1563 
 1564         if (p->p_stat != SSTOP)
 1565                 return;
 1566 
 1567         p->p_stat = SACTIVE;
 1568 
 1569         FOREACH_LWP_IN_PROC(lp, p) {
 1570                 LWPHOLD(lp);
 1571                 lwkt_gettoken(&lp->lwp_token);
 1572 
 1573                 switch (lp->lwp_stat) {
 1574                 case LSRUN:
 1575                         /*
 1576                          * Uh?  Not stopped?  Well, I guess that's okay.
 1577                          */
 1578                         if (bootverbose)
 1579                                 kprintf("proc_unstop: lwp %d/%d not sleeping\n",
 1580                                         p->p_pid, lp->lwp_tid);
 1581                         break;
 1582 
 1583                 case LSSLEEP:
 1584                         /*
 1585                          * Still sleeping.  Don't bother waking it up.
 1586                          * However, if this thread was counted as
 1587                          * stopped, undo this.
 1588                          *
 1589                          * Nevertheless we notify the lwp (which may call
 1590                          * setrunnable()) so that it will wake up in case a
 1591                          * signal or timeout arrived in the meantime.
 1592                          *
 1593                          * LWP_MP_WSTOP is protected by lp->lwp_token.
 1594                          */
 1595                         if (lp->lwp_mpflags & LWP_MP_WSTOP) {
 1596                                 atomic_clear_int(&lp->lwp_mpflags,
 1597                                                  LWP_MP_WSTOP);
 1598                                 --p->p_nstopped;
 1599                         } else {
 1600                                 if (bootverbose)
 1601                                         kprintf("proc_unstop: lwp %d/%d sleeping, not stopped\n",
 1602                                                 p->p_pid, lp->lwp_tid);
 1603                         }
 1604                         /* FALLTHROUGH */
 1605 
 1606                 case LSSTOP:
 1607                         /*
 1608                          * This handles any lwps waiting in a tsleep() with
 1609                          * PCATCH set.
 1610                          */
 1611                         lwp_signotify(lp);
 1612                         break;
 1613 
 1614                 }
 1615                 lwkt_reltoken(&lp->lwp_token);
 1616                 LWPRELE(lp);
 1617         }
 1618 
 1619         /*
 1620          * This handles any lwps waiting in tstop().  We have interlocked
 1621          * the setting of p_stat by acquiring and releasing each lwp's
 1622          * token.
 1623          */
 1624         wakeup(p);
 1625 }
 1626 
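From userland, proc_stop() and proc_unstop() are what a parent observes when a
child takes a default-action stop signal and is later continued.  A minimal
userland sketch (standard POSIX calls only, not part of this file) that drives
a child through both paths:

#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        pid_t pid = fork();
        int status;

        if (pid == 0) {
                for (;;)                /* child: wait to be signaled */
                        pause();
        }

        kill(pid, SIGSTOP);             /* default action: proc_stop() */
        waitpid(pid, &status, WUNTRACED);
        if (WIFSTOPPED(status))
                printf("child stopped by signal %d\n", WSTOPSIG(status));

        kill(pid, SIGCONT);             /* proc_unstop() resumes the lwps */
        kill(pid, SIGKILL);
        waitpid(pid, &status, 0);
        return (0);
}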
 1627 /* 
 1628  * No requirements.
 1629  */
 1630 static int
 1631 kern_sigtimedwait(sigset_t waitset, siginfo_t *info, struct timespec *timeout)
 1632 {
 1633         sigset_t savedmask, set;
 1634         struct proc *p = curproc;
 1635         struct lwp *lp = curthread->td_lwp;
 1636         int error, sig, hz, timevalid = 0;
 1637         struct timespec rts, ets, ts;
 1638         struct timeval tv;
 1639 
 1640         error = 0;
 1641         sig = 0;
 1642         ets.tv_sec = 0;         /* silence compiler warning */
 1643         ets.tv_nsec = 0;        /* silence compiler warning */
 1644         SIG_CANTMASK(waitset);
 1645         savedmask = lp->lwp_sigmask;
 1646 
 1647         if (timeout) {
 1648                 if (timeout->tv_sec >= 0 && timeout->tv_nsec >= 0 &&
 1649                     timeout->tv_nsec < 1000000000) {
 1650                         timevalid = 1;
 1651                         getnanouptime(&rts);
 1652                         ets = rts;
 1653                         timespecadd(&ets, timeout);
 1654                 }
 1655         }
 1656 
 1657         for (;;) {
 1658                 set = lwp_sigpend(lp);
 1659                 SIGSETAND(set, waitset);
 1660                 if ((sig = sig_ffs(&set)) != 0) {
 1661                         SIGFILLSET(lp->lwp_sigmask);
 1662                         SIGDELSET(lp->lwp_sigmask, sig);
 1663                         SIG_CANTMASK(lp->lwp_sigmask);
 1664                         sig = issignal(lp, 1);
 1665                         /*
 1666                          * It may be a STOP signal, in which case issignal()
 1667                          * returns 0 because we may stop there and a new
 1668                          * signal can come in; restart the loop if we got
 1669                          * nothing.
 1670                          */
 1671                         if (sig == 0)
 1672                                 continue;
 1673                         else
 1674                                 break;
 1675                 }
 1676 
 1677                 /*
 1678                  * The previous check found nothing and the retry also
 1679                  * found nothing, so return the error status.
 1680                  */
 1681                 if (error)
 1682                         break;
 1683 
 1684                 /*
 1685                  * POSIX says this must be checked after looking for pending
 1686                  * signals.
 1687                  */
 1688                 if (timeout) {
 1689                         if (timevalid == 0) {
 1690                                 error = EINVAL;
 1691                                 break;
 1692                         }
 1693                         getnanouptime(&rts);
 1694                         if (timespeccmp(&rts, &ets, >=)) {
 1695                                 error = EAGAIN;
 1696                                 break;
 1697                         }
 1698                         ts = ets;
 1699                         timespecsub(&ts, &rts);
 1700                         TIMESPEC_TO_TIMEVAL(&tv, &ts);
 1701                         hz = tvtohz_high(&tv);
 1702                 } else {
 1703                         hz = 0;
 1704                 }
 1705 
 1706                 lp->lwp_sigmask = savedmask;
 1707                 SIGSETNAND(lp->lwp_sigmask, waitset);
 1708                 /*
 1709                  * We won't ever be woken up.  Instead, our sleep will
 1710                  * be broken in lwpsignal().
 1711                  */
 1712                 error = tsleep(&p->p_sigacts, PCATCH, "sigwt", hz);
 1713                 if (timeout) {
 1714                         if (error == ERESTART) {
 1715                                 /* Cannot restart a timed wait. */
 1716                                 error = EINTR;
 1717                         } else if (error == EAGAIN) {
 1718                                 /* We recalculate the timeout ourselves. */
 1719                                 error = 0;
 1720                         }
 1721                 }
 1722                 /* Retry ... */
 1723         }
 1724 
 1725         lp->lwp_sigmask = savedmask;
 1726         if (sig) {
 1727                 error = 0;
 1728                 bzero(info, sizeof(*info));
 1729                 info->si_signo = sig;
 1730                 spin_lock(&lp->lwp_spin);
 1731                 lwp_delsig(lp, sig);    /* take the signal! */
 1732                 spin_unlock(&lp->lwp_spin);
 1733 
 1734                 if (sig == SIGKILL) {
 1735                         sigexit(lp, sig);
 1736                         /* NOT REACHED */
 1737                 }
 1738         }
 1739 
 1740         return (error);
 1741 }
 1742 
 1743 /*
 1744  * MPALMOSTSAFE
 1745  */
 1746 int
 1747 sys_sigtimedwait(struct sigtimedwait_args *uap)
 1748 {
 1749         struct timespec ts;
 1750         struct timespec *timeout;
 1751         sigset_t set;
 1752         siginfo_t info;
 1753         int error;
 1754 
 1755         if (uap->timeout) {
 1756                 error = copyin(uap->timeout, &ts, sizeof(ts));
 1757                 if (error)
 1758                         return (error);
 1759                 timeout = &ts;
 1760         } else {
 1761                 timeout = NULL;
 1762         }
 1763         error = copyin(uap->set, &set, sizeof(set));
 1764         if (error)
 1765                 return (error);
 1766         error = kern_sigtimedwait(set, &info, timeout);
 1767         if (error)
 1768                 return (error);
 1769         if (uap->info)
 1770                 error = copyout(&info, uap->info, sizeof(info));
 1771         /* Repost if we got an error. */
 1772         /*
 1773          * XXX lwp
 1774          *
 1775          * This could transform a thread-specific signal to another
 1776          * thread / process pending signal.
 1777          */
 1778         if (error) {
 1779                 ksignal(curproc, info.si_signo);
 1780         } else {
 1781                 uap->sysmsg_result = info.si_signo;
 1782         }
 1783         return (error);
 1784 }
 1785 
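The wrapper above implements the usual sigtimedwait(2) contract: the caller
blocks the signals it wants to wait for, and a timeout surfaces as EAGAIN
(computed against the uptime clock as in kern_sigtimedwait()).  A minimal
userland sketch, assuming only the standard interface:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>

int
main(void)
{
        sigset_t set;
        siginfo_t info;
        struct timespec ts = { 5, 0 };  /* wait at most five seconds */
        int sig;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);     /* keep it pending, not delivered */

        sig = sigtimedwait(&set, &info, &ts);
        if (sig == -1) {
                if (errno == EAGAIN)
                        printf("timed out\n");
                else
                        perror("sigtimedwait");
        } else {
                printf("got signal %d\n", sig);
        }
        return (0);
}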
 1786 /*
 1787  * MPALMOSTSAFE
 1788  */
 1789 int
 1790 sys_sigwaitinfo(struct sigwaitinfo_args *uap)
 1791 {
 1792         siginfo_t info;
 1793         sigset_t set;
 1794         int error;
 1795 
 1796         error = copyin(uap->set, &set, sizeof(set));
 1797         if (error)
 1798                 return (error);
 1799         error = kern_sigtimedwait(set, &info, NULL);
 1800         if (error)
 1801                 return (error);
 1802         if (uap->info)
 1803                 error = copyout(&info, uap->info, sizeof(info));
 1804         /* Repost if we got an error. */
 1805         /*
 1806          * XXX lwp
 1807          *
 1808          * This could transform a thread-specific signal to another
 1809          * thread / process pending signal.
 1810          */
 1811         if (error) {
 1812                 ksignal(curproc, info.si_signo);
 1813         } else {
 1814                 uap->sysmsg_result = info.si_signo;
 1815         }
 1816         return (error);
 1817 }
 1818 
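sys_sigwaitinfo() is the untimed variant of the same path.  Note that
kern_sigtimedwait() above zeroes the siginfo and fills in only si_signo, so a
caller on this kernel should rely on that field (or the return value) rather
than the other siginfo members.  A small sketch of synchronous signal handling
with it:

#include <signal.h>
#include <stdio.h>

int
main(void)
{
        sigset_t set;
        siginfo_t info;

        sigemptyset(&set);
        sigaddset(&set, SIGHUP);
        sigaddset(&set, SIGTERM);
        sigprocmask(SIG_BLOCK, &set, NULL);

        for (;;) {
                if (sigwaitinfo(&set, &info) == -1) {
                        perror("sigwaitinfo");
                        continue;
                }
                printf("handling signal %d synchronously\n", info.si_signo);
                if (info.si_signo == SIGTERM)
                        break;
        }
        return (0);
}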
 1819 /*
 1820  * If the current process has received a signal that would interrupt a
 1821  * system call, return EINTR or ERESTART as appropriate.
 1822  */
 1823 int
 1824 iscaught(struct lwp *lp)
 1825 {
 1826         struct proc *p = lp->lwp_proc;
 1827         int sig;
 1828 
 1829         if (p) {
 1830                 if ((sig = CURSIG(lp)) != 0) {
 1831                         if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
 1832                                 return (EINTR);                        
 1833                         return (ERESTART);     
 1834                 }                         
 1835         }
 1836         return(EWOULDBLOCK);
 1837 }
 1838 
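Whether an interrupted syscall returns EINTR or is transparently restarted is
decided by ps_sigintr, which records handlers installed without SA_RESTART.  A
userland sketch of the EINTR side, using only standard sigaction(2); with
sa_flags = SA_RESTART the read would be restarted instead:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void
on_alarm(int sig)
{
        (void)sig;              /* only purpose is to interrupt the read */
}

int
main(void)
{
        struct sigaction sa;
        char buf[1];
        int fds[2];

        pipe(fds);

        sa.sa_handler = on_alarm;
        sigemptyset(&sa.sa_mask);
        sa.sa_flags = 0;        /* no SA_RESTART -> syscalls fail with EINTR */
        sigaction(SIGALRM, &sa, NULL);

        alarm(1);
        if (read(fds[0], buf, 1) == -1 && errno == EINTR)
                printf("read interrupted: EINTR\n");
        return (0);
}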
 1839 /*
 1840  * If the current process has received a signal that should be caught, cause
 1841  * termination, or interrupt the current syscall, return the signal number.
 1842  * Stop signals with default action are processed immediately, then cleared;
 1843  * they aren't returned.  This is checked after each entry to the system for
 1844  * a syscall or trap (though this can usually be done without calling issignal
 1845  * by checking the pending signal masks in the CURSIG macro).
 1846  *
 1847  * This routine is called via CURSIG/__cursig.  We will acquire and release
 1848  * p->p_token but if the caller needs to interlock the test the caller must
 1849  * also hold p->p_token.
 1850  *
 1851  *      while (sig = CURSIG(curproc))
 1852  *              postsig(sig);
 1853  *
 1854  * MPSAFE
 1855  */
 1856 int
 1857 issignal(struct lwp *lp, int maytrace)
 1858 {
 1859         struct proc *p = lp->lwp_proc;
 1860         sigset_t mask;
 1861         int sig, prop;
 1862 
 1863         lwkt_gettoken(&p->p_token);
 1864 
 1865         for (;;) {
 1866                 int traced = (p->p_flags & P_TRACED) || (p->p_stops & S_SIG);
 1867 
 1868                 /*
 1869                  * If this process is supposed to stop, stop this thread.
 1870                  */
 1871                 if (p->p_stat == SSTOP)
 1872                         tstop();
 1873 
 1874                 mask = lwp_sigpend(lp);
 1875                 SIGSETNAND(mask, lp->lwp_sigmask);
 1876                 if (p->p_flags & P_PPWAIT)
 1877                         SIG_STOPSIGMASK(mask);
 1878                 if (SIGISEMPTY(mask)) {         /* no signal to send */
 1879                         lwkt_reltoken(&p->p_token);
 1880                         return (0);
 1881                 }
 1882                 sig = sig_ffs(&mask);
 1883 
 1884                 STOPEVENT(p, S_SIG, sig);
 1885 
 1886                 /*
 1887                  * We should see pending but ignored signals
 1888                  * only if P_TRACED was on when they were posted.
 1889                  */
 1890                 if (SIGISMEMBER(p->p_sigignore, sig) && (traced == 0)) {
 1891                         spin_lock(&lp->lwp_spin);
 1892                         lwp_delsig(lp, sig);
 1893                         spin_unlock(&lp->lwp_spin);
 1894                         continue;
 1895                 }
 1896                 if (maytrace &&
 1897                     (p->p_flags & P_TRACED) &&
 1898                     (p->p_flags & P_PPWAIT) == 0) {
 1899                         /*
 1900                          * If traced, always stop, and stay stopped until
 1901                          * released by the parent.
 1902                          *
 1903                          * NOTE: SSTOP may get cleared during the loop,
 1904                          * but we do not re-notify the parent if we have 
 1905                          * to loop several times waiting for the parent
 1906                          * to let us continue.
 1907                          *
 1908                          * XXX not sure if this is still true
 1909                          */
 1910                         p->p_xstat = sig;
 1911                         proc_stop(p);
 1912                         do {
 1913                                 tstop();
 1914                         } while (!trace_req(p) && (p->p_flags & P_TRACED));
 1915 
 1916                         /*
 1917                          * If parent wants us to take the signal,
 1918                          * then it will leave it in p->p_xstat;
 1919                          * otherwise we just look for signals again.
 1920                          */
 1921                         spin_lock(&lp->lwp_spin);
 1922                         lwp_delsig(lp, sig);    /* clear old signal */
 1923                         spin_unlock(&lp->lwp_spin);
 1924                         sig = p->p_xstat;
 1925                         if (sig == 0)
 1926                                 continue;
 1927 
 1928                         /*
 1929                          * Put the new signal into p_siglist.  If the
 1930                          * signal is being masked, look for other signals.
 1931                          *
 1932                          * XXX lwp might need a call to ksignal()
 1933                          */
 1934                         SIGADDSET(p->p_siglist, sig);
 1935                         if (SIGISMEMBER(lp->lwp_sigmask, sig))
 1936                                 continue;
 1937 
 1938                         /*
 1939                          * If the traced bit got turned off, go back up
 1940                          * to the top to rescan signals.  This ensures
 1941                          * that p_sig* and ps_sigact are consistent.
 1942                          */
 1943                         if ((p->p_flags & P_TRACED) == 0)
 1944                                 continue;
 1945                 }
 1946 
 1947                 prop = sigprop(sig);
 1948 
 1949                 /*
 1950                  * Decide whether the signal should be returned.
 1951                  * Return the signal's number, or fall through
 1952                  * to clear it from the pending mask.
 1953                  */
 1954                 switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {
 1955                 case (intptr_t)SIG_DFL:
 1956                         /*
 1957                          * Don't take default actions on system processes.
 1958                          */
 1959                         if (p->p_pid <= 1) {
 1960 #ifdef DIAGNOSTIC
 1961                                 /*
 1962                                  * Are you sure you want to ignore SIGSEGV
 1963                                  * in init? XXX
 1964                                  */
 1965                                 kprintf("Process (pid %lu) got signal %d\n",
 1966                                         (u_long)p->p_pid, sig);
 1967 #endif
 1968                                 break;          /* == ignore */
 1969                         }
 1970 
 1971                         /*
 1972                          * Handle the in-kernel checkpoint action
 1973                          */
 1974                         if (prop & SA_CKPT) {
 1975                                 checkpoint_signal_handler(lp);
 1976                                 break;
 1977                         }
 1978 
 1979                         /*
 1980                          * If there is a pending stop signal to process
 1981                          * with default action, stop here,
 1982                          * then clear the signal.  However,
 1983                          * if the process is a member of an orphaned
 1984                          * process group, ignore tty stop signals.
 1985                          */
 1986                         if (prop & SA_STOP) {
 1987                                 if (p->p_flags & P_TRACED ||
 1988                                     (p->p_pgrp->pg_jobc == 0 &&
 1989                                     prop & SA_TTYSTOP))
 1990                                         break;  /* == ignore */
 1991                                 if ((p->p_flags & P_WEXIT) == 0) {
 1992                                         p->p_xstat = sig;
 1993                                         proc_stop(p);
 1994                                         tstop();
 1995                                 }
 1996                                 break;
 1997                         } else if (prop & SA_IGNORE) {
 1998                                 /*
 1999                                  * Except for SIGCONT, shouldn't get here.
 2000                                  * Default action is to ignore; drop it.
 2001                                  */
 2002                                 break;          /* == ignore */
 2003                         } else {
 2004                                 lwkt_reltoken(&p->p_token);
 2005                                 return (sig);
 2006                         }
 2007 
 2008                         /*NOTREACHED*/
 2009 
 2010                 case (intptr_t)SIG_IGN:
 2011                         /*
 2012                          * Masking above should prevent us ever trying
 2013                          * to take action on an ignored signal other
 2014                          * than SIGCONT, unless process is traced.
 2015                          */
 2016                         if ((prop & SA_CONT) == 0 &&
 2017                             (p->p_flags & P_TRACED) == 0)
 2018                                 kprintf("issignal\n");
 2019                         break;          /* == ignore */
 2020 
 2021                 default:
 2022                         /*
 2023                          * This signal has an action, let
 2024                          * postsig() process it.
 2025                          */
 2026                         lwkt_reltoken(&p->p_token);
 2027                         return (sig);
 2028                 }
 2029                 spin_lock(&lp->lwp_spin);
 2030                 lwp_delsig(lp, sig);            /* take the signal! */
 2031                 spin_unlock(&lp->lwp_spin);
 2032         }
 2033         /* NOTREACHED */
 2034 }
 2035 
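The P_TRACED branch above is what a debugger relies on: every signal stops the
child, and the parent decides via the data argument to PT_CONTINUE whether the
signal is delivered (left in p_xstat) or discarded.  A hedged sketch using the
standard BSD ptrace(2) requests PT_TRACE_ME and PT_CONTINUE:

#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        pid_t pid = fork();
        int status;

        if (pid == 0) {
                ptrace(PT_TRACE_ME, 0, NULL, 0);
                raise(SIGUSR1);         /* traced: stops in issignal() */
                _exit(0);
        }

        waitpid(pid, &status, 0);
        if (WIFSTOPPED(status))
                printf("child stopped on signal %d\n", WSTOPSIG(status));

        /* data == 0 discards the signal; data == WSTOPSIG(status) delivers it */
        ptrace(PT_CONTINUE, pid, (caddr_t)1, 0);
        waitpid(pid, &status, 0);
        return (0);
}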
 2036 /*
 2037  * Take the action for the specified signal
 2038  * from the current set of pending signals.
 2039  *
 2040  * Caller must hold p->p_token
 2041  */
 2042 void
 2043 postsig(int sig)
 2044 {
 2045         struct lwp *lp = curthread->td_lwp;
 2046         struct proc *p = lp->lwp_proc;
 2047         struct sigacts *ps = p->p_sigacts;
 2048         sig_t action;
 2049         sigset_t returnmask;
 2050         int code;
 2051 
 2052         KASSERT(sig != 0, ("postsig"));
 2053 
 2054         KNOTE(&p->p_klist, NOTE_SIGNAL | sig);
 2055 
 2056         /*
 2057          * If we are a virtual kernel running an emulated user process
 2058          * context, switch back to the virtual kernel context before
 2059          * trying to post the signal.
 2060          */
 2061         if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
 2062                 struct trapframe *tf = lp->lwp_md.md_regs;
 2063                 tf->tf_trapno = 0;
 2064                 vkernel_trap(lp, tf);
 2065         }
 2066 
 2067         spin_lock(&lp->lwp_spin);
 2068         lwp_delsig(lp, sig);
 2069         spin_unlock(&lp->lwp_spin);
 2070         action = ps->ps_sigact[_SIG_IDX(sig)];
 2071 #ifdef KTRACE
 2072         if (KTRPOINT(lp->lwp_thread, KTR_PSIG))
 2073                 ktrpsig(lp, sig, action, lp->lwp_flags & LWP_OLDMASK ?
 2074                         &lp->lwp_oldsigmask : &lp->lwp_sigmask, 0);
 2075 #endif
 2076         STOPEVENT(p, S_SIG, sig);
 2077 
 2078         if (action == SIG_DFL) {
 2079                 /*
 2080                  * Default action, where the default is to kill
 2081                  * the process.  (Other cases were ignored above.)
 2082                  */
 2083                 sigexit(lp, sig);
 2084                 /* NOTREACHED */
 2085         } else {
 2086                 /*
 2087                  * If we get here, the signal must be caught.
 2088                  */
 2089                 KASSERT(action != SIG_IGN && !SIGISMEMBER(lp->lwp_sigmask, sig),
 2090                     ("postsig action"));
 2091 
 2092                 /*
 2093                  * Reset the signal handler if asked to
 2094                  */
 2095                 if (SIGISMEMBER(ps->ps_sigreset, sig)) {
 2096                         /*
 2097                          * See kern_sigaction() for origin of this code.
 2098                          */
 2099                         SIGDELSET(p->p_sigcatch, sig);
 2100                         if (sig != SIGCONT &&
 2101                             sigprop(sig) & SA_IGNORE)
 2102                                 SIGADDSET(p->p_sigignore, sig);
 2103                         ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
 2104                 }
 2105 
 2106                 /*
 2107                  * Set the signal mask and calculate the mask to restore
 2108                  * when the signal function returns.
 2109                  *
 2110                  * Special case: user has done a sigsuspend.  Here the
 2111                  * current mask is not of interest, but rather the
 2112                  * mask from before the sigsuspend is what we want
 2113                  * restored after the signal processing is completed.
 2114                  */
 2115                 if (lp->lwp_flags & LWP_OLDMASK) {
 2116                         returnmask = lp->lwp_oldsigmask;
 2117                         lp->lwp_flags &= ~LWP_OLDMASK;
 2118                 } else {
 2119                         returnmask = lp->lwp_sigmask;
 2120                 }
 2121 
 2122                 SIGSETOR(lp->lwp_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
 2123                 if (!SIGISMEMBER(ps->ps_signodefer, sig))
 2124                         SIGADDSET(lp->lwp_sigmask, sig);
 2125 
 2126                 lp->lwp_ru.ru_nsignals++;
 2127                 if (lp->lwp_sig != sig) {
 2128                         code = 0;
 2129                 } else {
 2130                         code = lp->lwp_code;
 2131                         lp->lwp_code = 0;
 2132                         lp->lwp_sig = 0;
 2133                 }
 2134                 (*p->p_sysent->sv_sendsig)(action, sig, &returnmask, code);
 2135         }
 2136 }
 2137 
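The ps_sigreset handling above is the kernel half of SA_RESETHAND: after a
single delivery the disposition reverts to SIG_DFL.  A small userland sketch
that makes the reset visible:

#include <signal.h>
#include <stdio.h>

static void
once(int sig)
{
        (void)sig;
}

int
main(void)
{
        struct sigaction sa, cur;

        sa.sa_handler = once;
        sigemptyset(&sa.sa_mask);
        sa.sa_flags = SA_RESETHAND;     /* one-shot handler */
        sigaction(SIGUSR1, &sa, NULL);

        raise(SIGUSR1);                 /* handler runs, then is reset */

        sigaction(SIGUSR1, NULL, &cur);
        printf("disposition after delivery: %s\n",
            cur.sa_handler == SIG_DFL ? "SIG_DFL" : "still caught");
        return (0);
}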
 2138 /*
 2139  * Kill the current process for stated reason.
 2140  */
 2141 void
 2142 killproc(struct proc *p, char *why)
 2143 {
 2144         log(LOG_ERR, "pid %d (%s), uid %d, was killed: %s\n", 
 2145                 p->p_pid, p->p_comm,
 2146                 p->p_ucred ? p->p_ucred->cr_uid : -1, why);
 2147         ksignal(p, SIGKILL);
 2148 }
 2149 
 2150 /*
 2151  * Force the current process to exit with the specified signal, dumping core
 2152  * if appropriate.  We bypass the normal tests for masked and caught signals,
 2153  * allowing unrecoverable failures to terminate the process without changing
 2154  * signal state.  Mark the accounting record with the signal termination.
 2155  * If dumping core, save the signal number for the debugger.
 2156  *
 2157  * This routine calls exit1() with the signal termination status and
 2158  * does not return.
 2159  */
 2160 void
 2161 sigexit(struct lwp *lp, int sig)
 2162 {
 2163         struct proc *p = lp->lwp_proc;
 2164 
 2165         lwkt_gettoken(&p->p_token);
 2166         p->p_acflag |= AXSIG;
 2167         if (sigprop(sig) & SA_CORE) {
 2168                 lp->lwp_sig = sig;
 2169                 /*
 2170                  * Log signals which would cause core dumps
 2171                  * (Log as LOG_INFO to appease those who don't want
 2172                  * these messages.)
 2173                  * XXX: TODO: write out the ruid as well as the euid.
 2174                  */
 2175                 if (coredump(lp, sig) == 0)
 2176                         sig |= WCOREFLAG;
 2177                 if (kern_logsigexit)
 2178                         log(LOG_INFO,
 2179                             "pid %d (%s), uid %d: exited on signal %d%s\n",
 2180                             p->p_pid, p->p_comm,
 2181                             p->p_ucred ? p->p_ucred->cr_uid : -1,
 2182                             sig &~ WCOREFLAG,
 2183                             sig & WCOREFLAG ? " (core dumped)" : "");
 2184         }
 2185         lwkt_reltoken(&p->p_token);
 2186         exit1(W_EXITCODE(0, sig));
 2187         /* NOTREACHED */
 2188 }
 2189 
 2190 static char corefilename[MAXPATHLEN+1] = {"%N.core"};
 2191 SYSCTL_STRING(_kern, OID_AUTO, corefile, CTLFLAG_RW, corefilename,
 2192               sizeof(corefilename), "process corefile name format string");
 2193 
 2194 /*
 2195  * expand_name(name, uid, pid)
 2196  * Expand the name described in corefilename, using name, uid, and pid.
 2197  * corefilename is a kprintf-like string, with three format specifiers:
 2198  *      %N      name of process ("name")
 2199  *      %P      process id (pid)
 2200  *      %U      user id (uid)
 2201  * For example, "%N.core" is the default; core dumps can be disabled completely
 2202  * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
 2203  * This is controlled by the sysctl variable kern.corefile (see above).
 2204  */
 2205 
 2206 static char *
 2207 expand_name(const char *name, uid_t uid, pid_t pid)
 2208 {
 2209         char *temp;
 2210         char buf[11];           /* Buffer for pid/uid -- max 4B */
 2211         int i, n;
 2212         char *format = corefilename;
 2213         size_t namelen;
 2214 
 2215         temp = kmalloc(MAXPATHLEN + 1, M_TEMP, M_NOWAIT);
 2216         if (temp == NULL)
 2217                 return NULL;
 2218         namelen = strlen(name);
 2219         for (i = 0, n = 0; n < MAXPATHLEN && format[i]; i++) {
 2220                 int l;
 2221                 switch (format[i]) {
 2222                 case '%':       /* Format character */
 2223                         i++;
 2224                         switch (format[i]) {
 2225                         case '%':
 2226                                 temp[n++] = '%';
 2227                                 break;
 2228                         case 'N':       /* process name */
 2229                                 if ((n + namelen) > MAXPATHLEN) {
 2230                                         log(LOG_ERR, "pid %d (%s), uid (%u):  Path `%s%s' is too long\n",
 2231                                             pid, name, uid, temp, name);
 2232                                         kfree(temp, M_TEMP);
 2233                                         return NULL;
 2234                                 }
 2235                                 memcpy(temp+n, name, namelen);
 2236                                 n += namelen;
 2237                                 break;
 2238                         case 'P':       /* process id */
 2239                                 l = ksprintf(buf, "%u", pid);
 2240                                 if ((n + l) > MAXPATHLEN) {
 2241                                         log(LOG_ERR, "pid %d (%s), uid (%u):  Path `%s%s' is too long\n",
 2242                                             pid, name, uid, temp, name);
 2243                                         kfree(temp, M_TEMP);
 2244                                         return NULL;
 2245                                 }
 2246                                 memcpy(temp+n, buf, l);
 2247                                 n += l;
 2248                                 break;
 2249                         case 'U':       /* user id */
 2250                                 l = ksprintf(buf, "%u", uid);
 2251                                 if ((n + l) > MAXPATHLEN) {
 2252                                         log(LOG_ERR, "pid %d (%s), uid (%u):  Path `%s%s' is too long\n",
 2253                                             pid, name, uid, temp, name);
 2254                                         kfree(temp, M_TEMP);
 2255                                         return NULL;
 2256                                 }
 2257                                 memcpy(temp+n, buf, l);
 2258                                 n += l;
 2259                                 break;
 2260                         default:
 2261                                 log(LOG_ERR, "Unknown format character %c in `%s'\n", format[i], format);
 2262                         }
 2263                         break;
 2264                 default:
 2265                         temp[n++] = format[i];
 2266                 }
 2267         }
 2268         temp[n] = '\0';
 2269         return temp;
 2270 }
 2271 
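The format string expanded here lives in the kern.corefile sysctl declared
above.  A read-only userland sketch using sysctlbyname(3); setting a new value
the same way requires appropriate privilege:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        char fmt[256];
        size_t len = sizeof(fmt);

        if (sysctlbyname("kern.corefile", fmt, &len, NULL, 0) == -1) {
                perror("sysctlbyname");
                return (1);
        }
        /* typically "%N.core"; %N, %P and %U expand as described above */
        printf("core file name format: %s\n", fmt);
        return (0);
}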
 2272 /*
 2273  * Dump a process' core.  The main routine does some
 2274  * policy checking, and creates the name of the coredump;
 2275  * then it passes on a vnode and a size limit to the process-specific
 2276  * coredump routine if there is one; if there _is not_ one, it returns
 2277  * ENOSYS; otherwise it returns the error from the process-specific routine.
 2278  *
 2279  * The parameter `lp' is the lwp which triggered the coredump.
 2280  */
 2281 
 2282 static int
 2283 coredump(struct lwp *lp, int sig)
 2284 {
 2285         struct proc *p = lp->lwp_proc;
 2286         struct vnode *vp;
 2287         struct ucred *cred = p->p_ucred;
 2288         struct flock lf;
 2289         struct nlookupdata nd;
 2290         struct vattr vattr;
 2291         int error, error1;
 2292         char *name;                     /* name of corefile */
 2293         off_t limit;
 2294         
 2295         STOPEVENT(p, S_CORE, 0);
 2296 
 2297         if (((sugid_coredump == 0) && p->p_flags & P_SUGID) || do_coredump == 0)
 2298                 return (EFAULT);
 2299         
 2300         /*
 2301          * Note that the bulk of limit checking is done after
 2302          * the corefile is created.  The exception is if the limit
 2303          * for corefiles is 0, in which case we don't bother
 2304          * creating the corefile at all.  This layout means that
 2305          * a corefile larger than the limit ends up truncated
 2306          * rather than not being created at all.
 2307          */
 2308         limit = p->p_rlimit[RLIMIT_CORE].rlim_cur;
 2309         if (limit == 0)
 2310                 return EFBIG;
 2311 
 2312         name = expand_name(p->p_comm, p->p_ucred->cr_uid, p->p_pid);
 2313         if (name == NULL)
 2314                 return (EINVAL);
 2315         error = nlookup_init(&nd, name, UIO_SYSSPACE, NLC_LOCKVP);
 2316         if (error == 0)
 2317                 error = vn_open(&nd, NULL, O_CREAT | FWRITE | O_NOFOLLOW, S_IRUSR | S_IWUSR);
 2318         kfree(name, M_TEMP);
 2319         if (error) {
 2320                 nlookup_done(&nd);
 2321                 return (error);
 2322         }
 2323         vp = nd.nl_open_vp;
 2324         nd.nl_open_vp = NULL;
 2325         nlookup_done(&nd);
 2326 
 2327         vn_unlock(vp);
 2328         lf.l_whence = SEEK_SET;
 2329         lf.l_start = 0;
 2330         lf.l_len = 0;
 2331         lf.l_type = F_WRLCK;
 2332         error = VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, 0);
 2333         if (error)
 2334                 goto out2;
 2335 
 2336         /* Don't dump to non-regular files or files with links. */
 2337         if (vp->v_type != VREG ||
 2338             VOP_GETATTR(vp, &vattr) || vattr.va_nlink != 1) {
 2339                 error = EFAULT;
 2340                 goto out1;
 2341         }
 2342 
 2343         /* Don't dump to files current user does not own */
 2344         if (vattr.va_uid != p->p_ucred->cr_uid) {
 2345                 error = EFAULT;
 2346                 goto out1;
 2347         }
 2348 
 2349         VATTR_NULL(&vattr);
 2350         vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 2351         vattr.va_size = 0;
 2352         VOP_SETATTR(vp, &vattr, cred);
 2353         p->p_acflag |= ACORE;
 2354         vn_unlock(vp);
 2355 
 2356         error = p->p_sysent->sv_coredump ?
 2357                   p->p_sysent->sv_coredump(lp, sig, vp, limit) : ENOSYS;
 2358 
 2359 out1:
 2360         lf.l_type = F_UNLCK;
 2361         VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, 0);
 2362 out2:
 2363         error1 = vn_close(vp, FWRITE);
 2364         if (error == 0)
 2365                 error = error1;
 2366         return (error);
 2367 }
 2368 
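The limit == 0 early return above corresponds to RLIMIT_CORE being zero; with
any larger limit the file is created and then constrained by the
image-specific dumper.  A sketch that disables core dumps for a process (and
anything it forks) through the standard resource-limit interface:

#include <sys/types.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <stdio.h>

int
main(void)
{
        struct rlimit rl;

        getrlimit(RLIMIT_CORE, &rl);
        printf("current core limit: %lld bytes\n", (long long)rl.rlim_cur);

        rl.rlim_cur = 0;                /* coredump() now bails out early */
        if (setrlimit(RLIMIT_CORE, &rl) == -1)
                perror("setrlimit");
        return (0);
}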
 2369 /*
 2370  * Nonexistent system call -- signal the process (it may want to handle it).
 2371  * Flag an error in case the process won't see the signal immediately (blocked or ignored).
 2372  *
 2373  * MPALMOSTSAFE
 2374  */
 2375 /* ARGSUSED */
 2376 int
 2377 sys_nosys(struct nosys_args *args)
 2378 {
 2379         lwpsignal(curproc, curthread->td_lwp, SIGSYS);
 2380         return (EINVAL);
 2381 }
 2382 
 2383 /*
 2384  * Send a SIGIO or SIGURG signal to a process or process group using
 2385  * stored credentials rather than those of the current process.
 2386  */
 2387 void
 2388 pgsigio(struct sigio *sigio, int sig, int checkctty)
 2389 {
 2390         if (sigio == NULL)
 2391                 return;
 2392                 
 2393         if (sigio->sio_pgid > 0) {
 2394                 if (CANSIGIO(sigio->sio_ruid, sigio->sio_ucred,
 2395                              sigio->sio_proc))
 2396                         ksignal(sigio->sio_proc, sig);
 2397         } else if (sigio->sio_pgid < 0) {
 2398                 struct proc *p;
 2399                 struct pgrp *pg = sigio->sio_pgrp;
 2400 
 2401                 /*
 2402                  * Must interlock all signals against fork
 2403                  */
 2404                 pgref(pg);
 2405                 lockmgr(&pg->pg_lock, LK_EXCLUSIVE);
 2406                 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
 2407                         if (CANSIGIO(sigio->sio_ruid, sigio->sio_ucred, p) &&
 2408                             (checkctty == 0 || (p->p_flags & P_CONTROLT)))
 2409                                 ksignal(p, sig);
 2410                 }
 2411                 lockmgr(&pg->pg_lock, LK_RELEASE);
 2412                 pgrel(pg);
 2413         }
 2414 }
 2415 
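pgsigio() is the delivery end of descriptor-based SIGIO/SIGURG notification;
the owner set with fcntl(F_SETOWN) decides whether sio_pgid names a process or
(if negative) a process group.  A userland sketch of the setup side, using the
standard fcntl(2) interface with error checking omitted for brevity:

#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_sigio;

static void
on_sigio(int sig)
{
        (void)sig;
        got_sigio = 1;
}

int
main(void)
{
        int fd = STDIN_FILENO;
        int flags;

        signal(SIGIO, on_sigio);

        fcntl(fd, F_SETOWN, getpid());  /* a negative pgid would target a group */
        flags = fcntl(fd, F_GETFL);
        fcntl(fd, F_SETFL, flags | O_ASYNC);

        pause();                        /* SIGIO arrives when fd becomes ready */
        if (got_sigio)
                printf("descriptor became ready\n");
        return (0);
}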
 2416 static int
 2417 filt_sigattach(struct knote *kn)
 2418 {
 2419         struct proc *p = curproc;
 2420 
 2421         kn->kn_ptr.p_proc = p;
 2422         kn->kn_flags |= EV_CLEAR;               /* automatically set */
 2423 
 2424         /* XXX lock the proc here while adding to the list? */
 2425         knote_insert(&p->p_klist, kn);
 2426 
 2427         return (0);
 2428 }
 2429 
 2430 static void
 2431 filt_sigdetach(struct knote *kn)
 2432 {
 2433         struct proc *p = kn->kn_ptr.p_proc;
 2434 
 2435         knote_remove(&p->p_klist, kn);
 2436 }
 2437 
 2438 /*
 2439  * Signal knotes are shared with proc knotes, so we apply a mask to
 2440  * the hint in order to differentiate them from process hints.  This
 2441  * could be avoided by using a signal-specific knote list, but probably
 2442  * isn't worth the trouble.
 2443  */
 2444 static int
 2445 filt_signal(struct knote *kn, long hint)
 2446 {
 2447         if (hint & NOTE_SIGNAL) {
 2448                 hint &= ~NOTE_SIGNAL;
 2449 
 2450                 if (kn->kn_id == hint)
 2451                         kn->kn_data++;
 2452         }
 2453         return (kn->kn_data != 0);
 2454 }
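The filter above backs EVFILT_SIGNAL; kn_data counts how many times the signal
was posted between kevent() calls and is returned in kev.data.  A minimal
sketch, assuming the documented kqueue(2) behavior that the event is noted
even when the signal's disposition is ignore:

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <signal.h>
#include <stdio.h>

int
main(void)
{
        struct kevent kev;
        struct timespec to = { 1, 0 };
        int kq = kqueue();

        signal(SIGUSR1, SIG_IGN);
        EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
        kevent(kq, &kev, 1, NULL, 0, NULL);

        raise(SIGUSR1);
        raise(SIGUSR1);

        if (kevent(kq, NULL, 0, &kev, 1, &to) == 1)
                printf("SIGUSR1 posted %ld times\n", (long)kev.data);
        return (0);
}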
