FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_sig.c

    1 /*-
    2  * Copyright (c) 1982, 1986, 1989, 1991, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  * (c) UNIX System Laboratories, Inc.
    5  * All or some portions of this file are derived from material licensed
    6  * to the University of California by American Telephone and Telegraph
    7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
    8  * the permission of UNIX System Laboratories, Inc.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  * 4. Neither the name of the University nor the names of its contributors
   19  *    may be used to endorse or promote products derived from this software
   20  *    without specific prior written permission.
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   32  * SUCH DAMAGE.
   33  *
   34  *      @(#)kern_sig.c  8.7 (Berkeley) 4/18/94
   35  */
   36 
   37 #include <sys/cdefs.h>
   38 __FBSDID("$FreeBSD$");
   39 
   40 #include "opt_compat.h"
   41 #include "opt_gzio.h"
   42 #include "opt_ktrace.h"
   43 
   44 #include <sys/param.h>
   45 #include <sys/ctype.h>
   46 #include <sys/systm.h>
   47 #include <sys/signalvar.h>
   48 #include <sys/vnode.h>
   49 #include <sys/acct.h>
   50 #include <sys/bus.h>
   51 #include <sys/capsicum.h>
   52 #include <sys/condvar.h>
   53 #include <sys/event.h>
   54 #include <sys/fcntl.h>
   55 #include <sys/imgact.h>
   56 #include <sys/kernel.h>
   57 #include <sys/ktr.h>
   58 #include <sys/ktrace.h>
   59 #include <sys/lock.h>
   60 #include <sys/malloc.h>
   61 #include <sys/mutex.h>
   62 #include <sys/refcount.h>
   63 #include <sys/namei.h>
   64 #include <sys/proc.h>
   65 #include <sys/procdesc.h>
   66 #include <sys/ptrace.h>
   67 #include <sys/posix4.h>
   68 #include <sys/pioctl.h>
   69 #include <sys/racct.h>
   70 #include <sys/resourcevar.h>
   71 #include <sys/sdt.h>
   72 #include <sys/sbuf.h>
   73 #include <sys/sleepqueue.h>
   74 #include <sys/smp.h>
   75 #include <sys/stat.h>
   76 #include <sys/sx.h>
   77 #include <sys/syscallsubr.h>
   78 #include <sys/sysctl.h>
   79 #include <sys/sysent.h>
   80 #include <sys/syslog.h>
   81 #include <sys/sysproto.h>
   82 #include <sys/timers.h>
   83 #include <sys/unistd.h>
   84 #include <sys/wait.h>
   85 #include <vm/vm.h>
   86 #include <vm/vm_extern.h>
   87 #include <vm/uma.h>
   88 
   89 #include <sys/jail.h>
   90 
   91 #include <machine/cpu.h>
   92 
   93 #include <security/audit/audit.h>
   94 
   95 #define ONSIG   32              /* NSIG for osig* syscalls.  XXX. */
   96 
   97 SDT_PROVIDER_DECLARE(proc);
   98 SDT_PROBE_DEFINE3(proc, , , signal__send,
   99     "struct thread *", "struct proc *", "int");
  100 SDT_PROBE_DEFINE2(proc, , , signal__clear,
  101     "int", "ksiginfo_t *");
  102 SDT_PROBE_DEFINE3(proc, , , signal__discard,
  103     "struct thread *", "struct proc *", "int");
  104 
  105 static int      coredump(struct thread *);
  106 static int      killpg1(struct thread *td, int sig, int pgid, int all,
  107                     ksiginfo_t *ksi);
  108 static int      issignal(struct thread *td);
  109 static int      sigprop(int sig);
  110 static void     tdsigwakeup(struct thread *, int, sig_t, int);
  111 static int      sig_suspend_threads(struct thread *, struct proc *, int);
  112 static int      filt_sigattach(struct knote *kn);
  113 static void     filt_sigdetach(struct knote *kn);
  114 static int      filt_signal(struct knote *kn, long hint);
  115 static struct thread *sigtd(struct proc *p, int sig, int prop);
  116 static void     sigqueue_start(void);
  117 
  118 static uma_zone_t       ksiginfo_zone = NULL;
  119 struct filterops sig_filtops = {
  120         .f_isfd = 0,
  121         .f_attach = filt_sigattach,
  122         .f_detach = filt_sigdetach,
  123         .f_event = filt_signal,
  124 };
  125 
  126 static int      kern_logsigexit = 1;
  127 SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
  128     &kern_logsigexit, 0,
  129     "Log processes quitting on abnormal signals to syslog(3)");
  130 
  131 static int      kern_forcesigexit = 1;
  132 SYSCTL_INT(_kern, OID_AUTO, forcesigexit, CTLFLAG_RW,
  133     &kern_forcesigexit, 0, "Force trap signal to be handled");
  134 
  135 static SYSCTL_NODE(_kern, OID_AUTO, sigqueue, CTLFLAG_RW, 0,
  136     "POSIX real time signal");
  137 
  138 static int      max_pending_per_proc = 128;
  139 SYSCTL_INT(_kern_sigqueue, OID_AUTO, max_pending_per_proc, CTLFLAG_RW,
  140     &max_pending_per_proc, 0, "Max pending signals per proc");
  141 
  142 static int      preallocate_siginfo = 1024;
  143 SYSCTL_INT(_kern_sigqueue, OID_AUTO, preallocate, CTLFLAG_RDTUN,
  144     &preallocate_siginfo, 0, "Preallocated signal memory size");
  145 
  146 static int      signal_overflow = 0;
  147 SYSCTL_INT(_kern_sigqueue, OID_AUTO, overflow, CTLFLAG_RD,
   148     &signal_overflow, 0, "Number of signals that overflowed");
  149 
  150 static int      signal_alloc_fail = 0;
  151 SYSCTL_INT(_kern_sigqueue, OID_AUTO, alloc_fail, CTLFLAG_RD,
   152     &signal_alloc_fail, 0, "Number of signals that failed to be allocated");
  153 
  154 static int      kern_lognosys = 0;
  155 SYSCTL_INT(_kern, OID_AUTO, lognosys, CTLFLAG_RWTUN, &kern_lognosys, 0,
  156     "Log invalid syscalls");
  157 
  158 SYSINIT(signal, SI_SUB_P1003_1B, SI_ORDER_FIRST+3, sigqueue_start, NULL);
  159 
  160 /*
   161  * Policy -- Can ucred cr1 send SIGIO to a process with ucred cr2?
  162  * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
  163  * in the right situations.
  164  */
  165 #define CANSIGIO(cr1, cr2) \
  166         ((cr1)->cr_uid == 0 || \
  167             (cr1)->cr_ruid == (cr2)->cr_ruid || \
  168             (cr1)->cr_uid == (cr2)->cr_ruid || \
  169             (cr1)->cr_ruid == (cr2)->cr_uid || \
  170             (cr1)->cr_uid == (cr2)->cr_uid)
  171 
  172 static int      sugid_coredump;
  173 SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RWTUN,
  174     &sugid_coredump, 0, "Allow setuid and setgid processes to dump core");
  175 
  176 static int      capmode_coredump;
  177 SYSCTL_INT(_kern, OID_AUTO, capmode_coredump, CTLFLAG_RWTUN,
  178     &capmode_coredump, 0, "Allow processes in capability mode to dump core");
  179 
  180 static int      do_coredump = 1;
  181 SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
  182         &do_coredump, 0, "Enable/Disable coredumps");
  183 
  184 static int      set_core_nodump_flag = 0;
  185 SYSCTL_INT(_kern, OID_AUTO, nodump_coredump, CTLFLAG_RW, &set_core_nodump_flag,
  186         0, "Enable setting the NODUMP flag on coredump files");
  187 
  188 static int      coredump_devctl = 0;
  189 SYSCTL_INT(_kern, OID_AUTO, coredump_devctl, CTLFLAG_RW, &coredump_devctl,
  190         0, "Generate a devctl notification when processes coredump");
  191 
  192 /*
  193  * Signal properties and actions.
  194  * The array below categorizes the signals and their default actions
  195  * according to the following properties:
  196  */
  197 #define SA_KILL         0x01            /* terminates process by default */
  198 #define SA_CORE         0x02            /* ditto and coredumps */
  199 #define SA_STOP         0x04            /* suspend process */
  200 #define SA_TTYSTOP      0x08            /* ditto, from tty */
  201 #define SA_IGNORE       0x10            /* ignore by default */
  202 #define SA_CONT         0x20            /* continue if suspended */
  203 #define SA_CANTMASK     0x40            /* non-maskable, catchable */
  204 
  205 static int sigproptbl[NSIG] = {
  206         SA_KILL,                        /* SIGHUP */
  207         SA_KILL,                        /* SIGINT */
  208         SA_KILL|SA_CORE,                /* SIGQUIT */
  209         SA_KILL|SA_CORE,                /* SIGILL */
  210         SA_KILL|SA_CORE,                /* SIGTRAP */
  211         SA_KILL|SA_CORE,                /* SIGABRT */
  212         SA_KILL|SA_CORE,                /* SIGEMT */
  213         SA_KILL|SA_CORE,                /* SIGFPE */
  214         SA_KILL,                        /* SIGKILL */
  215         SA_KILL|SA_CORE,                /* SIGBUS */
  216         SA_KILL|SA_CORE,                /* SIGSEGV */
  217         SA_KILL|SA_CORE,                /* SIGSYS */
  218         SA_KILL,                        /* SIGPIPE */
  219         SA_KILL,                        /* SIGALRM */
  220         SA_KILL,                        /* SIGTERM */
  221         SA_IGNORE,                      /* SIGURG */
  222         SA_STOP,                        /* SIGSTOP */
  223         SA_STOP|SA_TTYSTOP,             /* SIGTSTP */
  224         SA_IGNORE|SA_CONT,              /* SIGCONT */
  225         SA_IGNORE,                      /* SIGCHLD */
  226         SA_STOP|SA_TTYSTOP,             /* SIGTTIN */
  227         SA_STOP|SA_TTYSTOP,             /* SIGTTOU */
  228         SA_IGNORE,                      /* SIGIO */
  229         SA_KILL,                        /* SIGXCPU */
  230         SA_KILL,                        /* SIGXFSZ */
  231         SA_KILL,                        /* SIGVTALRM */
  232         SA_KILL,                        /* SIGPROF */
  233         SA_IGNORE,                      /* SIGWINCH  */
  234         SA_IGNORE,                      /* SIGINFO */
  235         SA_KILL,                        /* SIGUSR1 */
  236         SA_KILL,                        /* SIGUSR2 */
  237 };
  238 
  239 static void reschedule_signals(struct proc *p, sigset_t block, int flags);
  240 
  241 static void
  242 sigqueue_start(void)
  243 {
  244         ksiginfo_zone = uma_zcreate("ksiginfo", sizeof(ksiginfo_t),
  245                 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
  246         uma_prealloc(ksiginfo_zone, preallocate_siginfo);
  247         p31b_setcfg(CTL_P1003_1B_REALTIME_SIGNALS, _POSIX_REALTIME_SIGNALS);
  248         p31b_setcfg(CTL_P1003_1B_RTSIG_MAX, SIGRTMAX - SIGRTMIN + 1);
  249         p31b_setcfg(CTL_P1003_1B_SIGQUEUE_MAX, max_pending_per_proc);
  250 }
  251 
  252 ksiginfo_t *
  253 ksiginfo_alloc(int wait)
  254 {
  255         int flags;
  256 
  257         flags = M_ZERO;
  258         if (! wait)
  259                 flags |= M_NOWAIT;
  260         if (ksiginfo_zone != NULL)
  261                 return ((ksiginfo_t *)uma_zalloc(ksiginfo_zone, flags));
  262         return (NULL);
  263 }
  264 
  265 void
  266 ksiginfo_free(ksiginfo_t *ksi)
  267 {
  268         uma_zfree(ksiginfo_zone, ksi);
  269 }
  270 
  271 static __inline int
  272 ksiginfo_tryfree(ksiginfo_t *ksi)
  273 {
  274         if (!(ksi->ksi_flags & KSI_EXT)) {
  275                 uma_zfree(ksiginfo_zone, ksi);
  276                 return (1);
  277         }
  278         return (0);
  279 }
  280 
  281 void
  282 sigqueue_init(sigqueue_t *list, struct proc *p)
  283 {
  284         SIGEMPTYSET(list->sq_signals);
  285         SIGEMPTYSET(list->sq_kill);
  286         SIGEMPTYSET(list->sq_ptrace);
  287         TAILQ_INIT(&list->sq_list);
  288         list->sq_proc = p;
  289         list->sq_flags = SQ_INIT;
  290 }
  291 
  292 /*
  293  * Get a signal's ksiginfo.
  294  * Return:
  295  *      0       -       signal not found
  296  *      others  -       signal number
  297  */
  298 static int
  299 sigqueue_get(sigqueue_t *sq, int signo, ksiginfo_t *si)
  300 {
  301         struct proc *p = sq->sq_proc;
  302         struct ksiginfo *ksi, *next;
  303         int count = 0;
  304 
  305         KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
  306 
  307         if (!SIGISMEMBER(sq->sq_signals, signo))
  308                 return (0);
  309 
  310         if (SIGISMEMBER(sq->sq_ptrace, signo)) {
  311                 count++;
  312                 SIGDELSET(sq->sq_ptrace, signo);
  313                 si->ksi_flags |= KSI_PTRACE;
  314         }
  315         if (SIGISMEMBER(sq->sq_kill, signo)) {
  316                 count++;
  317                 if (count == 1)
  318                         SIGDELSET(sq->sq_kill, signo);
  319         }
  320 
  321         TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
  322                 if (ksi->ksi_signo == signo) {
  323                         if (count == 0) {
  324                                 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
  325                                 ksi->ksi_sigq = NULL;
  326                                 ksiginfo_copy(ksi, si);
  327                                 if (ksiginfo_tryfree(ksi) && p != NULL)
  328                                         p->p_pendingcnt--;
  329                         }
  330                         if (++count > 1)
  331                                 break;
  332                 }
  333         }
  334 
  335         if (count <= 1)
  336                 SIGDELSET(sq->sq_signals, signo);
  337         si->ksi_signo = signo;
  338         return (signo);
  339 }
  340 
  341 void
  342 sigqueue_take(ksiginfo_t *ksi)
  343 {
  344         struct ksiginfo *kp;
  345         struct proc     *p;
  346         sigqueue_t      *sq;
  347 
  348         if (ksi == NULL || (sq = ksi->ksi_sigq) == NULL)
  349                 return;
  350 
  351         p = sq->sq_proc;
  352         TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
  353         ksi->ksi_sigq = NULL;
  354         if (!(ksi->ksi_flags & KSI_EXT) && p != NULL)
  355                 p->p_pendingcnt--;
  356 
  357         for (kp = TAILQ_FIRST(&sq->sq_list); kp != NULL;
  358              kp = TAILQ_NEXT(kp, ksi_link)) {
  359                 if (kp->ksi_signo == ksi->ksi_signo)
  360                         break;
  361         }
  362         if (kp == NULL && !SIGISMEMBER(sq->sq_kill, ksi->ksi_signo) &&
  363             !SIGISMEMBER(sq->sq_ptrace, ksi->ksi_signo))
  364                 SIGDELSET(sq->sq_signals, ksi->ksi_signo);
  365 }
  366 
  367 static int
  368 sigqueue_add(sigqueue_t *sq, int signo, ksiginfo_t *si)
  369 {
  370         struct proc *p = sq->sq_proc;
  371         struct ksiginfo *ksi;
  372         int ret = 0;
  373 
  374         KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
  375 
  376         /*
  377          * SIGKILL/SIGSTOP cannot be caught or masked, so take the fast path
  378          * for these signals.
  379          */
  380         if (signo == SIGKILL || signo == SIGSTOP || si == NULL) {
  381                 SIGADDSET(sq->sq_kill, signo);
  382                 goto out_set_bit;
  383         }
  384 
  385         /* directly insert the ksi, don't copy it */
  386         if (si->ksi_flags & KSI_INS) {
  387                 if (si->ksi_flags & KSI_HEAD)
  388                         TAILQ_INSERT_HEAD(&sq->sq_list, si, ksi_link);
  389                 else
  390                         TAILQ_INSERT_TAIL(&sq->sq_list, si, ksi_link);
  391                 si->ksi_sigq = sq;
  392                 goto out_set_bit;
  393         }
  394 
  395         if (__predict_false(ksiginfo_zone == NULL)) {
  396                 SIGADDSET(sq->sq_kill, signo);
  397                 goto out_set_bit;
  398         }
  399 
  400         if (p != NULL && p->p_pendingcnt >= max_pending_per_proc) {
  401                 signal_overflow++;
  402                 ret = EAGAIN;
  403         } else if ((ksi = ksiginfo_alloc(0)) == NULL) {
  404                 signal_alloc_fail++;
  405                 ret = EAGAIN;
  406         } else {
  407                 if (p != NULL)
  408                         p->p_pendingcnt++;
  409                 ksiginfo_copy(si, ksi);
  410                 ksi->ksi_signo = signo;
  411                 if (si->ksi_flags & KSI_HEAD)
  412                         TAILQ_INSERT_HEAD(&sq->sq_list, ksi, ksi_link);
  413                 else
  414                         TAILQ_INSERT_TAIL(&sq->sq_list, ksi, ksi_link);
  415                 ksi->ksi_sigq = sq;
  416         }
  417 
  418         if (ret != 0) {
  419                 if ((si->ksi_flags & KSI_PTRACE) != 0) {
  420                         SIGADDSET(sq->sq_ptrace, signo);
  421                         ret = 0;
  422                         goto out_set_bit;
  423                 } else if ((si->ksi_flags & KSI_TRAP) != 0 ||
  424                     (si->ksi_flags & KSI_SIGQ) == 0) {
  425                         SIGADDSET(sq->sq_kill, signo);
  426                         ret = 0;
  427                         goto out_set_bit;
  428                 }
  429                 return (ret);
  430         }
  431 
  432 out_set_bit:
  433         SIGADDSET(sq->sq_signals, signo);
  434         return (ret);
  435 }
  436 
  437 void
  438 sigqueue_flush(sigqueue_t *sq)
  439 {
  440         struct proc *p = sq->sq_proc;
  441         ksiginfo_t *ksi;
  442 
  443         KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
  444 
  445         if (p != NULL)
  446                 PROC_LOCK_ASSERT(p, MA_OWNED);
  447 
  448         while ((ksi = TAILQ_FIRST(&sq->sq_list)) != NULL) {
  449                 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
  450                 ksi->ksi_sigq = NULL;
  451                 if (ksiginfo_tryfree(ksi) && p != NULL)
  452                         p->p_pendingcnt--;
  453         }
  454 
  455         SIGEMPTYSET(sq->sq_signals);
  456         SIGEMPTYSET(sq->sq_kill);
  457         SIGEMPTYSET(sq->sq_ptrace);
  458 }
  459 
  460 static void
  461 sigqueue_move_set(sigqueue_t *src, sigqueue_t *dst, const sigset_t *set)
  462 {
  463         sigset_t tmp;
  464         struct proc *p1, *p2;
  465         ksiginfo_t *ksi, *next;
  466 
  467         KASSERT(src->sq_flags & SQ_INIT, ("src sigqueue not inited"));
  468         KASSERT(dst->sq_flags & SQ_INIT, ("dst sigqueue not inited"));
  469         p1 = src->sq_proc;
  470         p2 = dst->sq_proc;
  471         /* Move siginfo to target list */
  472         TAILQ_FOREACH_SAFE(ksi, &src->sq_list, ksi_link, next) {
  473                 if (SIGISMEMBER(*set, ksi->ksi_signo)) {
  474                         TAILQ_REMOVE(&src->sq_list, ksi, ksi_link);
  475                         if (p1 != NULL)
  476                                 p1->p_pendingcnt--;
  477                         TAILQ_INSERT_TAIL(&dst->sq_list, ksi, ksi_link);
  478                         ksi->ksi_sigq = dst;
  479                         if (p2 != NULL)
  480                                 p2->p_pendingcnt++;
  481                 }
  482         }
  483 
  484         /* Move pending bits to target list */
  485         tmp = src->sq_kill;
  486         SIGSETAND(tmp, *set);
  487         SIGSETOR(dst->sq_kill, tmp);
  488         SIGSETNAND(src->sq_kill, tmp);
  489 
  490         tmp = src->sq_ptrace;
  491         SIGSETAND(tmp, *set);
  492         SIGSETOR(dst->sq_ptrace, tmp);
  493         SIGSETNAND(src->sq_ptrace, tmp);
  494 
  495         tmp = src->sq_signals;
  496         SIGSETAND(tmp, *set);
  497         SIGSETOR(dst->sq_signals, tmp);
  498         SIGSETNAND(src->sq_signals, tmp);
  499 }
  500 
  501 #if 0
  502 static void
  503 sigqueue_move(sigqueue_t *src, sigqueue_t *dst, int signo)
  504 {
  505         sigset_t set;
  506 
  507         SIGEMPTYSET(set);
  508         SIGADDSET(set, signo);
  509         sigqueue_move_set(src, dst, &set);
  510 }
  511 #endif
  512 
  513 static void
  514 sigqueue_delete_set(sigqueue_t *sq, const sigset_t *set)
  515 {
  516         struct proc *p = sq->sq_proc;
  517         ksiginfo_t *ksi, *next;
  518 
  519         KASSERT(sq->sq_flags & SQ_INIT, ("src sigqueue not inited"));
  520 
  521         /* Remove siginfo queue */
  522         TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
  523                 if (SIGISMEMBER(*set, ksi->ksi_signo)) {
  524                         TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
  525                         ksi->ksi_sigq = NULL;
  526                         if (ksiginfo_tryfree(ksi) && p != NULL)
  527                                 p->p_pendingcnt--;
  528                 }
  529         }
  530         SIGSETNAND(sq->sq_kill, *set);
  531         SIGSETNAND(sq->sq_ptrace, *set);
  532         SIGSETNAND(sq->sq_signals, *set);
  533 }
  534 
  535 void
  536 sigqueue_delete(sigqueue_t *sq, int signo)
  537 {
  538         sigset_t set;
  539 
  540         SIGEMPTYSET(set);
  541         SIGADDSET(set, signo);
  542         sigqueue_delete_set(sq, &set);
  543 }
  544 
  545 /* Remove a set of signals for a process */
  546 static void
  547 sigqueue_delete_set_proc(struct proc *p, const sigset_t *set)
  548 {
  549         sigqueue_t worklist;
  550         struct thread *td0;
  551 
  552         PROC_LOCK_ASSERT(p, MA_OWNED);
  553 
  554         sigqueue_init(&worklist, NULL);
  555         sigqueue_move_set(&p->p_sigqueue, &worklist, set);
  556 
  557         FOREACH_THREAD_IN_PROC(p, td0)
  558                 sigqueue_move_set(&td0->td_sigqueue, &worklist, set);
  559 
  560         sigqueue_flush(&worklist);
  561 }
  562 
  563 void
  564 sigqueue_delete_proc(struct proc *p, int signo)
  565 {
  566         sigset_t set;
  567 
  568         SIGEMPTYSET(set);
  569         SIGADDSET(set, signo);
  570         sigqueue_delete_set_proc(p, &set);
  571 }
  572 
  573 static void
  574 sigqueue_delete_stopmask_proc(struct proc *p)
  575 {
  576         sigset_t set;
  577 
  578         SIGEMPTYSET(set);
  579         SIGADDSET(set, SIGSTOP);
  580         SIGADDSET(set, SIGTSTP);
  581         SIGADDSET(set, SIGTTIN);
  582         SIGADDSET(set, SIGTTOU);
  583         sigqueue_delete_set_proc(p, &set);
  584 }
  585 
  586 /*
   587  * Determine the signal that should be delivered to thread td, the
   588  * current thread; return 0 if none.  If there is a pending stop signal
   589  * with default action, the process stops in issignal().
  590  */
  591 int
  592 cursig(struct thread *td)
  593 {
  594         PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
  595         mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED);
  596         THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
  597         return (SIGPENDING(td) ? issignal(td) : 0);
  598 }
  599 
  600 /*
  601  * Arrange for ast() to handle unmasked pending signals on return to user
  602  * mode.  This must be called whenever a signal is added to td_sigqueue or
  603  * unmasked in td_sigmask.
  604  */
  605 void
  606 signotify(struct thread *td)
  607 {
  608         struct proc *p;
  609 
  610         p = td->td_proc;
  611 
  612         PROC_LOCK_ASSERT(p, MA_OWNED);
  613 
  614         if (SIGPENDING(td)) {
  615                 thread_lock(td);
  616                 td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING;
  617                 thread_unlock(td);
  618         }
  619 }
  620 
  621 /*
  622  * Returns 1 (true) if altstack is configured for the thread, and the
  623  * passed stack bottom address falls into the altstack range.  Handles
   624  * the COMPAT_43 special case where the alternate stack size is zero.
  625  */
  626 int
  627 sigonstack(size_t sp)
  628 {
  629         struct thread *td;
  630 
  631         td = curthread;
  632         if ((td->td_pflags & TDP_ALTSTACK) == 0)
  633                 return (0);
  634 #if defined(COMPAT_43)
  635         if (td->td_sigstk.ss_size == 0)
  636                 return ((td->td_sigstk.ss_flags & SS_ONSTACK) != 0);
  637 #endif
  638         return (sp >= (size_t)td->td_sigstk.ss_sp &&
  639             sp < td->td_sigstk.ss_size + (size_t)td->td_sigstk.ss_sp);
  640 }
  641 
  642 static __inline int
  643 sigprop(int sig)
  644 {
  645 
  646         if (sig > 0 && sig < NSIG)
  647                 return (sigproptbl[_SIG_IDX(sig)]);
  648         return (0);
  649 }
  650 
  651 int
  652 sig_ffs(sigset_t *set)
  653 {
  654         int i;
  655 
  656         for (i = 0; i < _SIG_WORDS; i++)
  657                 if (set->__bits[i])
  658                         return (ffs(set->__bits[i]) + (i * 32));
  659         return (0);
  660 }
  661 
  662 static bool
  663 sigact_flag_test(const struct sigaction *act, int flag)
  664 {
  665 
  666         /*
  667          * SA_SIGINFO is reset when signal disposition is set to
  668          * ignore or default.  Other flags are kept according to user
  669          * settings.
  670          */
  671         return ((act->sa_flags & flag) != 0 && (flag != SA_SIGINFO ||
  672             ((__sighandler_t *)act->sa_sigaction != SIG_IGN &&
  673             (__sighandler_t *)act->sa_sigaction != SIG_DFL)));
  674 }
  675 
  676 /*
  677  * kern_sigaction
  678  * sigaction
  679  * freebsd4_sigaction
  680  * osigaction
  681  */
  682 int
  683 kern_sigaction(struct thread *td, int sig, const struct sigaction *act,
  684     struct sigaction *oact, int flags)
  685 {
  686         struct sigacts *ps;
  687         struct proc *p = td->td_proc;
  688 
  689         if (!_SIG_VALID(sig))
  690                 return (EINVAL);
  691         if (act != NULL && act->sa_handler != SIG_DFL &&
  692             act->sa_handler != SIG_IGN && (act->sa_flags & ~(SA_ONSTACK |
  693             SA_RESTART | SA_RESETHAND | SA_NOCLDSTOP | SA_NODEFER |
  694             SA_NOCLDWAIT | SA_SIGINFO)) != 0)
  695                 return (EINVAL);
  696 
  697         PROC_LOCK(p);
  698         ps = p->p_sigacts;
  699         mtx_lock(&ps->ps_mtx);
  700         if (oact) {
  701                 memset(oact, 0, sizeof(*oact));
  702                 oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
  703                 if (SIGISMEMBER(ps->ps_sigonstack, sig))
  704                         oact->sa_flags |= SA_ONSTACK;
  705                 if (!SIGISMEMBER(ps->ps_sigintr, sig))
  706                         oact->sa_flags |= SA_RESTART;
  707                 if (SIGISMEMBER(ps->ps_sigreset, sig))
  708                         oact->sa_flags |= SA_RESETHAND;
  709                 if (SIGISMEMBER(ps->ps_signodefer, sig))
  710                         oact->sa_flags |= SA_NODEFER;
  711                 if (SIGISMEMBER(ps->ps_siginfo, sig)) {
  712                         oact->sa_flags |= SA_SIGINFO;
  713                         oact->sa_sigaction =
  714                             (__siginfohandler_t *)ps->ps_sigact[_SIG_IDX(sig)];
  715                 } else
  716                         oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
  717                 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP)
  718                         oact->sa_flags |= SA_NOCLDSTOP;
  719                 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT)
  720                         oact->sa_flags |= SA_NOCLDWAIT;
  721         }
  722         if (act) {
  723                 if ((sig == SIGKILL || sig == SIGSTOP) &&
  724                     act->sa_handler != SIG_DFL) {
  725                         mtx_unlock(&ps->ps_mtx);
  726                         PROC_UNLOCK(p);
  727                         return (EINVAL);
  728                 }
  729 
  730                 /*
  731                  * Change setting atomically.
  732                  */
  733 
  734                 ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
  735                 SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
  736                 if (sigact_flag_test(act, SA_SIGINFO)) {
  737                         ps->ps_sigact[_SIG_IDX(sig)] =
  738                             (__sighandler_t *)act->sa_sigaction;
  739                         SIGADDSET(ps->ps_siginfo, sig);
  740                 } else {
  741                         ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
  742                         SIGDELSET(ps->ps_siginfo, sig);
  743                 }
  744                 if (!sigact_flag_test(act, SA_RESTART))
  745                         SIGADDSET(ps->ps_sigintr, sig);
  746                 else
  747                         SIGDELSET(ps->ps_sigintr, sig);
  748                 if (sigact_flag_test(act, SA_ONSTACK))
  749                         SIGADDSET(ps->ps_sigonstack, sig);
  750                 else
  751                         SIGDELSET(ps->ps_sigonstack, sig);
  752                 if (sigact_flag_test(act, SA_RESETHAND))
  753                         SIGADDSET(ps->ps_sigreset, sig);
  754                 else
  755                         SIGDELSET(ps->ps_sigreset, sig);
  756                 if (sigact_flag_test(act, SA_NODEFER))
  757                         SIGADDSET(ps->ps_signodefer, sig);
  758                 else
  759                         SIGDELSET(ps->ps_signodefer, sig);
  760                 if (sig == SIGCHLD) {
  761                         if (act->sa_flags & SA_NOCLDSTOP)
  762                                 ps->ps_flag |= PS_NOCLDSTOP;
  763                         else
  764                                 ps->ps_flag &= ~PS_NOCLDSTOP;
  765                         if (act->sa_flags & SA_NOCLDWAIT) {
  766                                 /*
  767                                  * Paranoia: since SA_NOCLDWAIT is implemented
  768                                  * by reparenting the dying child to PID 1 (and
   769                                  * trusting it to reap the zombie), PID 1 itself
  770                                  * is forbidden to set SA_NOCLDWAIT.
  771                                  */
  772                                 if (p->p_pid == 1)
  773                                         ps->ps_flag &= ~PS_NOCLDWAIT;
  774                                 else
  775                                         ps->ps_flag |= PS_NOCLDWAIT;
  776                         } else
  777                                 ps->ps_flag &= ~PS_NOCLDWAIT;
  778                         if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
  779                                 ps->ps_flag |= PS_CLDSIGIGN;
  780                         else
  781                                 ps->ps_flag &= ~PS_CLDSIGIGN;
  782                 }
  783                 /*
  784                  * Set bit in ps_sigignore for signals that are set to SIG_IGN,
  785                  * and for signals set to SIG_DFL where the default is to
  786                  * ignore. However, don't put SIGCONT in ps_sigignore, as we
  787                  * have to restart the process.
  788                  */
  789                 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
  790                     (sigprop(sig) & SA_IGNORE &&
  791                      ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
  792                         /* never to be seen again */
  793                         sigqueue_delete_proc(p, sig);
  794                         if (sig != SIGCONT)
  795                                 /* easier in psignal */
  796                                 SIGADDSET(ps->ps_sigignore, sig);
  797                         SIGDELSET(ps->ps_sigcatch, sig);
  798                 } else {
  799                         SIGDELSET(ps->ps_sigignore, sig);
  800                         if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
  801                                 SIGDELSET(ps->ps_sigcatch, sig);
  802                         else
  803                                 SIGADDSET(ps->ps_sigcatch, sig);
  804                 }
  805 #ifdef COMPAT_FREEBSD4
  806                 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
  807                     ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
  808                     (flags & KSA_FREEBSD4) == 0)
  809                         SIGDELSET(ps->ps_freebsd4, sig);
  810                 else
  811                         SIGADDSET(ps->ps_freebsd4, sig);
  812 #endif
  813 #ifdef COMPAT_43
  814                 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
  815                     ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
  816                     (flags & KSA_OSIGSET) == 0)
  817                         SIGDELSET(ps->ps_osigset, sig);
  818                 else
  819                         SIGADDSET(ps->ps_osigset, sig);
  820 #endif
  821         }
  822         mtx_unlock(&ps->ps_mtx);
  823         PROC_UNLOCK(p);
  824         return (0);
  825 }
  826 
  827 #ifndef _SYS_SYSPROTO_H_
  828 struct sigaction_args {
  829         int     sig;
  830         struct  sigaction *act;
  831         struct  sigaction *oact;
  832 };
  833 #endif
  834 int
  835 sys_sigaction(struct thread *td, struct sigaction_args *uap)
  836 {
  837         struct sigaction act, oact;
  838         struct sigaction *actp, *oactp;
  839         int error;
  840 
  841         actp = (uap->act != NULL) ? &act : NULL;
  842         oactp = (uap->oact != NULL) ? &oact : NULL;
  843         if (actp) {
  844                 error = copyin(uap->act, actp, sizeof(act));
  845                 if (error)
  846                         return (error);
  847         }
  848         error = kern_sigaction(td, uap->sig, actp, oactp, 0);
  849         if (oactp && !error)
  850                 error = copyout(oactp, uap->oact, sizeof(oact));
  851         return (error);
  852 }
  853 
  854 #ifdef COMPAT_FREEBSD4
  855 #ifndef _SYS_SYSPROTO_H_
  856 struct freebsd4_sigaction_args {
  857         int     sig;
  858         struct  sigaction *act;
  859         struct  sigaction *oact;
  860 };
  861 #endif
  862 int
  863 freebsd4_sigaction(struct thread *td, struct freebsd4_sigaction_args *uap)
  864 {
  865         struct sigaction act, oact;
  866         struct sigaction *actp, *oactp;
  867         int error;
  868 
  869 
  870         actp = (uap->act != NULL) ? &act : NULL;
  871         oactp = (uap->oact != NULL) ? &oact : NULL;
  872         if (actp) {
  873                 error = copyin(uap->act, actp, sizeof(act));
  874                 if (error)
  875                         return (error);
  876         }
  877         error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4);
  878         if (oactp && !error)
  879                 error = copyout(oactp, uap->oact, sizeof(oact));
  880         return (error);
  881 }
   882 #endif  /* COMPAT_FREEBSD4 */
  883 
  884 #ifdef COMPAT_43        /* XXX - COMPAT_FBSD3 */
  885 #ifndef _SYS_SYSPROTO_H_
  886 struct osigaction_args {
  887         int     signum;
  888         struct  osigaction *nsa;
  889         struct  osigaction *osa;
  890 };
  891 #endif
  892 int
  893 osigaction(struct thread *td, struct osigaction_args *uap)
  894 {
  895         struct osigaction sa;
  896         struct sigaction nsa, osa;
  897         struct sigaction *nsap, *osap;
  898         int error;
  899 
  900         if (uap->signum <= 0 || uap->signum >= ONSIG)
  901                 return (EINVAL);
  902 
  903         nsap = (uap->nsa != NULL) ? &nsa : NULL;
  904         osap = (uap->osa != NULL) ? &osa : NULL;
  905 
  906         if (nsap) {
  907                 error = copyin(uap->nsa, &sa, sizeof(sa));
  908                 if (error)
  909                         return (error);
  910                 nsap->sa_handler = sa.sa_handler;
  911                 nsap->sa_flags = sa.sa_flags;
  912                 OSIG2SIG(sa.sa_mask, nsap->sa_mask);
  913         }
  914         error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
  915         if (osap && !error) {
  916                 sa.sa_handler = osap->sa_handler;
  917                 sa.sa_flags = osap->sa_flags;
  918                 SIG2OSIG(osap->sa_mask, sa.sa_mask);
  919                 error = copyout(&sa, uap->osa, sizeof(sa));
  920         }
  921         return (error);
  922 }
  923 
  924 #if !defined(__i386__)
  925 /* Avoid replicating the same stub everywhere */
  926 int
  927 osigreturn(struct thread *td, struct osigreturn_args *uap)
  928 {
  929 
  930         return (nosys(td, (struct nosys_args *)uap));
  931 }
  932 #endif
  933 #endif /* COMPAT_43 */
  934 
  935 /*
  936  * Initialize signal state for process 0;
  937  * set to ignore signals that are ignored by default.
  938  */
  939 void
  940 siginit(struct proc *p)
  941 {
  942         int i;
  943         struct sigacts *ps;
  944 
  945         PROC_LOCK(p);
  946         ps = p->p_sigacts;
  947         mtx_lock(&ps->ps_mtx);
  948         for (i = 1; i <= NSIG; i++) {
  949                 if (sigprop(i) & SA_IGNORE && i != SIGCONT) {
  950                         SIGADDSET(ps->ps_sigignore, i);
  951                 }
  952         }
  953         mtx_unlock(&ps->ps_mtx);
  954         PROC_UNLOCK(p);
  955 }
  956 
  957 /*
  958  * Reset specified signal to the default disposition.
  959  */
  960 static void
  961 sigdflt(struct sigacts *ps, int sig)
  962 {
  963 
  964         mtx_assert(&ps->ps_mtx, MA_OWNED);
  965         SIGDELSET(ps->ps_sigcatch, sig);
  966         if ((sigprop(sig) & SA_IGNORE) != 0 && sig != SIGCONT)
  967                 SIGADDSET(ps->ps_sigignore, sig);
  968         ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
  969         SIGDELSET(ps->ps_siginfo, sig);
  970 }
  971 
  972 /*
  973  * Reset signals for an exec of the specified process.
  974  */
  975 void
  976 execsigs(struct proc *p)
  977 {
  978         sigset_t osigignore;
  979         struct sigacts *ps;
  980         int sig;
  981         struct thread *td;
  982 
  983         /*
  984          * Reset caught signals.  Held signals remain held
  985          * through td_sigmask (unless they were caught,
  986          * and are now ignored by default).
  987          */
  988         PROC_LOCK_ASSERT(p, MA_OWNED);
  989         ps = p->p_sigacts;
  990         mtx_lock(&ps->ps_mtx);
  991         sig_drop_caught(p);
  992 
  993         /*
  994          * As CloudABI processes cannot modify signal handlers, fully
  995          * reset all signals to their default behavior. Do ignore
  996          * SIGPIPE, as it would otherwise be impossible to recover from
  997          * writes to broken pipes and sockets.
  998          */
  999         if (SV_PROC_ABI(p) == SV_ABI_CLOUDABI) {
 1000                 osigignore = ps->ps_sigignore;
 1001                 while (SIGNOTEMPTY(osigignore)) {
 1002                         sig = sig_ffs(&osigignore);
 1003                         SIGDELSET(osigignore, sig);
 1004                         if (sig != SIGPIPE)
 1005                                 sigdflt(ps, sig);
 1006                 }
 1007                 SIGADDSET(ps->ps_sigignore, SIGPIPE);
 1008         }
 1009 
 1010         /*
 1011          * Reset stack state to the user stack.
 1012          * Clear set of signals caught on the signal stack.
 1013          */
 1014         td = curthread;
 1015         MPASS(td->td_proc == p);
 1016         td->td_sigstk.ss_flags = SS_DISABLE;
 1017         td->td_sigstk.ss_size = 0;
 1018         td->td_sigstk.ss_sp = 0;
 1019         td->td_pflags &= ~TDP_ALTSTACK;
 1020         /*
  1021          * Reset the "no zombies if child dies" flag, as Solaris does.
 1022          */
 1023         ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
 1024         if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
 1025                 ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
 1026         mtx_unlock(&ps->ps_mtx);
 1027 }
 1028 
 1029 /*
 1030  * kern_sigprocmask()
 1031  *
 1032  *      Manipulate signal mask.
 1033  */
 1034 int
 1035 kern_sigprocmask(struct thread *td, int how, sigset_t *set, sigset_t *oset,
 1036     int flags)
 1037 {
 1038         sigset_t new_block, oset1;
 1039         struct proc *p;
 1040         int error;
 1041 
 1042         p = td->td_proc;
 1043         if ((flags & SIGPROCMASK_PROC_LOCKED) != 0)
 1044                 PROC_LOCK_ASSERT(p, MA_OWNED);
 1045         else
 1046                 PROC_LOCK(p);
 1047         mtx_assert(&p->p_sigacts->ps_mtx, (flags & SIGPROCMASK_PS_LOCKED) != 0
 1048             ? MA_OWNED : MA_NOTOWNED);
 1049         if (oset != NULL)
 1050                 *oset = td->td_sigmask;
 1051 
 1052         error = 0;
 1053         if (set != NULL) {
 1054                 switch (how) {
 1055                 case SIG_BLOCK:
 1056                         SIG_CANTMASK(*set);
 1057                         oset1 = td->td_sigmask;
 1058                         SIGSETOR(td->td_sigmask, *set);
 1059                         new_block = td->td_sigmask;
 1060                         SIGSETNAND(new_block, oset1);
 1061                         break;
 1062                 case SIG_UNBLOCK:
 1063                         SIGSETNAND(td->td_sigmask, *set);
 1064                         signotify(td);
 1065                         goto out;
 1066                 case SIG_SETMASK:
 1067                         SIG_CANTMASK(*set);
 1068                         oset1 = td->td_sigmask;
 1069                         if (flags & SIGPROCMASK_OLD)
 1070                                 SIGSETLO(td->td_sigmask, *set);
 1071                         else
 1072                                 td->td_sigmask = *set;
 1073                         new_block = td->td_sigmask;
 1074                         SIGSETNAND(new_block, oset1);
 1075                         signotify(td);
 1076                         break;
 1077                 default:
 1078                         error = EINVAL;
 1079                         goto out;
 1080                 }
 1081 
 1082                 /*
 1083                  * The new_block set contains signals that were not previously
 1084                  * blocked, but are blocked now.
 1085                  *
 1086                  * In case we block any signal that was not previously blocked
  1087                  * for td, and the process has the signal pending, try to
  1088                  * schedule signal delivery to some thread that does not
  1089                  * block the signal, possibly waking it up.
 1090                  */
 1091                 if (p->p_numthreads != 1)
 1092                         reschedule_signals(p, new_block, flags);
 1093         }
 1094 
 1095 out:
 1096         if (!(flags & SIGPROCMASK_PROC_LOCKED))
 1097                 PROC_UNLOCK(p);
 1098         return (error);
 1099 }
 1100 
 1101 #ifndef _SYS_SYSPROTO_H_
 1102 struct sigprocmask_args {
 1103         int     how;
 1104         const sigset_t *set;
 1105         sigset_t *oset;
 1106 };
 1107 #endif
 1108 int
 1109 sys_sigprocmask(struct thread *td, struct sigprocmask_args *uap)
 1110 {
 1111         sigset_t set, oset;
 1112         sigset_t *setp, *osetp;
 1113         int error;
 1114 
 1115         setp = (uap->set != NULL) ? &set : NULL;
 1116         osetp = (uap->oset != NULL) ? &oset : NULL;
 1117         if (setp) {
 1118                 error = copyin(uap->set, setp, sizeof(set));
 1119                 if (error)
 1120                         return (error);
 1121         }
 1122         error = kern_sigprocmask(td, uap->how, setp, osetp, 0);
 1123         if (osetp && !error) {
 1124                 error = copyout(osetp, uap->oset, sizeof(oset));
 1125         }
 1126         return (error);
 1127 }
 1128 
 1129 #ifdef COMPAT_43        /* XXX - COMPAT_FBSD3 */
 1130 #ifndef _SYS_SYSPROTO_H_
 1131 struct osigprocmask_args {
 1132         int     how;
 1133         osigset_t mask;
 1134 };
 1135 #endif
 1136 int
 1137 osigprocmask(struct thread *td, struct osigprocmask_args *uap)
 1138 {
 1139         sigset_t set, oset;
 1140         int error;
 1141 
 1142         OSIG2SIG(uap->mask, set);
 1143         error = kern_sigprocmask(td, uap->how, &set, &oset, 1);
 1144         SIG2OSIG(oset, td->td_retval[0]);
 1145         return (error);
 1146 }
 1147 #endif /* COMPAT_43 */
 1148 
 1149 int
 1150 sys_sigwait(struct thread *td, struct sigwait_args *uap)
 1151 {
 1152         ksiginfo_t ksi;
 1153         sigset_t set;
 1154         int error;
 1155 
 1156         error = copyin(uap->set, &set, sizeof(set));
 1157         if (error) {
 1158                 td->td_retval[0] = error;
 1159                 return (0);
 1160         }
 1161 
 1162         error = kern_sigtimedwait(td, set, &ksi, NULL);
 1163         if (error) {
 1164                 if (error == EINTR && td->td_proc->p_osrel < P_OSREL_SIGWAIT)
 1165                         error = ERESTART;
 1166                 if (error == ERESTART)
 1167                         return (error);
 1168                 td->td_retval[0] = error;
 1169                 return (0);
 1170         }
 1171 
 1172         error = copyout(&ksi.ksi_signo, uap->sig, sizeof(ksi.ksi_signo));
 1173         td->td_retval[0] = error;
 1174         return (0);
 1175 }
 1176 
 1177 int
 1178 sys_sigtimedwait(struct thread *td, struct sigtimedwait_args *uap)
 1179 {
 1180         struct timespec ts;
 1181         struct timespec *timeout;
 1182         sigset_t set;
 1183         ksiginfo_t ksi;
 1184         int error;
 1185 
 1186         if (uap->timeout) {
 1187                 error = copyin(uap->timeout, &ts, sizeof(ts));
 1188                 if (error)
 1189                         return (error);
 1190 
 1191                 timeout = &ts;
 1192         } else
 1193                 timeout = NULL;
 1194 
 1195         error = copyin(uap->set, &set, sizeof(set));
 1196         if (error)
 1197                 return (error);
 1198 
 1199         error = kern_sigtimedwait(td, set, &ksi, timeout);
 1200         if (error)
 1201                 return (error);
 1202 
 1203         if (uap->info)
 1204                 error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));
 1205 
 1206         if (error == 0)
 1207                 td->td_retval[0] = ksi.ksi_signo;
 1208         return (error);
 1209 }
 1210 
 1211 int
 1212 sys_sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap)
 1213 {
 1214         ksiginfo_t ksi;
 1215         sigset_t set;
 1216         int error;
 1217 
 1218         error = copyin(uap->set, &set, sizeof(set));
 1219         if (error)
 1220                 return (error);
 1221 
 1222         error = kern_sigtimedwait(td, set, &ksi, NULL);
 1223         if (error)
 1224                 return (error);
 1225 
 1226         if (uap->info)
 1227                 error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));
 1228 
 1229         if (error == 0)
 1230                 td->td_retval[0] = ksi.ksi_signo;
 1231         return (error);
 1232 }
 1233 
 1234 static void
 1235 proc_td_siginfo_capture(struct thread *td, siginfo_t *si)
 1236 {
 1237         struct thread *thr;
 1238 
 1239         FOREACH_THREAD_IN_PROC(td->td_proc, thr) {
 1240                 if (thr == td)
 1241                         thr->td_si = *si;
 1242                 else
 1243                         thr->td_si.si_signo = 0;
 1244         }
 1245 }
 1246 
 1247 int
 1248 kern_sigtimedwait(struct thread *td, sigset_t waitset, ksiginfo_t *ksi,
 1249         struct timespec *timeout)
 1250 {
 1251         struct sigacts *ps;
 1252         sigset_t saved_mask, new_block;
 1253         struct proc *p;
 1254         int error, sig, timo, timevalid = 0;
 1255         struct timespec rts, ets, ts;
 1256         struct timeval tv;
 1257         bool traced;
 1258 
 1259         p = td->td_proc;
 1260         error = 0;
 1261         ets.tv_sec = 0;
 1262         ets.tv_nsec = 0;
 1263         traced = false;
 1264 
 1265         if (timeout != NULL) {
 1266                 if (timeout->tv_nsec >= 0 && timeout->tv_nsec < 1000000000) {
 1267                         timevalid = 1;
 1268                         getnanouptime(&rts);
 1269                         ets = rts;
 1270                         timespecadd(&ets, timeout);
 1271                 }
 1272         }
 1273         ksiginfo_init(ksi);
  1274         /* Some signals cannot be waited for. */
 1275         SIG_CANTMASK(waitset);
 1276         ps = p->p_sigacts;
 1277         PROC_LOCK(p);
 1278         saved_mask = td->td_sigmask;
 1279         SIGSETNAND(td->td_sigmask, waitset);
 1280         for (;;) {
 1281                 mtx_lock(&ps->ps_mtx);
 1282                 sig = cursig(td);
 1283                 mtx_unlock(&ps->ps_mtx);
 1284                 KASSERT(sig >= 0, ("sig %d", sig));
 1285                 if (sig != 0 && SIGISMEMBER(waitset, sig)) {
 1286                         if (sigqueue_get(&td->td_sigqueue, sig, ksi) != 0 ||
 1287                             sigqueue_get(&p->p_sigqueue, sig, ksi) != 0) {
 1288                                 error = 0;
 1289                                 break;
 1290                         }
 1291                 }
 1292 
 1293                 if (error != 0)
 1294                         break;
 1295 
 1296                 /*
 1297                  * POSIX says this must be checked after looking for pending
 1298                  * signals.
 1299                  */
 1300                 if (timeout != NULL) {
 1301                         if (!timevalid) {
 1302                                 error = EINVAL;
 1303                                 break;
 1304                         }
 1305                         getnanouptime(&rts);
 1306                         if (timespeccmp(&rts, &ets, >=)) {
 1307                                 error = EAGAIN;
 1308                                 break;
 1309                         }
 1310                         ts = ets;
 1311                         timespecsub(&ts, &rts);
 1312                         TIMESPEC_TO_TIMEVAL(&tv, &ts);
 1313                         timo = tvtohz(&tv);
 1314                 } else {
 1315                         timo = 0;
 1316                 }
 1317 
 1318                 if (traced) {
 1319                         error = EINTR;
 1320                         break;
 1321                 }
 1322 
 1323                 error = msleep(ps, &p->p_mtx, PPAUSE|PCATCH, "sigwait", timo);
 1324 
 1325                 if (timeout != NULL) {
 1326                         if (error == ERESTART) {
  1327                                 /* Timeout cannot be restarted. */
 1328                                 error = EINTR;
 1329                         } else if (error == EAGAIN) {
  1330                                 /* We will recalculate the timeout ourselves. */
 1331                                 error = 0;
 1332                         }
 1333                 }
 1334 
 1335                 /*
 1336                  * If PTRACE_SCE or PTRACE_SCX were set after
  1337                  * userspace entered the syscall, return a spurious
  1338                  * EINTR after the wait is done.  Only do this as a last
 1339                  * resort after rechecking for possible queued signals
 1340                  * and expired timeouts.
 1341                  */
 1342                 if (error == 0 && (p->p_ptevents & PTRACE_SYSCALL) != 0)
 1343                         traced = true;
 1344         }
 1345 
 1346         new_block = saved_mask;
 1347         SIGSETNAND(new_block, td->td_sigmask);
 1348         td->td_sigmask = saved_mask;
 1349         /*
  1350          * Fewer signals can be delivered to us; reschedule signal
 1351          * notification.
 1352          */
 1353         if (p->p_numthreads != 1)
 1354                 reschedule_signals(p, new_block, 0);
 1355 
 1356         if (error == 0) {
 1357                 SDT_PROBE2(proc, , , signal__clear, sig, ksi);
 1358 
 1359                 if (ksi->ksi_code == SI_TIMER)
 1360                         itimer_accept(p, ksi->ksi_timerid, ksi);
 1361 
 1362 #ifdef KTRACE
 1363                 if (KTRPOINT(td, KTR_PSIG)) {
 1364                         sig_t action;
 1365 
 1366                         mtx_lock(&ps->ps_mtx);
 1367                         action = ps->ps_sigact[_SIG_IDX(sig)];
 1368                         mtx_unlock(&ps->ps_mtx);
 1369                         ktrpsig(sig, action, &td->td_sigmask, ksi->ksi_code);
 1370                 }
 1371 #endif
 1372                 if (sig == SIGKILL) {
 1373                         proc_td_siginfo_capture(td, &ksi->ksi_info);
 1374                         sigexit(td, sig);
 1375                 }
 1376         }
 1377         PROC_UNLOCK(p);
 1378         return (error);
 1379 }
 1380 
 1381 #ifndef _SYS_SYSPROTO_H_
 1382 struct sigpending_args {
 1383         sigset_t        *set;
 1384 };
 1385 #endif
 1386 int
 1387 sys_sigpending(struct thread *td, struct sigpending_args *uap)
 1388 {
 1389         struct proc *p = td->td_proc;
 1390         sigset_t pending;
 1391 
 1392         PROC_LOCK(p);
 1393         pending = p->p_sigqueue.sq_signals;
 1394         SIGSETOR(pending, td->td_sigqueue.sq_signals);
 1395         PROC_UNLOCK(p);
 1396         return (copyout(&pending, uap->set, sizeof(sigset_t)));
 1397 }
 1398 
 1399 #ifdef COMPAT_43        /* XXX - COMPAT_FBSD3 */
 1400 #ifndef _SYS_SYSPROTO_H_
 1401 struct osigpending_args {
 1402         int     dummy;
 1403 };
 1404 #endif
 1405 int
 1406 osigpending(struct thread *td, struct osigpending_args *uap)
 1407 {
 1408         struct proc *p = td->td_proc;
 1409         sigset_t pending;
 1410 
 1411         PROC_LOCK(p);
 1412         pending = p->p_sigqueue.sq_signals;
 1413         SIGSETOR(pending, td->td_sigqueue.sq_signals);
 1414         PROC_UNLOCK(p);
 1415         SIG2OSIG(pending, td->td_retval[0]);
 1416         return (0);
 1417 }
 1418 #endif /* COMPAT_43 */
 1419 
 1420 #if defined(COMPAT_43)
 1421 /*
 1422  * Generalized interface signal handler, 4.3-compatible.
 1423  */
 1424 #ifndef _SYS_SYSPROTO_H_
 1425 struct osigvec_args {
 1426         int     signum;
 1427         struct  sigvec *nsv;
 1428         struct  sigvec *osv;
 1429 };
 1430 #endif
 1431 /* ARGSUSED */
 1432 int
 1433 osigvec(struct thread *td, struct osigvec_args *uap)
 1434 {
 1435         struct sigvec vec;
 1436         struct sigaction nsa, osa;
 1437         struct sigaction *nsap, *osap;
 1438         int error;
 1439 
 1440         if (uap->signum <= 0 || uap->signum >= ONSIG)
 1441                 return (EINVAL);
 1442         nsap = (uap->nsv != NULL) ? &nsa : NULL;
 1443         osap = (uap->osv != NULL) ? &osa : NULL;
 1444         if (nsap) {
 1445                 error = copyin(uap->nsv, &vec, sizeof(vec));
 1446                 if (error)
 1447                         return (error);
 1448                 nsap->sa_handler = vec.sv_handler;
 1449                 OSIG2SIG(vec.sv_mask, nsap->sa_mask);
 1450                 nsap->sa_flags = vec.sv_flags;
 1451                 nsap->sa_flags ^= SA_RESTART;   /* opposite of SV_INTERRUPT */
 1452         }
 1453         error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
 1454         if (osap && !error) {
 1455                 vec.sv_handler = osap->sa_handler;
 1456                 SIG2OSIG(osap->sa_mask, vec.sv_mask);
 1457                 vec.sv_flags = osap->sa_flags;
 1458                 vec.sv_flags &= ~SA_NOCLDWAIT;
 1459                 vec.sv_flags ^= SA_RESTART;
 1460                 error = copyout(&vec, uap->osv, sizeof(vec));
 1461         }
 1462         return (error);
 1463 }
 1464 
 1465 #ifndef _SYS_SYSPROTO_H_
 1466 struct osigblock_args {
 1467         int     mask;
 1468 };
 1469 #endif
 1470 int
 1471 osigblock(struct thread *td, struct osigblock_args *uap)
 1472 {
 1473         sigset_t set, oset;
 1474 
 1475         OSIG2SIG(uap->mask, set);
 1476         kern_sigprocmask(td, SIG_BLOCK, &set, &oset, 0);
 1477         SIG2OSIG(oset, td->td_retval[0]);
 1478         return (0);
 1479 }
 1480 
 1481 #ifndef _SYS_SYSPROTO_H_
 1482 struct osigsetmask_args {
 1483         int     mask;
 1484 };
 1485 #endif
 1486 int
 1487 osigsetmask(struct thread *td, struct osigsetmask_args *uap)
 1488 {
 1489         sigset_t set, oset;
 1490 
 1491         OSIG2SIG(uap->mask, set);
 1492         kern_sigprocmask(td, SIG_SETMASK, &set, &oset, 0);
 1493         SIG2OSIG(oset, td->td_retval[0]);
 1494         return (0);
 1495 }
 1496 #endif /* COMPAT_43 */
 1497 
 1498 /*
 1499  * Suspend calling thread until signal, providing mask to be set in the
 1500  * meantime.
 1501  */
 1502 #ifndef _SYS_SYSPROTO_H_
 1503 struct sigsuspend_args {
 1504         const sigset_t *sigmask;
 1505 };
 1506 #endif
 1507 /* ARGSUSED */
 1508 int
 1509 sys_sigsuspend(struct thread *td, struct sigsuspend_args *uap)
 1510 {
 1511         sigset_t mask;
 1512         int error;
 1513 
 1514         error = copyin(uap->sigmask, &mask, sizeof(mask));
 1515         if (error)
 1516                 return (error);
 1517         return (kern_sigsuspend(td, mask));
 1518 }
 1519 
 1520 int
 1521 kern_sigsuspend(struct thread *td, sigset_t mask)
 1522 {
 1523         struct proc *p = td->td_proc;
 1524         int has_sig, sig;
 1525 
 1526         /*
 1527          * When returning from sigsuspend, we want
 1528          * the old mask to be restored after the
 1529          * signal handler has finished.  Thus, we
 1530          * save it here and set TDP_OLDMASK on the
 1531          * thread to indicate this.
 1532          */
 1533         PROC_LOCK(p);
 1534         kern_sigprocmask(td, SIG_SETMASK, &mask, &td->td_oldsigmask,
 1535             SIGPROCMASK_PROC_LOCKED);
 1536         td->td_pflags |= TDP_OLDMASK;
 1537 
 1538         /*
 1539          * Process signals now.  Otherwise we could see a spurious
 1540          * wakeup when a signal enters the process queue but is
 1541          * delivered to another thread, while sigsuspend should
 1542          * return only on signal delivery to this thread.
 1543          */
 1544         (p->p_sysent->sv_set_syscall_retval)(td, EINTR);
 1545         for (has_sig = 0; !has_sig;) {
 1546                 while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "pause",
 1547                         0) == 0)
 1548                         /* void */;
 1549                 thread_suspend_check(0);
 1550                 mtx_lock(&p->p_sigacts->ps_mtx);
 1551                 while ((sig = cursig(td)) != 0) {
 1552                         KASSERT(sig >= 0, ("sig %d", sig));
 1553                         has_sig += postsig(sig);
 1554                 }
 1555                 mtx_unlock(&p->p_sigacts->ps_mtx);
 1556 
 1557                 /*
 1558                  * If PTRACE_SCE or PTRACE_SCX were set after
 1559                  * userspace entered the syscall, return spurious
 1560                  * EINTR.
 1561                  */
 1562                 if ((p->p_ptevents & PTRACE_SYSCALL) != 0)
 1563                         has_sig += 1;
 1564         }
 1565         PROC_UNLOCK(p);
 1566         td->td_errno = EINTR;
 1567         td->td_pflags |= TDP_NERRNO;
 1568         return (EJUSTRETURN);
 1569 }
 1570 
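      /*
       * Editor's note: illustrative userland sketch, not part of kern_sig.c;
       * the helper names are hypothetical.  It shows the race-free wait that
       * kern_sigsuspend() above enables: the signal stays blocked while the
       * flag is tested, and sigsuspend(2) installs the old mask atomically
       * for the duration of the wait.
       */
      #include <signal.h>

      static volatile sig_atomic_t got_usr1;

      static void
      usr1_handler(int sig)
      {
              (void)sig;
              got_usr1 = 1;
      }

      void
      wait_for_usr1(void)
      {
              struct sigaction sa;
              sigset_t block, oldmask;

              sa.sa_handler = usr1_handler;
              sa.sa_flags = 0;
              sigemptyset(&sa.sa_mask);
              sigaction(SIGUSR1, &sa, NULL);

              sigemptyset(&block);
              sigaddset(&block, SIGUSR1);
              sigprocmask(SIG_BLOCK, &block, &oldmask);
              while (!got_usr1)
                      sigsuspend(&oldmask);   /* returns -1/EINTR after the handler runs */
              sigprocmask(SIG_SETMASK, &oldmask, NULL);
      }
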
 1571 #ifdef COMPAT_43        /* XXX - COMPAT_FBSD3 */
 1572 /*
 1573  * Compatibility sigsuspend call for old binaries.  Note nonstandard calling
 1574  * convention: libc stub passes mask, not pointer, to save a copyin.
 1575  */
 1576 #ifndef _SYS_SYSPROTO_H_
 1577 struct osigsuspend_args {
 1578         osigset_t mask;
 1579 };
 1580 #endif
 1581 /* ARGSUSED */
 1582 int
 1583 osigsuspend(struct thread *td, struct osigsuspend_args *uap)
 1584 {
 1585         sigset_t mask;
 1586 
 1587         OSIG2SIG(uap->mask, mask);
 1588         return (kern_sigsuspend(td, mask));
 1589 }
 1590 #endif /* COMPAT_43 */
 1591 
 1592 #if defined(COMPAT_43)
 1593 #ifndef _SYS_SYSPROTO_H_
 1594 struct osigstack_args {
 1595         struct  sigstack *nss;
 1596         struct  sigstack *oss;
 1597 };
 1598 #endif
 1599 /* ARGSUSED */
 1600 int
 1601 osigstack(struct thread *td, struct osigstack_args *uap)
 1602 {
 1603         struct sigstack nss, oss;
 1604         int error = 0;
 1605 
 1606         if (uap->nss != NULL) {
 1607                 error = copyin(uap->nss, &nss, sizeof(nss));
 1608                 if (error)
 1609                         return (error);
 1610         }
 1611         oss.ss_sp = td->td_sigstk.ss_sp;
 1612         oss.ss_onstack = sigonstack(cpu_getstack(td));
 1613         if (uap->nss != NULL) {
 1614                 td->td_sigstk.ss_sp = nss.ss_sp;
 1615                 td->td_sigstk.ss_size = 0;
 1616                 td->td_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK;
 1617                 td->td_pflags |= TDP_ALTSTACK;
 1618         }
 1619         if (uap->oss != NULL)
 1620                 error = copyout(&oss, uap->oss, sizeof(oss));
 1621 
 1622         return (error);
 1623 }
 1624 #endif /* COMPAT_43 */
 1625 
 1626 #ifndef _SYS_SYSPROTO_H_
 1627 struct sigaltstack_args {
 1628         stack_t *ss;
 1629         stack_t *oss;
 1630 };
 1631 #endif
 1632 /* ARGSUSED */
 1633 int
 1634 sys_sigaltstack(struct thread *td, struct sigaltstack_args *uap)
 1635 {
 1636         stack_t ss, oss;
 1637         int error;
 1638 
 1639         if (uap->ss != NULL) {
 1640                 error = copyin(uap->ss, &ss, sizeof(ss));
 1641                 if (error)
 1642                         return (error);
 1643         }
 1644         error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
 1645             (uap->oss != NULL) ? &oss : NULL);
 1646         if (error)
 1647                 return (error);
 1648         if (uap->oss != NULL)
 1649                 error = copyout(&oss, uap->oss, sizeof(stack_t));
 1650         return (error);
 1651 }
 1652 
 1653 int
 1654 kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
 1655 {
 1656         struct proc *p = td->td_proc;
 1657         int oonstack;
 1658 
 1659         oonstack = sigonstack(cpu_getstack(td));
 1660 
 1661         if (oss != NULL) {
 1662                 *oss = td->td_sigstk;
 1663                 oss->ss_flags = (td->td_pflags & TDP_ALTSTACK)
 1664                     ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
 1665         }
 1666 
 1667         if (ss != NULL) {
 1668                 if (oonstack)
 1669                         return (EPERM);
 1670                 if ((ss->ss_flags & ~SS_DISABLE) != 0)
 1671                         return (EINVAL);
 1672                 if (!(ss->ss_flags & SS_DISABLE)) {
 1673                         if (ss->ss_size < p->p_sysent->sv_minsigstksz)
 1674                                 return (ENOMEM);
 1675 
 1676                         td->td_sigstk = *ss;
 1677                         td->td_pflags |= TDP_ALTSTACK;
 1678                 } else {
 1679                         td->td_pflags &= ~TDP_ALTSTACK;
 1680                 }
 1681         }
 1682         return (0);
 1683 }
 1684 
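      /*
       * Editor's note: illustrative userland sketch, not part of kern_sig.c;
       * the helper names are hypothetical.  kern_sigaltstack() above rejects
       * stacks smaller than the ABI minimum and refuses changes while the
       * thread is already running on the alternate stack; SA_ONSTACK makes a
       * handler run there, which is what lets a SIGSEGV caused by stack
       * overflow be caught at all.
       */
      #include <signal.h>
      #include <stdlib.h>
      #include <unistd.h>

      static void
      segv_handler(int sig)
      {
              (void)sig;
              /* Running on the alternate stack, even after a stack overflow. */
              _exit(1);
      }

      int
      install_altstack(void)
      {
              stack_t ss;
              struct sigaction sa;

              ss.ss_sp = malloc(SIGSTKSZ);
              if (ss.ss_sp == NULL)
                      return (-1);
              ss.ss_size = SIGSTKSZ;
              ss.ss_flags = 0;
              if (sigaltstack(&ss, NULL) == -1)       /* enters sys_sigaltstack() */
                      return (-1);

              sa.sa_handler = segv_handler;
              sa.sa_flags = SA_ONSTACK;               /* deliver on the alternate stack */
              sigemptyset(&sa.sa_mask);
              return (sigaction(SIGSEGV, &sa, NULL));
      }
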
 1685 /*
 1686  * Common code for kill process group/broadcast kill.
 1687  * td is the calling thread.
 1688  */
 1689 static int
 1690 killpg1(struct thread *td, int sig, int pgid, int all, ksiginfo_t *ksi)
 1691 {
 1692         struct proc *p;
 1693         struct pgrp *pgrp;
 1694         int err;
 1695         int ret;
 1696 
 1697         ret = ESRCH;
 1698         if (all) {
 1699                 /*
 1700                  * broadcast
 1701                  */
 1702                 sx_slock(&allproc_lock);
 1703                 FOREACH_PROC_IN_SYSTEM(p) {
 1704                         PROC_LOCK(p);
 1705                         if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
 1706                             p == td->td_proc || p->p_state == PRS_NEW) {
 1707                                 PROC_UNLOCK(p);
 1708                                 continue;
 1709                         }
 1710                         err = p_cansignal(td, p, sig);
 1711                         if (err == 0) {
 1712                                 if (sig)
 1713                                         pksignal(p, sig, ksi);
 1714                                 ret = err;
 1715                         } else if (ret == ESRCH)
 1717                                 ret = err;
 1718                         PROC_UNLOCK(p);
 1719                 }
 1720                 sx_sunlock(&allproc_lock);
 1721         } else {
 1722                 sx_slock(&proctree_lock);
 1723                 if (pgid == 0) {
 1724                         /*
 1725                          * zero pgid means send to my process group.
 1726                          */
 1727                         pgrp = td->td_proc->p_pgrp;
 1728                         PGRP_LOCK(pgrp);
 1729                 } else {
 1730                         pgrp = pgfind(pgid);
 1731                         if (pgrp == NULL) {
 1732                                 sx_sunlock(&proctree_lock);
 1733                                 return (ESRCH);
 1734                         }
 1735                 }
 1736                 sx_sunlock(&proctree_lock);
 1737                 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
 1738                         PROC_LOCK(p);
 1739                         if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
 1740                             p->p_state == PRS_NEW) {
 1741                                 PROC_UNLOCK(p);
 1742                                 continue;
 1743                         }
 1744                         err = p_cansignal(td, p, sig);
 1745                         if (err == 0) {
 1746                                 if (sig)
 1747                                         pksignal(p, sig, ksi);
 1748                                 ret = err;
 1749                         } else if (ret == ESRCH)
 1751                                 ret = err;
 1752                         PROC_UNLOCK(p);
 1753                 }
 1754                 PGRP_UNLOCK(pgrp);
 1755         }
 1756         return (ret);
 1757 }
 1758 
 1759 #ifndef _SYS_SYSPROTO_H_
 1760 struct kill_args {
 1761         int     pid;
 1762         int     signum;
 1763 };
 1764 #endif
 1765 /* ARGSUSED */
 1766 int
 1767 sys_kill(struct thread *td, struct kill_args *uap)
 1768 {
 1769         ksiginfo_t ksi;
 1770         struct proc *p;
 1771         int error;
 1772 
 1773         /*
 1774          * A process in capability mode can send signals only to itself.
 1775          * The main rationale behind this is that abort(3) is implemented as
 1776          * kill(getpid(), SIGABRT).
 1777          */
 1778         if (IN_CAPABILITY_MODE(td) && uap->pid != td->td_proc->p_pid)
 1779                 return (ECAPMODE);
 1780 
 1781         AUDIT_ARG_SIGNUM(uap->signum);
 1782         AUDIT_ARG_PID(uap->pid);
 1783         if ((u_int)uap->signum > _SIG_MAXSIG)
 1784                 return (EINVAL);
 1785 
 1786         ksiginfo_init(&ksi);
 1787         ksi.ksi_signo = uap->signum;
 1788         ksi.ksi_code = SI_USER;
 1789         ksi.ksi_pid = td->td_proc->p_pid;
 1790         ksi.ksi_uid = td->td_ucred->cr_ruid;
 1791 
 1792         if (uap->pid > 0) {
 1793                 /* kill single process */
 1794                 if ((p = pfind(uap->pid)) == NULL) {
 1795                         if ((p = zpfind(uap->pid)) == NULL)
 1796                                 return (ESRCH);
 1797                 }
 1798                 AUDIT_ARG_PROCESS(p);
 1799                 error = p_cansignal(td, p, uap->signum);
 1800                 if (error == 0 && uap->signum)
 1801                         pksignal(p, uap->signum, &ksi);
 1802                 PROC_UNLOCK(p);
 1803                 return (error);
 1804         }
 1805         switch (uap->pid) {
 1806         case -1:                /* broadcast signal */
 1807                 return (killpg1(td, uap->signum, 0, 1, &ksi));
 1808         case 0:                 /* signal own process group */
 1809                 return (killpg1(td, uap->signum, 0, 0, &ksi));
 1810         default:                /* negative explicit process group */
 1811                 return (killpg1(td, uap->signum, -uap->pid, 0, &ksi));
 1812         }
 1813         /* NOTREACHED */
 1814 }
 1815 
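      /*
       * Editor's note: illustrative userland sketch, not part of kern_sig.c;
       * the helper name is hypothetical.  It shows the pid conventions that
       * sys_kill() above dispatches on: a positive pid names one process,
       * 0 the caller's process group, -1 a broadcast, and any other negative
       * value the process group -pid.  Signal 0 performs only the permission
       * check without sending anything.
       */
      #include <sys/types.h>
      #include <signal.h>

      int
      kill_examples(pid_t child, pid_t pgid)
      {
              kill(child, SIGTERM);           /* one process */
              kill(0, SIGHUP);                /* caller's own process group */
              kill(-pgid, SIGINT);            /* explicit process group */
              return (kill(child, 0));        /* existence/permission probe only */
      }
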
 1816 int
 1817 sys_pdkill(struct thread *td, struct pdkill_args *uap)
 1818 {
 1819         struct proc *p;
 1820         cap_rights_t rights;
 1821         int error;
 1822 
 1823         AUDIT_ARG_SIGNUM(uap->signum);
 1824         AUDIT_ARG_FD(uap->fd);
 1825         if ((u_int)uap->signum > _SIG_MAXSIG)
 1826                 return (EINVAL);
 1827 
 1828         error = procdesc_find(td, uap->fd,
 1829             cap_rights_init(&rights, CAP_PDKILL), &p);
 1830         if (error)
 1831                 return (error);
 1832         AUDIT_ARG_PROCESS(p);
 1833         error = p_cansignal(td, p, uap->signum);
 1834         if (error == 0 && uap->signum)
 1835                 kern_psignal(p, uap->signum);
 1836         PROC_UNLOCK(p);
 1837         return (error);
 1838 }
 1839 
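      /*
       * Editor's note: illustrative userland sketch, not part of kern_sig.c;
       * the helper name is hypothetical.  Process descriptors let a parent
       * signal a child by file descriptor, which is why sys_pdkill() above
       * checks CAP_PDKILL on the descriptor instead of doing a pid lookup;
       * this also works inside a Capsicum sandbox.
       */
      #include <sys/types.h>
      #include <sys/procdesc.h>
      #include <signal.h>
      #include <unistd.h>

      int
      pd_example(void)
      {
              pid_t pid;
              int pd;

              pid = pdfork(&pd, 0);
              if (pid == 0) {
                      /* Child: sleep until signalled. */
                      for (;;)
                              pause();
              } else if (pid > 0)
                      return (pdkill(pd, SIGTERM));   /* enters sys_pdkill() */
              return (-1);
      }
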
 1840 #if defined(COMPAT_43)
 1841 #ifndef _SYS_SYSPROTO_H_
 1842 struct okillpg_args {
 1843         int     pgid;
 1844         int     signum;
 1845 };
 1846 #endif
 1847 /* ARGSUSED */
 1848 int
 1849 okillpg(struct thread *td, struct okillpg_args *uap)
 1850 {
 1851         ksiginfo_t ksi;
 1852 
 1853         AUDIT_ARG_SIGNUM(uap->signum);
 1854         AUDIT_ARG_PID(uap->pgid);
 1855         if ((u_int)uap->signum > _SIG_MAXSIG)
 1856                 return (EINVAL);
 1857 
 1858         ksiginfo_init(&ksi);
 1859         ksi.ksi_signo = uap->signum;
 1860         ksi.ksi_code = SI_USER;
 1861         ksi.ksi_pid = td->td_proc->p_pid;
 1862         ksi.ksi_uid = td->td_ucred->cr_ruid;
 1863         return (killpg1(td, uap->signum, uap->pgid, 0, &ksi));
 1864 }
 1865 #endif /* COMPAT_43 */
 1866 
 1867 #ifndef _SYS_SYSPROTO_H_
 1868 struct sigqueue_args {
 1869         pid_t pid;
 1870         int signum;
 1871         /* union sigval */ void *value;
 1872 };
 1873 #endif
 1874 int
 1875 sys_sigqueue(struct thread *td, struct sigqueue_args *uap)
 1876 {
 1877         union sigval sv;
 1878 
 1879         sv.sival_ptr = uap->value;
 1880 
 1881         return (kern_sigqueue(td, uap->pid, uap->signum, &sv));
 1882 }
 1883 
 1884 int
 1885 kern_sigqueue(struct thread *td, pid_t pid, int signum, union sigval *value)
 1886 {
 1887         ksiginfo_t ksi;
 1888         struct proc *p;
 1889         int error;
 1890 
 1891         if ((u_int)signum > _SIG_MAXSIG)
 1892                 return (EINVAL);
 1893 
 1894         /*
 1895          * The specification says sigqueue can only send a signal
 1896          * to a single process.
 1897          */
 1898         if (pid <= 0)
 1899                 return (EINVAL);
 1900 
 1901         if ((p = pfind(pid)) == NULL) {
 1902                 if ((p = zpfind(pid)) == NULL)
 1903                         return (ESRCH);
 1904         }
 1905         error = p_cansignal(td, p, signum);
 1906         if (error == 0 && signum != 0) {
 1907                 ksiginfo_init(&ksi);
 1908                 ksi.ksi_flags = KSI_SIGQ;
 1909                 ksi.ksi_signo = signum;
 1910                 ksi.ksi_code = SI_QUEUE;
 1911                 ksi.ksi_pid = td->td_proc->p_pid;
 1912                 ksi.ksi_uid = td->td_ucred->cr_ruid;
 1913                 ksi.ksi_value = *value;
 1914                 error = pksignal(p, ksi.ksi_signo, &ksi);
 1915         }
 1916         PROC_UNLOCK(p);
 1917         return (error);
 1918 }
 1919 
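      /*
       * Editor's note: illustrative userland sketch, not part of kern_sig.c;
       * the helper names are hypothetical.  sigqueue(2) carries a sigval with
       * the signal; with SA_SIGINFO the receiver sees it in si_value, and
       * si_code is SI_QUEUE exactly as filled in by kern_sigqueue() above.
       */
      #include <signal.h>
      #include <unistd.h>

      static volatile sig_atomic_t seen_value;

      static void
      usr2_handler(int sig, siginfo_t *si, void *ctx)
      {
              (void)sig;
              (void)ctx;
              seen_value = si->si_value.sival_int;    /* queued by the sender */
      }

      int
      sigqueue_demo(void)
      {
              struct sigaction sa;
              union sigval sv;

              sa.sa_sigaction = usr2_handler;
              sa.sa_flags = SA_SIGINFO;
              sigemptyset(&sa.sa_mask);
              sigaction(SIGUSR2, &sa, NULL);

              sv.sival_int = 42;
              return (sigqueue(getpid(), SIGUSR2, sv));  /* enters kern_sigqueue() */
      }
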
 1920 /*
 1921  * Send a signal to a process group.
 1922  */
 1923 void
 1924 gsignal(int pgid, int sig, ksiginfo_t *ksi)
 1925 {
 1926         struct pgrp *pgrp;
 1927 
 1928         if (pgid != 0) {
 1929                 sx_slock(&proctree_lock);
 1930                 pgrp = pgfind(pgid);
 1931                 sx_sunlock(&proctree_lock);
 1932                 if (pgrp != NULL) {
 1933                         pgsignal(pgrp, sig, 0, ksi);
 1934                         PGRP_UNLOCK(pgrp);
 1935                 }
 1936         }
 1937 }
 1938 
 1939 /*
 1940  * Send a signal to a process group.  If checkctty is 1,
 1941  * limit to members which have a controlling terminal.
 1942  */
 1943 void
 1944 pgsignal(struct pgrp *pgrp, int sig, int checkctty, ksiginfo_t *ksi)
 1945 {
 1946         struct proc *p;
 1947 
 1948         if (pgrp) {
 1949                 PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
 1950                 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
 1951                         PROC_LOCK(p);
 1952                         if (p->p_state == PRS_NORMAL &&
 1953                             (checkctty == 0 || p->p_flag & P_CONTROLT))
 1954                                 pksignal(p, sig, ksi);
 1955                         PROC_UNLOCK(p);
 1956                 }
 1957         }
 1958 }
 1959 
 1960 
 1961 /*
 1962  * Recalculate the signal mask and reset the signal disposition after
 1963  * the usermode frame for delivery is formed.  Should be called after the
 1964  * machine-specific routine, because sysent->sv_sendsig() needs the correct
 1965  * ps_siginfo and signal mask.
 1966  */
 1967 static void
 1968 postsig_done(int sig, struct thread *td, struct sigacts *ps)
 1969 {
 1970         sigset_t mask;
 1971 
 1972         mtx_assert(&ps->ps_mtx, MA_OWNED);
 1973         td->td_ru.ru_nsignals++;
 1974         mask = ps->ps_catchmask[_SIG_IDX(sig)];
 1975         if (!SIGISMEMBER(ps->ps_signodefer, sig))
 1976                 SIGADDSET(mask, sig);
 1977         kern_sigprocmask(td, SIG_BLOCK, &mask, NULL,
 1978             SIGPROCMASK_PROC_LOCKED | SIGPROCMASK_PS_LOCKED);
 1979         if (SIGISMEMBER(ps->ps_sigreset, sig))
 1980                 sigdflt(ps, sig);
 1981 }
 1982 
 1983 
 1984 /*
 1985  * Send a signal caused by a trap to the current thread.  If it will be
 1986  * caught immediately, deliver it with correct code.  Otherwise, post it
 1987  * normally.
 1988  */
 1989 void
 1990 trapsignal(struct thread *td, ksiginfo_t *ksi)
 1991 {
 1992         struct sigacts *ps;
 1993         struct proc *p;
 1994         int sig;
 1995         int code;
 1996 
 1997         p = td->td_proc;
 1998         sig = ksi->ksi_signo;
 1999         code = ksi->ksi_code;
 2000         KASSERT(_SIG_VALID(sig), ("invalid signal"));
 2001 
 2002         PROC_LOCK(p);
 2003         ps = p->p_sigacts;
 2004         mtx_lock(&ps->ps_mtx);
 2005         if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
 2006             !SIGISMEMBER(td->td_sigmask, sig)) {
 2007 #ifdef KTRACE
 2008                 if (KTRPOINT(curthread, KTR_PSIG))
 2009                         ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
 2010                             &td->td_sigmask, code);
 2011 #endif
 2012                 (*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)],
 2013                                 ksi, &td->td_sigmask);
 2014                 postsig_done(sig, td, ps);
 2015                 mtx_unlock(&ps->ps_mtx);
 2016         } else {
 2017                 /*
 2018                  * Avoid a possible infinite loop if the thread is
 2019                  * masking the signal or the process is ignoring
 2020                  * the signal.
 2021                  */
 2022                 if (kern_forcesigexit &&
 2023                     (SIGISMEMBER(td->td_sigmask, sig) ||
 2024                      ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN)) {
 2025                         SIGDELSET(td->td_sigmask, sig);
 2026                         SIGDELSET(ps->ps_sigcatch, sig);
 2027                         SIGDELSET(ps->ps_sigignore, sig);
 2028                         ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
 2029                 }
 2030                 mtx_unlock(&ps->ps_mtx);
 2031                 p->p_code = code;       /* XXX for core dump/debugger */
 2032                 p->p_sig = sig;         /* XXX to verify code */
 2033                 tdsendsignal(p, td, sig, ksi);
 2034         }
 2035         PROC_UNLOCK(p);
 2036 }
 2037 
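      /*
       * Editor's note: illustrative sketch, not part of kern_sig.c; the
       * helper name is hypothetical and the exact ksiginfo fields vary per
       * architecture.  It shows roughly how a machine-dependent trap handler
       * feeds trapsignal() above.
       */
      static void
      deliver_fault(struct thread *td, int sig, int code, void *addr)
      {
              ksiginfo_t ksi;

              ksiginfo_init_trap(&ksi);       /* mark the ksiginfo as trap-originated */
              ksi.ksi_signo = sig;            /* e.g. SIGSEGV */
              ksi.ksi_code = code;            /* e.g. SEGV_MAPERR */
              ksi.ksi_addr = addr;            /* faulting address */
              trapsignal(td, &ksi);
      }

      /*
       * Select a thread in the process to deliver the signal to: prefer the
       * current thread if it does not mask the signal, otherwise any thread
       * with the signal unmasked, and fall back to the first thread.
       */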
 2038 static struct thread *
 2039 sigtd(struct proc *p, int sig, int prop)
 2040 {
 2041         struct thread *td, *signal_td;
 2042 
 2043         PROC_LOCK_ASSERT(p, MA_OWNED);
 2044 
 2045         /*
 2046          * Check if current thread can handle the signal without
 2047          * switching context to another thread.
 2048          */
 2049         if (curproc == p && !SIGISMEMBER(curthread->td_sigmask, sig))
 2050                 return (curthread);
 2051         signal_td = NULL;
 2052         FOREACH_THREAD_IN_PROC(p, td) {
 2053                 if (!SIGISMEMBER(td->td_sigmask, sig)) {
 2054                         signal_td = td;
 2055                         break;
 2056                 }
 2057         }
 2058         if (signal_td == NULL)
 2059                 signal_td = FIRST_THREAD_IN_PROC(p);
 2060         return (signal_td);
 2061 }
 2062 
 2063 /*
 2064  * Send the signal to the process.  If the signal has an action, the action
 2065  * is usually performed by the target process rather than the caller; we add
 2066  * the signal to the set of pending signals for the process.
 2067  *
 2068  * Exceptions:
 2069  *   o When a stop signal is sent to a sleeping process that takes the
 2070  *     default action, the process is stopped without awakening it.
 2071  *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 2072  *     regardless of the signal action (e.g., blocked or ignored).
 2073  *
 2074  * Other ignored signals are discarded immediately.
 2075  *
 2076  * NB: This function may be entered from the debugger via the "kill" DDB
 2077  * command.  There is little that can be done to mitigate the possibly messy
 2078  * side effects of this unwise possibility.
 2079  */
 2080 void
 2081 kern_psignal(struct proc *p, int sig)
 2082 {
 2083         ksiginfo_t ksi;
 2084 
 2085         ksiginfo_init(&ksi);
 2086         ksi.ksi_signo = sig;
 2087         ksi.ksi_code = SI_KERNEL;
 2088         (void) tdsendsignal(p, NULL, sig, &ksi);
 2089 }
 2090 
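      /*
       * Editor's note: illustrative in-kernel sketch, not part of kern_sig.c;
       * the helper name is hypothetical.  kern_psignal() must be called with
       * the target's process lock held, which matches tdsendsignal()'s
       * PROC_LOCK_ASSERT below; pfind() conveniently returns the process
       * already locked.
       */
      static int
      signal_pid_hup(pid_t pid)
      {
              struct proc *p;

              if ((p = pfind(pid)) == NULL)
                      return (ESRCH);
              kern_psignal(p, SIGHUP);        /* builds a SI_KERNEL ksiginfo */
              PROC_UNLOCK(p);
              return (0);
      }
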
 2091 int
 2092 pksignal(struct proc *p, int sig, ksiginfo_t *ksi)
 2093 {
 2094 
 2095         return (tdsendsignal(p, NULL, sig, ksi));
 2096 }
 2097 
 2098 /* Utility function for finding a thread to send signal event to. */
 2099 int
 2100 sigev_findtd(struct proc *p, struct sigevent *sigev, struct thread **ttd)
 2101 {
 2102         struct thread *td;
 2103 
 2104         if (sigev->sigev_notify == SIGEV_THREAD_ID) {
 2105                 td = tdfind(sigev->sigev_notify_thread_id, p->p_pid);
 2106                 if (td == NULL)
 2107                         return (ESRCH);
 2108                 *ttd = td;
 2109         } else {
 2110                 *ttd = NULL;
 2111                 PROC_LOCK(p);
 2112         }
 2113         return (0);
 2114 }
 2115 
 2116 void
 2117 tdsignal(struct thread *td, int sig)
 2118 {
 2119         ksiginfo_t ksi;
 2120 
 2121         ksiginfo_init(&ksi);
 2122         ksi.ksi_signo = sig;
 2123         ksi.ksi_code = SI_KERNEL;
 2124         (void) tdsendsignal(td->td_proc, td, sig, &ksi);
 2125 }
 2126 
 2127 void
 2128 tdksignal(struct thread *td, int sig, ksiginfo_t *ksi)
 2129 {
 2130 
 2131         (void) tdsendsignal(td->td_proc, td, sig, ksi);
 2132 }
 2133 
 2134 int
 2135 tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
 2136 {
 2137         sig_t action;
 2138         sigqueue_t *sigqueue;
 2139         int prop;
 2140         struct sigacts *ps;
 2141         int intrval;
 2142         int ret = 0;
 2143         int wakeup_swapper;
 2144 
 2145         MPASS(td == NULL || p == td->td_proc);
 2146         PROC_LOCK_ASSERT(p, MA_OWNED);
 2147 
 2148         if (!_SIG_VALID(sig))
 2149                 panic("%s(): invalid signal %d", __func__, sig);
 2150 
 2151         KASSERT(ksi == NULL || !KSI_ONQ(ksi), ("%s: ksi on queue", __func__));
 2152 
 2153         /*
 2154          * IEEE Std 1003.1-2001: return success when killing a zombie.
 2155          */
 2156         if (p->p_state == PRS_ZOMBIE) {
 2157                 if (ksi && (ksi->ksi_flags & KSI_INS))
 2158                         ksiginfo_tryfree(ksi);
 2159                 return (ret);
 2160         }
 2161 
 2162         ps = p->p_sigacts;
 2163         KNOTE_LOCKED(p->p_klist, NOTE_SIGNAL | sig);
 2164         prop = sigprop(sig);
 2165 
 2166         if (td == NULL) {
 2167                 td = sigtd(p, sig, prop);
 2168                 sigqueue = &p->p_sigqueue;
 2169         } else
 2170                 sigqueue = &td->td_sigqueue;
 2171 
 2172         SDT_PROBE3(proc, , , signal__send, td, p, sig);
 2173 
 2174         /*
 2175          * If the signal is being ignored,
 2176          * then we forget about it immediately.
 2177          * (Note: we don't set SIGCONT in ps_sigignore,
 2178          * and if it is set to SIG_IGN,
 2179          * action will be SIG_DFL here.)
 2180          */
 2181         mtx_lock(&ps->ps_mtx);
 2182         if (SIGISMEMBER(ps->ps_sigignore, sig)) {
 2183                 SDT_PROBE3(proc, , , signal__discard, td, p, sig);
 2184 
 2185                 mtx_unlock(&ps->ps_mtx);
 2186                 if (ksi && (ksi->ksi_flags & KSI_INS))
 2187                         ksiginfo_tryfree(ksi);
 2188                 return (ret);
 2189         }
 2190         if (SIGISMEMBER(td->td_sigmask, sig))
 2191                 action = SIG_HOLD;
 2192         else if (SIGISMEMBER(ps->ps_sigcatch, sig))
 2193                 action = SIG_CATCH;
 2194         else
 2195                 action = SIG_DFL;
 2196         if (SIGISMEMBER(ps->ps_sigintr, sig))
 2197                 intrval = EINTR;
 2198         else
 2199                 intrval = ERESTART;
 2200         mtx_unlock(&ps->ps_mtx);
 2201 
 2202         if (prop & SA_CONT)
 2203                 sigqueue_delete_stopmask_proc(p);
 2204         else if (prop & SA_STOP) {
 2205                 /*
 2206                  * If sending a tty stop signal to a member of an orphaned
 2207                  * process group, discard the signal here if the action
 2208                  * is default; don't stop the process below if sleeping,
 2209                  * and don't clear any pending SIGCONT.
 2210                  */
 2211                 if ((prop & SA_TTYSTOP) &&
 2212                     (p->p_pgrp->pg_jobc == 0) &&
 2213                     (action == SIG_DFL)) {
 2214                         if (ksi && (ksi->ksi_flags & KSI_INS))
 2215                                 ksiginfo_tryfree(ksi);
 2216                         return (ret);
 2217                 }
 2218                 sigqueue_delete_proc(p, SIGCONT);
 2219                 if (p->p_flag & P_CONTINUED) {
 2220                         p->p_flag &= ~P_CONTINUED;
 2221                         PROC_LOCK(p->p_pptr);
 2222                         sigqueue_take(p->p_ksi);
 2223                         PROC_UNLOCK(p->p_pptr);
 2224                 }
 2225         }
 2226 
 2227         ret = sigqueue_add(sigqueue, sig, ksi);
 2228         if (ret != 0)
 2229                 return (ret);
 2230         signotify(td);
 2231         /*
 2232          * Defer further processing for signals which are held,
 2233          * except that stopped processes must be continued by SIGCONT.
 2234          */
 2235         if (action == SIG_HOLD &&
 2236             !((prop & SA_CONT) && (p->p_flag & P_STOPPED_SIG)))
 2237                 return (ret);
 2238 
 2239         /* SIGKILL: Remove procfs STOPEVENTs. */
 2240         if (sig == SIGKILL) {
 2241                 /* from procfs_ioctl.c: PIOCBIC */
 2242                 p->p_stops = 0;
 2243                 /* from procfs_ioctl.c: PIOCCONT */
 2244                 p->p_step = 0;
 2245                 wakeup(&p->p_step);
 2246         }
 2247         /*
 2248          * Some signals have a process-wide effect and a per-thread
 2249          * component.  Most processing occurs when the process next
 2250          * tries to cross the user boundary, however there are some
 2251          * times when processing needs to be done immediately, such as
 2252          * waking up threads so that they can cross the user boundary.
 2253          * We try to do the per-process part here.
 2254          */
 2255         if (P_SHOULDSTOP(p)) {
 2256                 KASSERT(!(p->p_flag & P_WEXIT),
 2257                     ("signal to stopped but exiting process"));
 2258                 if (sig == SIGKILL) {
 2259                         /*
 2260                          * If traced process is already stopped,
 2261                          * then no further action is necessary.
 2262                          */
 2263                         if (p->p_flag & P_TRACED)
 2264                                 goto out;
 2265                         /*
 2266                          * SIGKILL sets process running.
 2267                          * It will die elsewhere.
 2268                          * All threads must be restarted.
 2269                          */
 2270                         p->p_flag &= ~P_STOPPED_SIG;
 2271                         goto runfast;
 2272                 }
 2273 
 2274                 if (prop & SA_CONT) {
 2275                         /*
 2276                          * If traced process is already stopped,
 2277                          * then no further action is necessary.
 2278                          */
 2279                         if (p->p_flag & P_TRACED)
 2280                                 goto out;
 2281                         /*
 2282                          * If SIGCONT is default (or ignored), we continue the
 2283                          * process but don't leave the signal in sigqueue as
 2284                          * it has no further action.  If SIGCONT is held, we
 2285                          * continue the process and leave the signal in
 2286                          * sigqueue.  If the process catches SIGCONT, let it
 2287                          * handle the signal itself.  If it isn't waiting on
 2288                          * an event, it goes back to run state.
 2289                          * Otherwise, process goes back to sleep state.
 2290                          */
 2291                         p->p_flag &= ~P_STOPPED_SIG;
 2292                         PROC_SLOCK(p);
 2293                         if (p->p_numthreads == p->p_suspcount) {
 2294                                 PROC_SUNLOCK(p);
 2295                                 p->p_flag |= P_CONTINUED;
 2296                                 p->p_xsig = SIGCONT;
 2297                                 PROC_LOCK(p->p_pptr);
 2298                                 childproc_continued(p);
 2299                                 PROC_UNLOCK(p->p_pptr);
 2300                                 PROC_SLOCK(p);
 2301                         }
 2302                         if (action == SIG_DFL) {
 2303                                 thread_unsuspend(p);
 2304                                 PROC_SUNLOCK(p);
 2305                                 sigqueue_delete(sigqueue, sig);
 2306                                 goto out;
 2307                         }
 2308                         if (action == SIG_CATCH) {
 2309                                 /*
 2310                                  * The process wants to catch it so it needs
 2311                                  * to run at least one thread, but which one?
 2312                                  */
 2313                                 PROC_SUNLOCK(p);
 2314                                 goto runfast;
 2315                         }
 2316                         /*
 2317                          * The signal is not ignored or caught.
 2318                          */
 2319                         thread_unsuspend(p);
 2320                         PROC_SUNLOCK(p);
 2321                         goto out;
 2322                 }
 2323 
 2324                 if (prop & SA_STOP) {
 2325                         /*
 2326                          * If traced process is already stopped,
 2327                          * then no further action is necessary.
 2328                          */
 2329                         if (p->p_flag & P_TRACED)
 2330                                 goto out;
 2331                         /*
 2332                          * Already stopped, don't need to stop again
 2333                          * (if we did, the shell could get confused).
 2334                          * Just make sure the signal STOP bit is set.
 2335                          */
 2336                         p->p_flag |= P_STOPPED_SIG;
 2337                         sigqueue_delete(sigqueue, sig);
 2338                         goto out;
 2339                 }
 2340 
 2341                 /*
 2342                  * All other kinds of signals:
 2343                  * If a thread is sleeping interruptibly, simulate a
 2344                  * wakeup so that when it is continued it will be made
 2345                  * runnable and can look at the signal.  However, don't make
 2346                  * the PROCESS runnable, leave it stopped.
 2347                  * It may run a bit until it hits a thread_suspend_check().
 2348                  */
 2349                 wakeup_swapper = 0;
 2350                 PROC_SLOCK(p);
 2351                 thread_lock(td);
 2352                 if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR))
 2353                         wakeup_swapper = sleepq_abort(td, intrval);
 2354                 thread_unlock(td);
 2355                 PROC_SUNLOCK(p);
 2356                 if (wakeup_swapper)
 2357                         kick_proc0();
 2358                 goto out;
 2359                 /*
 2360                  * Mutexes are short lived. Threads waiting on them will
 2361                  * hit thread_suspend_check() soon.
 2362                  */
 2363         } else if (p->p_state == PRS_NORMAL) {
 2364                 if (p->p_flag & P_TRACED || action == SIG_CATCH) {
 2365                         tdsigwakeup(td, sig, action, intrval);
 2366                         goto out;
 2367                 }
 2368 
 2369                 MPASS(action == SIG_DFL);
 2370 
 2371                 if (prop & SA_STOP) {
 2372                         if (p->p_flag & (P_PPWAIT|P_WEXIT))
 2373                                 goto out;
 2374                         p->p_flag |= P_STOPPED_SIG;
 2375                         p->p_xsig = sig;
 2376                         PROC_SLOCK(p);
 2377                         wakeup_swapper = sig_suspend_threads(td, p, 1);
 2378                         if (p->p_numthreads == p->p_suspcount) {
 2379                                 /*
 2380                                  * Only a thread sending a signal to another
 2381                                  * process can reach here.  If a thread sends
 2382                                  * a signal to its own process, it does not
 2383                                  * suspend itself here, so p_numthreads can
 2384                                  * never equal p_suspcount in that case.
 2385                                  */
 2386                                 thread_stopped(p);
 2387                                 PROC_SUNLOCK(p);
 2388                                 sigqueue_delete_proc(p, p->p_xsig);
 2389                         } else
 2390                                 PROC_SUNLOCK(p);
 2391                         if (wakeup_swapper)
 2392                                 kick_proc0();
 2393                         goto out;
 2394                 }
 2395         } else {
 2396                 /* Not in "NORMAL" state.  Discard the signal. */
 2397                 sigqueue_delete(sigqueue, sig);
 2398                 goto out;
 2399         }
 2400 
 2401         /*
 2402          * The process is not stopped so we need to apply the signal to all the
 2403          * running threads.
 2404          */
 2405 runfast:
 2406         tdsigwakeup(td, sig, action, intrval);
 2407         PROC_SLOCK(p);
 2408         thread_unsuspend(p);
 2409         PROC_SUNLOCK(p);
 2410 out:
 2411         /* If we jump here, proc slock should not be owned. */
 2412         PROC_SLOCK_ASSERT(p, MA_NOTOWNED);
 2413         return (ret);
 2414 }
 2415 
 2416 /*
 2417  * The force of a signal has been directed against a single
 2418  * thread.  We need to see what we can do about knocking it
 2419  * out of any sleep it may be in, etc.
 2420  */
 2421 static void
 2422 tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
 2423 {
 2424         struct proc *p = td->td_proc;
 2425         int prop;
 2426         int wakeup_swapper;
 2427 
 2428         wakeup_swapper = 0;
 2429         PROC_LOCK_ASSERT(p, MA_OWNED);
 2430         prop = sigprop(sig);
 2431 
 2432         PROC_SLOCK(p);
 2433         thread_lock(td);
 2434         /*
 2435          * Bring the priority of a thread up if we want it to get
 2436          * killed in this lifetime.  Be careful to avoid bumping the
 2437          * priority of the idle thread, since we still allow signals
 2438          * to be sent to kernel processes.
 2439          */
 2440         if (action == SIG_DFL && (prop & SA_KILL) != 0 &&
 2441             td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
 2442                 sched_prio(td, PUSER);
 2443         if (TD_ON_SLEEPQ(td)) {
 2444                 /*
 2445                  * If the thread is sleeping uninterruptibly, we
 2446                  * can't interrupt the sleep; the signal will be
 2447                  * noticed when the thread returns through trap()
 2448                  * or syscall().
 2449                  */
 2450                 if ((td->td_flags & TDF_SINTR) == 0)
 2451                         goto out;
 2452                 /*
 2453                  * If SIGCONT is default (or ignored) and process is
 2454                  * asleep, we are finished; the process should not
 2455                  * be awakened.
 2456                  */
 2457                 if ((prop & SA_CONT) && action == SIG_DFL) {
 2458                         thread_unlock(td);
 2459                         PROC_SUNLOCK(p);
 2460                         sigqueue_delete(&p->p_sigqueue, sig);
 2461                         /*
 2462                          * It may be on either list in this state.
 2463                          * Remove from both for now.
 2464                          */
 2465                         sigqueue_delete(&td->td_sigqueue, sig);
 2466                         return;
 2467                 }
 2468 
 2469                 /*
 2470                  * Don't awaken a sleeping thread for SIGSTOP if the
 2471                  * STOP signal is deferred.
 2472                  */
 2473                 if ((prop & SA_STOP) != 0 && (td->td_flags & (TDF_SBDRY |
 2474                     TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY)
 2475                         goto out;
 2476 
 2477                 /*
 2478                  * Give low priority threads a better chance to run.
 2479                  */
 2480                 if (td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
 2481                         sched_prio(td, PUSER);
 2482 
 2483                 wakeup_swapper = sleepq_abort(td, intrval);
 2484         } else {
 2485                 /*
 2486                  * Other states do nothing with the signal immediately,
 2487                  * other than kicking ourselves if we are running.
 2488                  * It will either never be noticed, or noticed very soon.
 2489                  */
 2490 #ifdef SMP
 2491                 if (TD_IS_RUNNING(td) && td != curthread)
 2492                         forward_signal(td);
 2493 #endif
 2494         }
 2495 out:
 2496         PROC_SUNLOCK(p);
 2497         thread_unlock(td);
 2498         if (wakeup_swapper)
 2499                 kick_proc0();
 2500 }
 2501 
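      /*
       * Ask every thread in the process to suspend: interruptibly sleeping
       * threads are suspended (or woken if stops are deferred) here, while
       * running threads are flagged to suspend at their next suspension
       * check.  Returns nonzero if the swapper needs to be kicked.  Called
       * from tdsendsignal() and ptracestop().
       */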
 2502 static int
 2503 sig_suspend_threads(struct thread *td, struct proc *p, int sending)
 2504 {
 2505         struct thread *td2;
 2506         int wakeup_swapper;
 2507 
 2508         PROC_LOCK_ASSERT(p, MA_OWNED);
 2509         PROC_SLOCK_ASSERT(p, MA_OWNED);
 2510         MPASS(sending || td == curthread);
 2511 
 2512         wakeup_swapper = 0;
 2513         FOREACH_THREAD_IN_PROC(p, td2) {
 2514                 thread_lock(td2);
 2515                 td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
 2516                 if ((TD_IS_SLEEPING(td2) || TD_IS_SWAPPED(td2)) &&
 2517                     (td2->td_flags & TDF_SINTR)) {
 2518                         if (td2->td_flags & TDF_SBDRY) {
 2519                                 /*
 2520                                  * Once a thread is asleep with
 2521                                  * TDF_SBDRY and without TDF_SERESTART
 2522                                  * or TDF_SEINTR set, it should never
 2523                                  * become suspended due to this check.
 2524                                  */
 2525                                 KASSERT(!TD_IS_SUSPENDED(td2),
 2526                                     ("thread with deferred stops suspended"));
 2527                                 if (TD_SBDRY_INTR(td2))
 2528                                         wakeup_swapper |= sleepq_abort(td2,
 2529                                             TD_SBDRY_ERRNO(td2));
 2530                         } else if (!TD_IS_SUSPENDED(td2)) {
 2531                                 thread_suspend_one(td2);
 2532                         }
 2533                 } else if (!TD_IS_SUSPENDED(td2)) {
 2534                         if (sending || td != td2)
 2535                                 td2->td_flags |= TDF_ASTPENDING;
 2536 #ifdef SMP
 2537                         if (TD_IS_RUNNING(td2) && td2 != td)
 2538                                 forward_signal(td2);
 2539 #endif
 2540                 }
 2541                 thread_unlock(td2);
 2542         }
 2543         return (wakeup_swapper);
 2544 }
 2545 
 2546 /*
 2547  * Stop the process for an event deemed interesting to the debugger. If si is
 2548  * non-NULL, this is a signal exchange; the new signal requested by the
 2549  * debugger will be returned for handling. If si is NULL, this is some other
 2550  * type of interesting event. The debugger may request a signal be delivered in
 2551  * that case as well, however it will be deferred until it can be handled.
 2552  */
 2553 int
 2554 ptracestop(struct thread *td, int sig, ksiginfo_t *si)
 2555 {
 2556         struct proc *p = td->td_proc;
 2557         struct thread *td2;
 2558         ksiginfo_t ksi;
 2559         int prop;
 2560 
 2561         PROC_LOCK_ASSERT(p, MA_OWNED);
 2562         KASSERT(!(p->p_flag & P_WEXIT), ("Stopping exiting process"));
 2563         WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
 2564             &p->p_mtx.lock_object, "Stopping for traced signal");
 2565 
 2566         td->td_xsig = sig;
 2567 
 2568         if (si == NULL || (si->ksi_flags & KSI_PTRACE) == 0) {
 2569                 td->td_dbgflags |= TDB_XSIG;
 2570                 CTR4(KTR_PTRACE, "ptracestop: tid %d (pid %d) flags %#x sig %d",
 2571                     td->td_tid, p->p_pid, td->td_dbgflags, sig);
 2572                 PROC_SLOCK(p);
 2573                 while ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_XSIG)) {
 2574                         if (P_KILLED(p)) {
 2575                                 /*
 2576                                  * Ensure that, if we've been PT_KILLed, the
 2577                                  * exit status reflects that. Another thread
 2578                                  * may also be in ptracestop(), having just
 2579                                  * received the SIGKILL, but this thread was
 2580                                  * unsuspended first.
 2581                                  */
 2582                                 td->td_dbgflags &= ~TDB_XSIG;
 2583                                 td->td_xsig = SIGKILL;
 2584                                 p->p_ptevents = 0;
 2585                                 break;
 2586                         }
 2587                         if (p->p_flag & P_SINGLE_EXIT &&
 2588                             !(td->td_dbgflags & TDB_EXIT)) {
 2589                                 /*
 2590                                  * Ignore ptrace stops except for thread exit
 2591                                  * events when the process exits.
 2592                                  */
 2593                                 td->td_dbgflags &= ~TDB_XSIG;
 2594                                 PROC_SUNLOCK(p);
 2595                                 return (0);
 2596                         }
 2597 
 2598                         /*
 2599                          * Make wait(2) work.  Ensure that right after the
 2600                          * attach, the thread which was decided to become the
 2601                          * leader of attach gets reported to the waiter.
 2602                          * Otherwise, just avoid overwriting another thread's
 2603                          * assignment to p_xthread.  If another thread has
 2604                          * already set p_xthread, the current thread will get
 2605                          * a chance to report itself upon the next iteration.
 2606                          */
 2607                         if ((td->td_dbgflags & TDB_FSTP) != 0 ||
 2608                             ((p->p_flag2 & P2_PTRACE_FSTP) == 0 &&
 2609                             p->p_xthread == NULL)) {
 2610                                 p->p_xsig = sig;
 2611                                 p->p_xthread = td;
 2612 
 2613                                 /*
 2614                                  * If we are on sleepqueue already,
 2615                                  * let sleepqueue code decide if it
 2616                                  * needs to go sleep after attach.
 2617                                  */
 2618                                 if (td->td_wchan == NULL)
 2619                                         td->td_dbgflags &= ~TDB_FSTP;
 2620 
 2621                                 p->p_flag2 &= ~P2_PTRACE_FSTP;
 2622                                 p->p_flag |= P_STOPPED_SIG | P_STOPPED_TRACE;
 2623                                 sig_suspend_threads(td, p, 0);
 2624                         }
 2625                         if ((td->td_dbgflags & TDB_STOPATFORK) != 0) {
 2626                                 td->td_dbgflags &= ~TDB_STOPATFORK;
 2627                         }
 2628 stopme:
 2629                         thread_suspend_switch(td, p);
 2630                         if (p->p_xthread == td)
 2631                                 p->p_xthread = NULL;
 2632                         if (!(p->p_flag & P_TRACED))
 2633                                 break;
 2634                         if (td->td_dbgflags & TDB_SUSPEND) {
 2635                                 if (p->p_flag & P_SINGLE_EXIT)
 2636                                         break;
 2637                                 goto stopme;
 2638                         }
 2639                 }
 2640                 PROC_SUNLOCK(p);
 2641         }
 2642 
 2643         if (si != NULL && sig == td->td_xsig) {
 2644                 /* Parent wants us to take the original signal unchanged. */
 2645                 si->ksi_flags |= KSI_HEAD;
 2646                 if (sigqueue_add(&td->td_sigqueue, sig, si) != 0)
 2647                         si->ksi_signo = 0;
 2648         } else if (td->td_xsig != 0) {
 2649                 /*
 2650                  * If parent wants us to take a new signal, then it will leave
 2651                  * it in td->td_xsig; otherwise we just look for signals again.
 2652                  */
 2653                 ksiginfo_init(&ksi);
 2654                 ksi.ksi_signo = td->td_xsig;
 2655                 ksi.ksi_flags |= KSI_PTRACE;
 2656                 prop = sigprop(td->td_xsig);
 2657                 td2 = sigtd(p, td->td_xsig, prop);
 2658                 tdsendsignal(p, td2, td->td_xsig, &ksi);
 2659                 if (td != td2)
 2660                         return (0);
 2661         }
 2662 
 2663         return (td->td_xsig);
 2664 }
 2665 
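      /*
       * After the signals in 'block' have been unblocked, pick a thread for
       * each now-deliverable pending signal, notify it, and wake it up if
       * the process is traced or the signal is caught and unmasked.
       */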
 2666 static void
 2667 reschedule_signals(struct proc *p, sigset_t block, int flags)
 2668 {
 2669         struct sigacts *ps;
 2670         struct thread *td;
 2671         int sig;
 2672 
 2673         PROC_LOCK_ASSERT(p, MA_OWNED);
 2674         ps = p->p_sigacts;
 2675         mtx_assert(&ps->ps_mtx, (flags & SIGPROCMASK_PS_LOCKED) != 0 ?
 2676             MA_OWNED : MA_NOTOWNED);
 2677         if (SIGISEMPTY(p->p_siglist))
 2678                 return;
 2679         SIGSETAND(block, p->p_siglist);
 2680         while ((sig = sig_ffs(&block)) != 0) {
 2681                 SIGDELSET(block, sig);
 2682                 td = sigtd(p, sig, 0);
 2683                 signotify(td);
 2684                 if (!(flags & SIGPROCMASK_PS_LOCKED))
 2685                         mtx_lock(&ps->ps_mtx);
 2686                 if (p->p_flag & P_TRACED ||
 2687                     (SIGISMEMBER(ps->ps_sigcatch, sig) &&
 2688                     !SIGISMEMBER(td->td_sigmask, sig)))
 2689                         tdsigwakeup(td, sig, SIG_CATCH,
 2690                             (SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR :
 2691                              ERESTART));
 2692                 if (!(flags & SIGPROCMASK_PS_LOCKED))
 2693                         mtx_unlock(&ps->ps_mtx);
 2694         }
 2695 }
 2696 
 2697 void
 2698 tdsigcleanup(struct thread *td)
 2699 {
 2700         struct proc *p;
 2701         sigset_t unblocked;
 2702 
 2703         p = td->td_proc;
 2704         PROC_LOCK_ASSERT(p, MA_OWNED);
 2705 
 2706         sigqueue_flush(&td->td_sigqueue);
 2707         if (p->p_numthreads == 1)
 2708                 return;
 2709 
 2710         /*
 2711          * Since we cannot handle signals, notify signal post code
 2712          * about this by filling the sigmask.
 2713          *
 2714          * Also, if needed, wake up thread(s) that do not block the
 2715          * same signals as the exiting thread, since the thread might
 2716          * have been selected for delivery and woken up.
 2717          */
 2718         SIGFILLSET(unblocked);
 2719         SIGSETNAND(unblocked, td->td_sigmask);
 2720         SIGFILLSET(td->td_sigmask);
 2721         reschedule_signals(p, unblocked, 0);
 2722 
 2723 }
 2724 
 2725 static int
 2726 sigdeferstop_curr_flags(int cflags)
 2727 {
 2728 
 2729         MPASS((cflags & (TDF_SEINTR | TDF_SERESTART)) == 0 ||
 2730             (cflags & TDF_SBDRY) != 0);
 2731         return (cflags & (TDF_SBDRY | TDF_SEINTR | TDF_SERESTART));
 2732 }
 2733 
 2734 /*
 2735  * Defer the delivery of SIGSTOP for the current thread, according to
 2736  * the requested mode.  Returns previous flags, which must be restored
 2737  * by sigallowstop().
 2738  *
 2739  * TDF_SBDRY, TDF_SEINTR, and TDF_SERESTART flags are only set and
 2740  * cleared by the current thread, which allows the lock-less read-only
 2741  * accesses below.
 2742  */
 2743 int
 2744 sigdeferstop_impl(int mode)
 2745 {
 2746         struct thread *td;
 2747         int cflags, nflags;
 2748 
 2749         td = curthread;
 2750         cflags = sigdeferstop_curr_flags(td->td_flags);
 2751         switch (mode) {
 2752         case SIGDEFERSTOP_NOP:
 2753                 nflags = cflags;
 2754                 break;
 2755         case SIGDEFERSTOP_OFF:
 2756                 nflags = 0;
 2757                 break;
 2758         case SIGDEFERSTOP_SILENT:
 2759                 nflags = (cflags | TDF_SBDRY) & ~(TDF_SEINTR | TDF_SERESTART);
 2760                 break;
 2761         case SIGDEFERSTOP_EINTR:
 2762                 nflags = (cflags | TDF_SBDRY | TDF_SEINTR) & ~TDF_SERESTART;
 2763                 break;
 2764         case SIGDEFERSTOP_ERESTART:
 2765                 nflags = (cflags | TDF_SBDRY | TDF_SERESTART) & ~TDF_SEINTR;
 2766                 break;
 2767         default:
 2768                 panic("sigdeferstop: invalid mode %x", mode);
 2769                 break;
 2770         }
 2771         if (cflags == nflags)
 2772                 return (SIGDEFERSTOP_VAL_NCHG);
 2773         thread_lock(td);
 2774         td->td_flags = (td->td_flags & ~cflags) | nflags;
 2775         thread_unlock(td);
 2776         return (cflags);
 2777 }
 2778 
 2779 /*
 2780  * Restores the STOP handling mode, typically permitting the delivery
 2781  * of SIGSTOP for the current thread.  This does not immediately
 2782  * suspend if a stop was posted.  Instead, the thread will suspend
 2783  * either via ast() or a subsequent interruptible sleep.
 2784  */
 2785 void
 2786 sigallowstop_impl(int prev)
 2787 {
 2788         struct thread *td;
 2789         int cflags;
 2790 
 2791         KASSERT(prev != SIGDEFERSTOP_VAL_NCHG, ("failed sigallowstop"));
 2792         KASSERT((prev & ~(TDF_SBDRY | TDF_SEINTR | TDF_SERESTART)) == 0,
 2793             ("sigallowstop: incorrect previous mode %x", prev));
 2794         td = curthread;
 2795         cflags = sigdeferstop_curr_flags(td->td_flags);
 2796         if (cflags != prev) {
 2797                 thread_lock(td);
 2798                 td->td_flags = (td->td_flags & ~cflags) | prev;
 2799                 thread_unlock(td);
 2800         }
 2801 }
 2802 
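      /*
       * Editor's note: illustrative in-kernel sketch, not part of kern_sig.c;
       * the helper name is hypothetical.  Kernel code brackets a region that
       * must not be interrupted by SIGSTOP with the sigdeferstop() and
       * sigallowstop() wrappers from sys/signalvar.h, which land in
       * sigdeferstop_impl() and sigallowstop_impl() above.
       */
      static int
      sleep_without_stops(void *chan, struct mtx *mp)
      {
              int error, prev;

              prev = sigdeferstop(SIGDEFERSTOP_SILENT);       /* defer SIGSTOP */
              error = msleep(chan, mp, PCATCH, "nostop", hz); /* interruptible sleep */
              sigallowstop(prev);                     /* restore the previous mode */
              return (error);
      }
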
 2803 /*
 2804  * If the current process has received a signal (one that should be caught,
 2805  * cause termination, or interrupt the current syscall), return its number.
 2806  * Stop signals with default action are processed immediately, then cleared;
 2807  * they aren't returned.  This is checked after each entry to the system for
 2808  * a syscall or trap (though this can usually be done without calling issignal
 2809  * by checking the pending signal masks in cursig.) The normal call
 2810  * sequence is
 2811  *
 2812  *      while (sig = cursig(curthread))
 2813  *              postsig(sig);
 2814  */
 2815 static int
 2816 issignal(struct thread *td)
 2817 {
 2818         struct proc *p;
 2819         struct sigacts *ps;
 2820         struct sigqueue *queue;
 2821         sigset_t sigpending;
 2822         int prop, sig, traced;
 2823         ksiginfo_t ksi;
 2824 
 2825         p = td->td_proc;
 2826         ps = p->p_sigacts;
 2827         mtx_assert(&ps->ps_mtx, MA_OWNED);
 2828         PROC_LOCK_ASSERT(p, MA_OWNED);
 2829         for (;;) {
 2830                 traced = (p->p_flag & P_TRACED) || (p->p_stops & S_SIG);
 2831 
 2832                 sigpending = td->td_sigqueue.sq_signals;
 2833                 SIGSETOR(sigpending, p->p_sigqueue.sq_signals);
 2834                 SIGSETNAND(sigpending, td->td_sigmask);
 2835 
 2836                 if ((p->p_flag & P_PPWAIT) != 0 || (td->td_flags &
 2837                     (TDF_SBDRY | TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY)
 2838                         SIG_STOPSIGMASK(sigpending);
 2839                 if (SIGISEMPTY(sigpending))     /* no signal to send */
 2840                         return (0);
 2841                 if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED &&
 2842                     (p->p_flag2 & P2_PTRACE_FSTP) != 0 &&
 2843                     SIGISMEMBER(sigpending, SIGSTOP)) {
 2844                         /*
 2845                          * If debugger just attached, always consume
 2846                          * SIGSTOP from ptrace(PT_ATTACH) first, to
 2847                          * execute the debugger attach ritual in
 2848                          * order.
 2849                          */
 2850                         sig = SIGSTOP;
 2851                         td->td_dbgflags |= TDB_FSTP;
 2852                 } else {
 2853                         sig = sig_ffs(&sigpending);
 2854                 }
 2855 
 2856                 if (p->p_stops & S_SIG) {
 2857                         mtx_unlock(&ps->ps_mtx);
 2858                         stopevent(p, S_SIG, sig);
 2859                         mtx_lock(&ps->ps_mtx);
 2860                 }
 2861 
 2862                 /*
 2863                  * We should see pending but ignored signals
 2864                  * only if P_TRACED was on when they were posted.
 2865                  */
 2866                 if (SIGISMEMBER(ps->ps_sigignore, sig) && (traced == 0)) {
 2867                         sigqueue_delete(&td->td_sigqueue, sig);
 2868                         sigqueue_delete(&p->p_sigqueue, sig);
 2869                         continue;
 2870                 }
 2871                 if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED) {
 2872                         /*
 2873                          * If traced, always stop.
 2874                          * Remove old signal from queue before the stop.
 2875                          * XXX shrug off debugger, it causes siginfo to
 2876                          * be thrown away.
 2877                          */
 2878                         queue = &td->td_sigqueue;
 2879                         ksiginfo_init(&ksi);
 2880                         if (sigqueue_get(queue, sig, &ksi) == 0) {
 2881                                 queue = &p->p_sigqueue;
 2882                                 sigqueue_get(queue, sig, &ksi);
 2883                         }
 2884                         td->td_si = ksi.ksi_info;
 2885 
 2886                         mtx_unlock(&ps->ps_mtx);
 2887                         sig = ptracestop(td, sig, &ksi);
 2888                         mtx_lock(&ps->ps_mtx);
 2889 
 2890                         td->td_si.si_signo = 0;
 2891 
 2892                         /* 
 2893                          * Keep looking if the debugger discarded or
 2894                          * replaced the signal.
 2895                          */
 2896                         if (sig == 0)
 2897                                 continue;
 2898 
 2899                         /*
 2900                          * If the signal became masked, re-queue it.
 2901                          */
 2902                         if (SIGISMEMBER(td->td_sigmask, sig)) {
 2903                                 ksi.ksi_flags |= KSI_HEAD;
 2904                                 sigqueue_add(&p->p_sigqueue, sig, &ksi);
 2905                                 continue;
 2906                         }
 2907 
 2908                         /*
 2909                          * If the traced bit got turned off, requeue
 2910                          * the signal and go back up to the top to
 2911                          * rescan signals.  This ensures that p_sig*
 2912                          * and p_sigact are consistent.
 2913                          */
 2914                         if ((p->p_flag & P_TRACED) == 0) {
 2915                                 ksi.ksi_flags |= KSI_HEAD;
 2916                                 sigqueue_add(queue, sig, &ksi);
 2917                                 continue;
 2918                         }
 2919                 }
 2920 
 2921                 prop = sigprop(sig);
 2922 
 2923                 /*
 2924                  * Decide whether the signal should be returned.
 2925                  * Return the signal's number, or fall through
 2926                  * to clear it from the pending mask.
 2927                  */
 2928                 switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {
 2929 
 2930                 case (intptr_t)SIG_DFL:
 2931                         /*
 2932                          * Don't take default actions on system processes.
 2933                          */
 2934                         if (p->p_pid <= 1) {
 2935 #ifdef DIAGNOSTIC
 2936                                 /*
 2937                                  * Are you sure you want to ignore SIGSEGV
 2938                                  * in init? XXX
 2939                                  */
 2940                                 printf("Process (pid %lu) got signal %d\n",
 2941                                         (u_long)p->p_pid, sig);
 2942 #endif
 2943                                 break;          /* == ignore */
 2944                         }
 2945                         /*
 2946                          * If there is a pending stop signal to process with
 2947                          * default action, stop here, then clear the signal.
 2948                          * Traced or exiting processes should ignore stops.
 2949                          * Additionally, a member of an orphaned process group
 2950                          * should ignore tty stops.
 2951                          */
 2952                         if (prop & SA_STOP) {
 2953                                 if (p->p_flag &
 2954                                     (P_TRACED | P_WEXIT | P_SINGLE_EXIT) ||
 2955                                     (p->p_pgrp->pg_jobc == 0 &&
 2956                                      prop & SA_TTYSTOP))
 2957                                         break;  /* == ignore */
 2958                                 if (TD_SBDRY_INTR(td)) {
 2959                                         KASSERT((td->td_flags & TDF_SBDRY) != 0,
 2960                                             ("lost TDF_SBDRY"));
 2961                                         return (-1);
 2962                                 }
 2963                                 mtx_unlock(&ps->ps_mtx);
 2964                                 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
 2965                                     &p->p_mtx.lock_object, "Catching SIGSTOP");
 2966                                 sigqueue_delete(&td->td_sigqueue, sig);
 2967                                 sigqueue_delete(&p->p_sigqueue, sig);
 2968                                 p->p_flag |= P_STOPPED_SIG;
 2969                                 p->p_xsig = sig;
 2970                                 PROC_SLOCK(p);
 2971                                 sig_suspend_threads(td, p, 0);
 2972                                 thread_suspend_switch(td, p);
 2973                                 PROC_SUNLOCK(p);
 2974                                 mtx_lock(&ps->ps_mtx);
 2975                                 goto next;
 2976                         } else if (prop & SA_IGNORE) {
 2977                                 /*
 2978                                  * Except for SIGCONT, shouldn't get here.
 2979                                  * Default action is to ignore; drop it.
 2980                                  */
 2981                                 break;          /* == ignore */
 2982                         } else
 2983                                 return (sig);
 2984                         /*NOTREACHED*/
 2985 
 2986                 case (intptr_t)SIG_IGN:
 2987                         /*
 2988                          * Masking above should prevent us ever trying
 2989                          * to take action on an ignored signal other
 2990                          * than SIGCONT, unless process is traced.
 2991                          */
 2992                         if ((prop & SA_CONT) == 0 &&
 2993                             (p->p_flag & P_TRACED) == 0)
 2994                                 printf("issignal\n");
 2995                         break;          /* == ignore */
 2996 
 2997                 default:
 2998                         /*
 2999                          * This signal has an action, let
 3000                          * postsig() process it.
 3001                          */
 3002                         return (sig);
 3003                 }
 3004                 sigqueue_delete(&td->td_sigqueue, sig); /* take the signal! */
 3005                 sigqueue_delete(&p->p_sigqueue, sig);
 3006 next:;
 3007         }
 3008         /* NOTREACHED */
 3009 }
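      /*
       * For reference, issignal() as written above returns 0 when no
       * deliverable signal is pending, -1 when a default-action stop signal
       * arrives while the thread is in a stop-deferred (TDF_SBDRY) section
       * marked for interruption rather than stopping, and otherwise the
       * signal number to be handled by postsig().
       */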
 3010 
 3011 void
 3012 thread_stopped(struct proc *p)
 3013 {
 3014         int n;
 3015 
 3016         PROC_LOCK_ASSERT(p, MA_OWNED);
 3017         PROC_SLOCK_ASSERT(p, MA_OWNED);
 3018         n = p->p_suspcount;
 3019         if (p == curproc)
 3020                 n++;
 3021         if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) {
 3022                 PROC_SUNLOCK(p);
 3023                 p->p_flag &= ~P_WAITED;
 3024                 PROC_LOCK(p->p_pptr);
 3025                 childproc_stopped(p, (p->p_flag & P_TRACED) ?
 3026                         CLD_TRAPPED : CLD_STOPPED);
 3027                 PROC_UNLOCK(p->p_pptr);
 3028                 PROC_SLOCK(p);
 3029         }
 3030 }
 3031 
 3032 /*
 3033  * Take the action for the specified signal
 3034  * from the current set of pending signals.
 3035  */
 3036 int
 3037 postsig(int sig)
 3038 {
 3039         struct thread *td;
 3040         struct proc *p;
 3041         struct sigacts *ps;
 3042         sig_t action;
 3043         ksiginfo_t ksi;
 3044         sigset_t returnmask;
 3045 
 3046         KASSERT(sig != 0, ("postsig"));
 3047 
 3048         td = curthread;
 3049         p = td->td_proc;
 3050         PROC_LOCK_ASSERT(p, MA_OWNED);
 3051         ps = p->p_sigacts;
 3052         mtx_assert(&ps->ps_mtx, MA_OWNED);
 3053         ksiginfo_init(&ksi);
 3054         if (sigqueue_get(&td->td_sigqueue, sig, &ksi) == 0 &&
 3055             sigqueue_get(&p->p_sigqueue, sig, &ksi) == 0)
 3056                 return (0);
 3057         ksi.ksi_signo = sig;
 3058         if (ksi.ksi_code == SI_TIMER)
 3059                 itimer_accept(p, ksi.ksi_timerid, &ksi);
 3060         action = ps->ps_sigact[_SIG_IDX(sig)];
 3061 #ifdef KTRACE
 3062         if (KTRPOINT(td, KTR_PSIG))
 3063                 ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ?
 3064                     &td->td_oldsigmask : &td->td_sigmask, ksi.ksi_code);
 3065 #endif
 3066         if ((p->p_stops & S_SIG) != 0) {
 3067                 mtx_unlock(&ps->ps_mtx);
 3068                 stopevent(p, S_SIG, sig);
 3069                 mtx_lock(&ps->ps_mtx);
 3070         }
 3071 
 3072         if (action == SIG_DFL) {
 3073                 /*
 3074                  * Default action, where the default is to kill
 3075                  * the process.  (Other cases were ignored above.)
 3076                  */
 3077                 mtx_unlock(&ps->ps_mtx);
 3078                 proc_td_siginfo_capture(td, &ksi.ksi_info);
 3079                 sigexit(td, sig);
 3080                 /* NOTREACHED */
 3081         } else {
 3082                 /*
 3083                  * If we get here, the signal must be caught.
 3084                  */
 3085                 KASSERT(action != SIG_IGN, ("postsig action %p", action));
 3086                 KASSERT(!SIGISMEMBER(td->td_sigmask, sig),
 3087                     ("postsig action: blocked sig %d", sig));
 3088 
 3089                 /*
 3090                  * Set the new mask value and also defer further
 3091                  * occurrences of this signal.
 3092                  *
 3093                  * Special case: user has done a sigsuspend.  Here the
 3094                  * current mask is not of interest, but rather the
 3095                  * mask from before the sigsuspend is what we want
 3096                  * restored after the signal processing is completed.
 3097                  */
 3098                 if (td->td_pflags & TDP_OLDMASK) {
 3099                         returnmask = td->td_oldsigmask;
 3100                         td->td_pflags &= ~TDP_OLDMASK;
 3101                 } else
 3102                         returnmask = td->td_sigmask;
 3103 
 3104                 if (p->p_sig == sig) {
 3105                         p->p_code = 0;
 3106                         p->p_sig = 0;
 3107                 }
 3108                 (*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask);
 3109                 postsig_done(sig, td, ps);
 3110         }
 3111         return (1);
 3112 }
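      /*
       * Note on the return value above: postsig() returns 0 if the signal was
       * no longer queued by the time it ran (another thread may already have
       * taken it), and 1 once the default or caught action has been carried
       * out.
       */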
 3113 
 3114 void
 3115 proc_wkilled(struct proc *p)
 3116 {
 3117 
 3118         PROC_LOCK_ASSERT(p, MA_OWNED);
 3119         if ((p->p_flag & P_WKILLED) == 0) {
 3120                 p->p_flag |= P_WKILLED;
 3121                 /*
 3122                  * Notify swapper that there is a process to swap in.
 3123                  * The notification is racy; at worst it would take 10
 3124                  * seconds for the swapper process to notice.
 3125                  */
 3126                 if ((p->p_flag & (P_INMEM | P_SWAPPINGIN)) == 0)
 3127                         wakeup(&proc0);
 3128         }
 3129 }
 3130 
 3131 /*
 3132  * Kill the current process for stated reason.
 3133  */
 3134 void
 3135 killproc(struct proc *p, char *why)
 3136 {
 3137 
 3138         PROC_LOCK_ASSERT(p, MA_OWNED);
 3139         CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)", p, p->p_pid,
 3140             p->p_comm);
 3141         log(LOG_ERR, "pid %d (%s), jid %d, uid %d, was killed: %s\n",
 3142             p->p_pid, p->p_comm, p->p_ucred->cr_prison->pr_id,
 3143             p->p_ucred ? p->p_ucred->cr_uid : -1, why);
 3144         proc_wkilled(p);
 3145         kern_psignal(p, SIGKILL);
 3146 }
 3147 
 3148 /*
 3149  * Force the current process to exit with the specified signal, dumping core
 3150  * if appropriate.  We bypass the normal tests for masked and caught signals,
 3151  * allowing unrecoverable failures to terminate the process without changing
 3152  * signal state.  Mark the accounting record with the signal termination.
 3153  * If dumping core, save the signal number for the debugger.  Calls exit and
 3154  * does not return.
 3155  */
 3156 void
 3157 sigexit(struct thread *td, int sig)
 3158 {
 3159         struct proc *p = td->td_proc;
 3160 
 3161         PROC_LOCK_ASSERT(p, MA_OWNED);
 3162         p->p_acflag |= AXSIG;
 3163         /*
 3164          * We must be single-threading to generate a core dump.  This
 3165          * ensures that the registers in the core file are up-to-date.
 3166          * Also, the ELF dump handler assumes that the thread list doesn't
 3167          * change out from under it.
 3168          *
 3169          * XXX If another thread attempts to single-thread before us
 3170          *     (e.g. via fork()), we won't get a dump at all.
 3171          */
 3172         if ((sigprop(sig) & SA_CORE) && thread_single(p, SINGLE_NO_EXIT) == 0) {
 3173                 p->p_sig = sig;
 3174                 /*
 3175                  * Log signals which would cause core dumps.
 3176                  * (Log as LOG_INFO to appease those who don't want
 3177                  * these messages.)
 3178                  * XXX: TODO: write out the ruid as well as the euid.
 3179                  * Note that coredump() drops the proc lock.
 3180                  */
 3181                 if (coredump(td) == 0)
 3182                         sig |= WCOREFLAG;
 3183                 if (kern_logsigexit)
 3184                         log(LOG_INFO,
 3185                             "pid %d (%s), jid %d, uid %d: exited on "
 3186                             "signal %d%s\n", p->p_pid, p->p_comm,
 3187                             p->p_ucred->cr_prison->pr_id,
 3188                             td->td_ucred ? td->td_ucred->cr_uid : -1,
 3189                             sig &~ WCOREFLAG,
 3190                             sig & WCOREFLAG ? " (core dumped)" : "");
 3191         } else
 3192                 PROC_UNLOCK(p);
 3193         exit1(td, 0, sig);
 3194         /* NOTREACHED */
 3195 }
 3196 
 3197 /*
 3198  * Send queued SIGCHLD to parent when child process's state
 3199  * is changed.
 3200  */
 3201 static void
 3202 sigparent(struct proc *p, int reason, int status)
 3203 {
 3204         PROC_LOCK_ASSERT(p, MA_OWNED);
 3205         PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
 3206 
 3207         if (p->p_ksi != NULL) {
 3208                 p->p_ksi->ksi_signo  = SIGCHLD;
 3209                 p->p_ksi->ksi_code   = reason;
 3210                 p->p_ksi->ksi_status = status;
 3211                 p->p_ksi->ksi_pid    = p->p_pid;
 3212                 p->p_ksi->ksi_uid    = p->p_ucred->cr_ruid;
 3213                 if (KSI_ONQ(p->p_ksi))
 3214                         return;
 3215         }
 3216         pksignal(p->p_pptr, SIGCHLD, p->p_ksi);
 3217 }
 3218 
 3219 static void
 3220 childproc_jobstate(struct proc *p, int reason, int sig)
 3221 {
 3222         struct sigacts *ps;
 3223 
 3224         PROC_LOCK_ASSERT(p, MA_OWNED);
 3225         PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
 3226 
 3227         /*
 3228          * Wake up the parent sleeping in kern_wait() and also send
 3229          * SIGCHLD to the parent.  SIGCHLD alone does not guarantee
 3230          * that the parent will wake up, because the parent may have
 3231          * masked the signal.
 3232          */
 3233         p->p_pptr->p_flag |= P_STATCHILD;
 3234         wakeup(p->p_pptr);
 3235 
 3236         ps = p->p_pptr->p_sigacts;
 3237         mtx_lock(&ps->ps_mtx);
 3238         if ((ps->ps_flag & PS_NOCLDSTOP) == 0) {
 3239                 mtx_unlock(&ps->ps_mtx);
 3240                 sigparent(p, reason, sig);
 3241         } else
 3242                 mtx_unlock(&ps->ps_mtx);
 3243 }
 3244 
 3245 void
 3246 childproc_stopped(struct proc *p, int reason)
 3247 {
 3248 
 3249         childproc_jobstate(p, reason, p->p_xsig);
 3250 }
 3251 
 3252 void
 3253 childproc_continued(struct proc *p)
 3254 {
 3255         childproc_jobstate(p, CLD_CONTINUED, SIGCONT);
 3256 }
 3257 
 3258 void
 3259 childproc_exited(struct proc *p)
 3260 {
 3261         int reason, status;
 3262 
 3263         if (WCOREDUMP(p->p_xsig)) {
 3264                 reason = CLD_DUMPED;
 3265                 status = WTERMSIG(p->p_xsig);
 3266         } else if (WIFSIGNALED(p->p_xsig)) {
 3267                 reason = CLD_KILLED;
 3268                 status = WTERMSIG(p->p_xsig);
 3269         } else {
 3270                 reason = CLD_EXITED;
 3271                 status = p->p_xexit;
 3272         }
 3273         /*
 3274          * XXX avoid calling wakeup(p->p_pptr), the work is
 3275          * done in exit1().
 3276          */
 3277         sigparent(p, reason, status);
 3278 }
 3279 
 3280 /*
 3281  * We only have 1 character for the core count in the format
 3282  * string, so the range will be 0-9.
 3283  */
 3284 #define MAX_NUM_CORES 10
 3285 static int num_cores = 5;
 3286 
 3287 static int
 3288 sysctl_debug_num_cores_check (SYSCTL_HANDLER_ARGS)
 3289 {
 3290         int error;
 3291         int new_val;
 3292 
 3293         new_val = num_cores;
 3294         error = sysctl_handle_int(oidp, &new_val, 0, req);
 3295         if (error != 0 || req->newptr == NULL)
 3296                 return (error);
 3297         if (new_val > MAX_NUM_CORES)
 3298                 new_val = MAX_NUM_CORES;
 3299         if (new_val < 0)
 3300                 new_val = 0;
 3301         num_cores = new_val;
 3302         return (0);
 3303 }
 3304 SYSCTL_PROC(_debug, OID_AUTO, ncores, CTLTYPE_INT|CTLFLAG_RW,
 3305             0, sizeof(int), sysctl_debug_num_cores_check, "I", "");
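      /*
       * For illustration: the handler above clamps the value to the range
       * 0..MAX_NUM_CORES, because only a single digit is substituted for %I
       * in the core file name.  Setting, e.g.,
       *
       *   # sysctl debug.ncores=3
       *
       * limits the %I candidates to indices 0 through 2.
       */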
 3306 
 3307 #define GZ_SUFFIX       ".gz"
 3308 
 3309 #ifdef GZIO
 3310 static int compress_user_cores = 1;
 3311 SYSCTL_INT(_kern, OID_AUTO, compress_user_cores, CTLFLAG_RWTUN,
 3312     &compress_user_cores, 0, "Compression of user corefiles");
 3313 
 3314 int compress_user_cores_gzlevel = 6;
 3315 SYSCTL_INT(_kern, OID_AUTO, compress_user_cores_gzlevel, CTLFLAG_RWTUN,
 3316     &compress_user_cores_gzlevel, 0, "Corefile gzip compression level");
 3317 #else
 3318 static int compress_user_cores = 0;
 3319 #endif
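      /*
       * For illustration: with compression enabled (a kernel built with the
       * GZIO option and kern.compress_user_cores set non-zero, e.g. via
       * "sysctl kern.compress_user_cores=1"), corefile_open() below appends
       * GZ_SUFFIX (".gz") to the generated name and coredump() passes
       * IMGACT_CORE_COMPRESS to the process-specific dump routine.
       * kern.compress_user_cores_gzlevel selects the gzip level (default 6).
       */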
 3320 
 3321 /*
 3322  * Protect the access to corefilename[] by allproc_lock.
 3323  */
 3324 #define corefilename_lock       allproc_lock
 3325 
 3326 static char corefilename[MAXPATHLEN] = {"%N.core"};
 3327 TUNABLE_STR("kern.corefile", corefilename, sizeof(corefilename));
 3328 
 3329 static int
 3330 sysctl_kern_corefile(SYSCTL_HANDLER_ARGS)
 3331 {
 3332         int error;
 3333 
 3334         sx_xlock(&corefilename_lock);
 3335         error = sysctl_handle_string(oidp, corefilename, sizeof(corefilename),
 3336             req);
 3337         sx_xunlock(&corefilename_lock);
 3338 
 3339         return (error);
 3340 }
 3341 SYSCTL_PROC(_kern, OID_AUTO, corefile, CTLTYPE_STRING | CTLFLAG_RW |
 3342     CTLFLAG_MPSAFE, 0, 0, sysctl_kern_corefile, "A",
 3343     "Process corefile name format string");
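      /*
       * A hypothetical configuration example (path and values illustrative
       * only): setting
       *
       *   # sysctl kern.corefile="/var/coredumps/%U/%N.%P.core"
       *
       * (or the equivalent kern.corefile loader tunable) would make
       * corefile_open() below expand a dump of pid 1234 of "httpd" running
       * with uid 80 to "/var/coredumps/80/httpd.1234.core".
       */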
 3344 
 3345 static void
 3346 vnode_close_locked(struct thread *td, struct vnode *vp)
 3347 {
 3348 
 3349         VOP_UNLOCK(vp, 0);
 3350         vn_close(vp, FWRITE, td->td_ucred, td);
 3351 }
 3352 
 3353 /*
 3354  * If the core format has a %I in it, then we need to check
 3355  * for existing corefiles before choosing a name.
 3356  * To do this we iterate over 0..num_cores to find a
 3357  * nonexistent core file name to use.  If all candidate names
 3358  * are already in use, we choose the oldest one.
 3359  */
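      /*
       * A sketch of the selection, assuming kern.corefile="%N.%I.core" and the
       * default num_cores of 5: the candidates for process "prog" are
       * prog.0.core through prog.4.core.  The first candidate that
       * vn_open_cred() reports as newly created (O_CREAT still set in the
       * returned flags) is used; if every name already exists, the candidate
       * with the oldest modification time is reused, unless the process is
       * set-id (P_SUGID), in which case EFAULT is returned rather than
       * overwriting an existing file.
       */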
 3360 static int
 3361 corefile_open_last(struct thread *td, char *name, int indexpos,
 3362     struct vnode **vpp)
 3363 {
 3364         struct vnode *oldvp, *nextvp, *vp;
 3365         struct vattr vattr;
 3366         struct nameidata nd;
 3367         int error, i, flags, oflags, cmode;
 3368         struct timespec lasttime;
 3369 
 3370         nextvp = oldvp = NULL;
 3371         cmode = S_IRUSR | S_IWUSR;
 3372         oflags = VN_OPEN_NOAUDIT | VN_OPEN_NAMECACHE |
 3373             (capmode_coredump ? VN_OPEN_NOCAPCHECK : 0);
 3374 
 3375         for (i = 0; i < num_cores; i++) {
 3376                 flags = O_CREAT | FWRITE | O_NOFOLLOW;
 3377                 name[indexpos] = '0' + i;
 3378 
 3379                 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td);
 3380                 error = vn_open_cred(&nd, &flags, cmode, oflags, td->td_ucred,
 3381                     NULL);
 3382                 if (error != 0)
 3383                         break;
 3384 
 3385                 vp = nd.ni_vp;
 3386                 NDFREE(&nd, NDF_ONLY_PNBUF);
 3387                 if ((flags & O_CREAT) == O_CREAT) {
 3388                         nextvp = vp;
 3389                         break;
 3390                 }
 3391 
 3392                 error = VOP_GETATTR(vp, &vattr, td->td_ucred);
 3393                 if (error != 0) {
 3394                         vnode_close_locked(td, vp);
 3395                         break;
 3396                 }
 3397 
 3398                 if (oldvp == NULL ||
 3399                     lasttime.tv_sec > vattr.va_mtime.tv_sec ||
 3400                     (lasttime.tv_sec == vattr.va_mtime.tv_sec &&
 3401                     lasttime.tv_nsec >= vattr.va_mtime.tv_nsec)) {
 3402                         if (oldvp != NULL)
 3403                                 vnode_close_locked(td, oldvp);
 3404                         oldvp = vp;
 3405                         lasttime = vattr.va_mtime;
 3406                 } else {
 3407                         vnode_close_locked(td, vp);
 3408                 }
 3409         }
 3410 
 3411         if (oldvp != NULL) {
 3412                 if (nextvp == NULL) {
 3413                         if ((td->td_proc->p_flag & P_SUGID) != 0) {
 3414                                 error = EFAULT;
 3415                                 vnode_close_locked(td, oldvp);
 3416                         } else {
 3417                                 nextvp = oldvp;
 3418                         }
 3419                 } else {
 3420                         vnode_close_locked(td, oldvp);
 3421                 }
 3422         }
 3423         if (error != 0) {
 3424                 if (nextvp != NULL)
 3425                         vnode_close_locked(td, oldvp);
 3426         } else {
 3427                 *vpp = nextvp;
 3428         }
 3429 
 3430         return (error);
 3431 }
 3432 
 3433 /*
 3434  * corefile_open(comm, uid, pid, td, compress, vpp, namep)
 3435  * Expand the name described in corefilename, using name, uid, and pid
 3436  * and open/create core file.
 3437  * corefilename is a printf-like string, with three format specifiers:
 3438  *      %N      name of process ("name")
 3439  *      %P      process id (pid)
 3440  *      %U      user id (uid)
 3441  * For example, "%N.core" is the default; they can be disabled completely
 3442  * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
 3443  * This is controlled by the sysctl variable kern.corefile (see above).
 3444  */
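      /*
       * In addition to the three specifiers documented above, the expansion
       * loop below also recognizes %H (the hostname from getcredhostname()),
       * %I (an autoincrementing index handled by corefile_open_last()), and
       * %% for a literal '%'.
       */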
 3445 static int
 3446 corefile_open(const char *comm, uid_t uid, pid_t pid, struct thread *td,
 3447     int compress, struct vnode **vpp, char **namep)
 3448 {
 3449         struct sbuf sb;
 3450         struct nameidata nd;
 3451         const char *format;
 3452         char *hostname, *name;
 3453         int cmode, error, flags, i, indexpos, oflags;
 3454 
 3455         hostname = NULL;
 3456         format = corefilename;
 3457         name = malloc(MAXPATHLEN, M_TEMP, M_WAITOK | M_ZERO);
 3458         indexpos = -1;
 3459         (void)sbuf_new(&sb, name, MAXPATHLEN, SBUF_FIXEDLEN);
 3460         sx_slock(&corefilename_lock);
 3461         for (i = 0; format[i] != '\0'; i++) {
 3462                 switch (format[i]) {
 3463                 case '%':       /* Format character */
 3464                         i++;
 3465                         switch (format[i]) {
 3466                         case '%':
 3467                                 sbuf_putc(&sb, '%');
 3468                                 break;
 3469                         case 'H':       /* hostname */
 3470                                 if (hostname == NULL) {
 3471                                         hostname = malloc(MAXHOSTNAMELEN,
 3472                                             M_TEMP, M_WAITOK);
 3473                                 }
 3474                                 getcredhostname(td->td_ucred, hostname,
 3475                                     MAXHOSTNAMELEN);
 3476                                 sbuf_printf(&sb, "%s", hostname);
 3477                                 break;
 3478                         case 'I':       /* autoincrementing index */
 3479                                 sbuf_printf(&sb, "0");
 3480                                 indexpos = sbuf_len(&sb) - 1;
 3481                                 break;
 3482                         case 'N':       /* process name */
 3483                                 sbuf_printf(&sb, "%s", comm);
 3484                                 break;
 3485                         case 'P':       /* process id */
 3486                                 sbuf_printf(&sb, "%u", pid);
 3487                                 break;
 3488                         case 'U':       /* user id */
 3489                                 sbuf_printf(&sb, "%u", uid);
 3490                                 break;
 3491                         default:
 3492                                 log(LOG_ERR,
 3493                                     "Unknown format character %c in "
 3494                                     "corename `%s'\n", format[i], format);
 3495                                 break;
 3496                         }
 3497                         break;
 3498                 default:
 3499                         sbuf_putc(&sb, format[i]);
 3500                         break;
 3501                 }
 3502         }
 3503         sx_sunlock(&corefilename_lock);
 3504         free(hostname, M_TEMP);
 3505         if (compress)
 3506                 sbuf_printf(&sb, GZ_SUFFIX);
 3507         if (sbuf_error(&sb) != 0) {
 3508                 log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too "
 3509                     "long\n", (long)pid, comm, (u_long)uid);
 3510                 sbuf_delete(&sb);
 3511                 free(name, M_TEMP);
 3512                 return (ENOMEM);
 3513         }
 3514         sbuf_finish(&sb);
 3515         sbuf_delete(&sb);
 3516 
 3517         if (indexpos != -1) {
 3518                 error = corefile_open_last(td, name, indexpos, vpp);
 3519                 if (error != 0) {
 3520                         log(LOG_ERR,
 3521                             "pid %d (%s), uid (%u):  Path `%s' failed "
 3522                             "on initial open test, error = %d\n",
 3523                             pid, comm, uid, name, error);
 3524                 }
 3525         } else {
 3526                 cmode = S_IRUSR | S_IWUSR;
 3527                 oflags = VN_OPEN_NOAUDIT | VN_OPEN_NAMECACHE |
 3528                     (capmode_coredump ? VN_OPEN_NOCAPCHECK : 0);
 3529                 flags = O_CREAT | FWRITE | O_NOFOLLOW;
 3530                 if ((td->td_proc->p_flag & P_SUGID) != 0)
 3531                         flags |= O_EXCL;
 3532 
 3533                 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td);
 3534                 error = vn_open_cred(&nd, &flags, cmode, oflags, td->td_ucred,
 3535                     NULL);
 3536                 if (error == 0) {
 3537                         *vpp = nd.ni_vp;
 3538                         NDFREE(&nd, NDF_ONLY_PNBUF);
 3539                 }
 3540         }
 3541 
 3542         if (error != 0) {
 3543 #ifdef AUDIT
 3544                 audit_proc_coredump(td, name, error);
 3545 #endif
 3546                 free(name, M_TEMP);
 3547                 return (error);
 3548         }
 3549         *namep = name;
 3550         return (0);
 3551 }
 3552 
 3553 static int
 3554 coredump_sanitise_path(const char *path)
 3555 {
 3556         size_t i;
 3557 
 3558         /*
 3559          * Only send a subset of ASCII to devd(8) because it
 3560          * might pass these strings to sh -c.
 3561          */
 3562         for (i = 0; path[i]; i++)
 3563                 if (!(isalpha(path[i]) || isdigit(path[i])) &&
 3564                     path[i] != '/' && path[i] != '.' &&
 3565                     path[i] != '-')
 3566                         return (0);
 3567 
 3568         return (1);
 3569 }
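      /*
       * For example (illustrative paths only): "/usr/local/bin/myprog-1.2"
       * passes the check above, while a path containing a space, '$', or any
       * other character outside [A-Za-z0-9/.-] causes the devctl notification
       * in coredump() to be skipped for that path.
       */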
 3570 
 3571 /*
 3572  * Dump a process' core.  The main routine does some
 3573  * policy checking, and creates the name of the coredump;
 3574  * then it passes on a vnode and a size limit to the process-specific
 3575  * coredump routine if there is one; if there _is not_ one, it returns
 3576  * ENOSYS; otherwise it returns the error from the process-specific routine.
 3577  */
 3578 
 3579 static int
 3580 coredump(struct thread *td)
 3581 {
 3582         struct proc *p = td->td_proc;
 3583         struct ucred *cred = td->td_ucred;
 3584         struct vnode *vp;
 3585         struct flock lf;
 3586         struct vattr vattr;
 3587         int error, error1, locked;
 3588         char *name;                     /* name of corefile */
 3589         void *rl_cookie;
 3590         off_t limit;
 3591         char *data = NULL;
 3592         char *fullpath, *freepath = NULL;
 3593         size_t len;
 3594         static const char comm_name[] = "comm=";
 3595         static const char core_name[] = "core=";
 3596 
 3597         PROC_LOCK_ASSERT(p, MA_OWNED);
 3598         MPASS((p->p_flag & P_HADTHREADS) == 0 || p->p_singlethread == td);
 3599         _STOPEVENT(p, S_CORE, 0);
 3600 
 3601         if (!do_coredump || (!sugid_coredump && (p->p_flag & P_SUGID) != 0) ||
 3602             (p->p_flag2 & P2_NOTRACE) != 0) {
 3603                 PROC_UNLOCK(p);
 3604                 return (EFAULT);
 3605         }
 3606 
 3607         /*
 3608          * Note that the bulk of limit checking is done after
 3609          * the corefile is created.  The exception is if the limit
 3610          * for corefiles is 0, in which case we don't bother
 3611          * creating the corefile at all.  This layout means that
 3612          * a corefile is truncated instead of not being created,
 3613          * if it is larger than the limit.
 3614          */
 3615         limit = (off_t)lim_cur(td, RLIMIT_CORE);
 3616         if (limit == 0 || racct_get_available(p, RACCT_CORE) == 0) {
 3617                 PROC_UNLOCK(p);
 3618                 return (EFBIG);
 3619         }
 3620         PROC_UNLOCK(p);
 3621 
 3622         error = corefile_open(p->p_comm, cred->cr_uid, p->p_pid, td,
 3623             compress_user_cores, &vp, &name);
 3624         if (error != 0)
 3625                 return (error);
 3626 
 3627         /*
 3628          * Don't dump to non-regular files or files with links.
 3629          * Do not dump into system files. Effective user must own the corefile.
 3630          */
 3631         if (vp->v_type != VREG || VOP_GETATTR(vp, &vattr, cred) != 0 ||
 3632             vattr.va_nlink != 1 || (vp->v_vflag & VV_SYSTEM) != 0 ||
 3633             vattr.va_uid != cred->cr_uid) {
 3634                 VOP_UNLOCK(vp, 0);
 3635                 error = EFAULT;
 3636                 goto out;
 3637         }
 3638 
 3639         VOP_UNLOCK(vp, 0);
 3640 
 3641         /* Postpone other writers, including core dumps of other processes. */
 3642         rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
 3643 
 3644         lf.l_whence = SEEK_SET;
 3645         lf.l_start = 0;
 3646         lf.l_len = 0;
 3647         lf.l_type = F_WRLCK;
 3648         locked = (VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK) == 0);
 3649 
 3650         VATTR_NULL(&vattr);
 3651         vattr.va_size = 0;
 3652         if (set_core_nodump_flag)
 3653                 vattr.va_flags = UF_NODUMP;
 3654         vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 3655         VOP_SETATTR(vp, &vattr, cred);
 3656         VOP_UNLOCK(vp, 0);
 3657         PROC_LOCK(p);
 3658         p->p_acflag |= ACORE;
 3659         PROC_UNLOCK(p);
 3660 
 3661         if (p->p_sysent->sv_coredump != NULL) {
 3662                 error = p->p_sysent->sv_coredump(td, vp, limit,
 3663                     compress_user_cores ? IMGACT_CORE_COMPRESS : 0);
 3664         } else {
 3665                 error = ENOSYS;
 3666         }
 3667 
 3668         if (locked) {
 3669                 lf.l_type = F_UNLCK;
 3670                 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
 3671         }
 3672         vn_rangelock_unlock(vp, rl_cookie);
 3673 
 3674         /*
 3675          * Notify the userland helper that a process triggered a core dump.
 3676          * This allows the helper to run an automated debugging session.
 3677          */
 3678         if (error != 0 || coredump_devctl == 0)
 3679                 goto out;
 3680         len = MAXPATHLEN * 2 + sizeof(comm_name) - 1 +
 3681             sizeof(' ') + sizeof(core_name) - 1;
 3682         data = malloc(len, M_TEMP, M_WAITOK);
 3683         if (vn_fullpath_global(td, p->p_textvp, &fullpath, &freepath) != 0)
 3684                 goto out;
 3685         if (!coredump_sanitise_path(fullpath))
 3686                 goto out;
 3687         snprintf(data, len, "%s%s ", comm_name, fullpath);
 3688         free(freepath, M_TEMP);
 3689         freepath = NULL;
 3690         if (vn_fullpath_global(td, vp, &fullpath, &freepath) != 0)
 3691                 goto out;
 3692         if (!coredump_sanitise_path(fullpath))
 3693                 goto out;
 3694         strlcat(data, core_name, len);
 3695         strlcat(data, fullpath, len);
 3696         devctl_notify("kernel", "signal", "coredump", data);
 3697 out:
 3698         error1 = vn_close(vp, FWRITE, cred, td);
 3699         if (error == 0)
 3700                 error = error1;
 3701 #ifdef AUDIT
 3702         audit_proc_coredump(td, name, error);
 3703 #endif
 3704         free(freepath, M_TEMP);
 3705         free(data, M_TEMP);
 3706         free(name, M_TEMP);
 3707         return (error);
 3708 }
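      /*
       * A hypothetical devd(8) hook for the notification sent above; the
       * strings "kernel", "signal" and "coredump" match the devctl_notify()
       * call, and $comm/$core are filled in from the "comm=... core=..."
       * payload.  The action shown is only an example:
       *
       *   notify 10 {
       *       match "system"     "kernel";
       *       match "subsystem"  "signal";
       *       match "type"       "coredump";
       *       action "logger core dumped: $comm -> $core";
       *   };
       */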
 3709 
 3710 /*
 3711  * Nonexistent system call-- signal process (may want to handle it).  Flag
 3712  * error in case process won't see signal immediately (blocked or ignored).
 3713  */
 3714 #ifndef _SYS_SYSPROTO_H_
 3715 struct nosys_args {
 3716         int     dummy;
 3717 };
 3718 #endif
 3719 /* ARGSUSED */
 3720 int
 3721 nosys(struct thread *td, struct nosys_args *args)
 3722 {
 3723         struct proc *p;
 3724 
 3725         p = td->td_proc;
 3726 
 3727         PROC_LOCK(p);
 3728         tdsignal(td, SIGSYS);
 3729         PROC_UNLOCK(p);
 3730         if (kern_lognosys == 1 || kern_lognosys == 3) {
 3731                 uprintf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm,
 3732                     td->td_sa.code);
 3733         }
 3734         if (kern_lognosys == 2 || kern_lognosys == 3 ||
 3735             (p->p_pid == 1 && (kern_lognosys & 3) == 0)) {
 3736                 printf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm,
 3737                     td->td_sa.code);
 3738         }
 3739         return (ENOSYS);
 3740 }
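      /*
       * Behaviour of the kern_lognosys knob as implemented above: a value of
       * 1 reports the unknown syscall to the calling process's terminal via
       * uprintf(), 2 reports it to the console via printf(), 3 does both, and
       * a nosys call from pid 1 is reported to the console even when the knob
       * is 0.
       */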
 3741 
 3742 /*
 3743  * Send a SIGIO or SIGURG signal to a process or process group using stored
 3744  * credentials rather than those of the current process.
 3745  */
 3746 void
 3747 pgsigio(struct sigio **sigiop, int sig, int checkctty)
 3748 {
 3749         ksiginfo_t ksi;
 3750         struct sigio *sigio;
 3751 
 3752         ksiginfo_init(&ksi);
 3753         ksi.ksi_signo = sig;
 3754         ksi.ksi_code = SI_KERNEL;
 3755 
 3756         SIGIO_LOCK();
 3757         sigio = *sigiop;
 3758         if (sigio == NULL) {
 3759                 SIGIO_UNLOCK();
 3760                 return;
 3761         }
 3762         if (sigio->sio_pgid > 0) {
 3763                 PROC_LOCK(sigio->sio_proc);
 3764                 if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
 3765                         kern_psignal(sigio->sio_proc, sig);
 3766                 PROC_UNLOCK(sigio->sio_proc);
 3767         } else if (sigio->sio_pgid < 0) {
 3768                 struct proc *p;
 3769 
 3770                 PGRP_LOCK(sigio->sio_pgrp);
 3771                 LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
 3772                         PROC_LOCK(p);
 3773                         if (p->p_state == PRS_NORMAL &&
 3774                             CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
 3775                             (checkctty == 0 || (p->p_flag & P_CONTROLT)))
 3776                                 kern_psignal(p, sig);
 3777                         PROC_UNLOCK(p);
 3778                 }
 3779                 PGRP_UNLOCK(sigio->sio_pgrp);
 3780         }
 3781         SIGIO_UNLOCK();
 3782 }
 3783 
 3784 static int
 3785 filt_sigattach(struct knote *kn)
 3786 {
 3787         struct proc *p = curproc;
 3788 
 3789         kn->kn_ptr.p_proc = p;
 3790         kn->kn_flags |= EV_CLEAR;               /* automatically set */
 3791 
 3792         knlist_add(p->p_klist, kn, 0);
 3793 
 3794         return (0);
 3795 }
 3796 
 3797 static void
 3798 filt_sigdetach(struct knote *kn)
 3799 {
 3800         struct proc *p = kn->kn_ptr.p_proc;
 3801 
 3802         knlist_remove(p->p_klist, kn, 0);
 3803 }
 3804 
 3805 /*
 3806  * signal knotes are shared with proc knotes, so we apply a mask to
 3807  * the hint in order to differentiate them from process hints.  This
 3808  * could be avoided by using a signal-specific knote list, but probably
 3809  * isn't worth the trouble.
 3810  */
 3811 static int
 3812 filt_signal(struct knote *kn, long hint)
 3813 {
 3814 
 3815         if (hint & NOTE_SIGNAL) {
 3816                 hint &= ~NOTE_SIGNAL;
 3817 
 3818                 if (kn->kn_id == hint)
 3819                         kn->kn_data++;
 3820         }
 3821         return (kn->kn_data != 0);
 3822 }
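      /*
       * Userland usage sketch (illustrative): a process can observe its own
       * signal deliveries through the filter above, e.g.
       *
       *   struct kevent kev;
       *   int kq = kqueue();
       *   EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
       *   (void)kevent(kq, &kev, 1, NULL, 0, NULL);
       *
       * Because filt_sigattach() sets EV_CLEAR, kn_data reports the number of
       * deliveries since the event was last retrieved.
       */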
 3823 
 3824 struct sigacts *
 3825 sigacts_alloc(void)
 3826 {
 3827         struct sigacts *ps;
 3828 
 3829         ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO);
 3830         refcount_init(&ps->ps_refcnt, 1);
 3831         mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF);
 3832         return (ps);
 3833 }
 3834 
 3835 void
 3836 sigacts_free(struct sigacts *ps)
 3837 {
 3838 
 3839         if (refcount_release(&ps->ps_refcnt) == 0)
 3840                 return;
 3841         mtx_destroy(&ps->ps_mtx);
 3842         free(ps, M_SUBPROC);
 3843 }
 3844 
 3845 struct sigacts *
 3846 sigacts_hold(struct sigacts *ps)
 3847 {
 3848 
 3849         refcount_acquire(&ps->ps_refcnt);
 3850         return (ps);
 3851 }
 3852 
 3853 void
 3854 sigacts_copy(struct sigacts *dest, struct sigacts *src)
 3855 {
 3856 
 3857         KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest"));
 3858         mtx_lock(&src->ps_mtx);
 3859         bcopy(src, dest, offsetof(struct sigacts, ps_refcnt));
 3860         mtx_unlock(&src->ps_mtx);
 3861 }
 3862 
 3863 int
 3864 sigacts_shared(struct sigacts *ps)
 3865 {
 3866 
 3867         return (ps->ps_refcnt > 1);
 3868 }
 3869 
 3870 void
 3871 sig_drop_caught(struct proc *p)
 3872 {
 3873         int sig;
 3874         struct sigacts *ps;
 3875 
 3876         ps = p->p_sigacts;
 3877         PROC_LOCK_ASSERT(p, MA_OWNED);
 3878         mtx_assert(&ps->ps_mtx, MA_OWNED);
 3879         while (SIGNOTEMPTY(ps->ps_sigcatch)) {
 3880                 sig = sig_ffs(&ps->ps_sigcatch);
 3881                 sigdflt(ps, sig);
 3882                 if ((sigprop(sig) & SA_IGNORE) != 0)
 3883                         sigqueue_delete_proc(p, sig);
 3884         }
 3885 }
