
FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_sig.c


    1 /*-
    2  * Copyright (c) 1982, 1986, 1989, 1991, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  * (c) UNIX System Laboratories, Inc.
    5  * All or some portions of this file are derived from material licensed
    6  * to the University of California by American Telephone and Telegraph
    7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
    8  * the permission of UNIX System Laboratories, Inc.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  * 4. Neither the name of the University nor the names of its contributors
   19  *    may be used to endorse or promote products derived from this software
   20  *    without specific prior written permission.
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   32  * SUCH DAMAGE.
   33  *
   34  *      @(#)kern_sig.c  8.7 (Berkeley) 4/18/94
   35  */
   36 
   37 #include <sys/cdefs.h>
   38 __FBSDID("$FreeBSD: releng/11.1/sys/kern/kern_sig.c 318845 2017-05-25 01:09:45Z markj $");
   39 
   40 #include "opt_compat.h"
   41 #include "opt_gzio.h"
   42 #include "opt_ktrace.h"
   43 
   44 #include <sys/param.h>
   45 #include <sys/ctype.h>
   46 #include <sys/systm.h>
   47 #include <sys/signalvar.h>
   48 #include <sys/vnode.h>
   49 #include <sys/acct.h>
   50 #include <sys/bus.h>
   51 #include <sys/capsicum.h>
   52 #include <sys/condvar.h>
   53 #include <sys/event.h>
   54 #include <sys/fcntl.h>
   55 #include <sys/imgact.h>
   56 #include <sys/kernel.h>
   57 #include <sys/ktr.h>
   58 #include <sys/ktrace.h>
   59 #include <sys/lock.h>
   60 #include <sys/malloc.h>
   61 #include <sys/mutex.h>
   62 #include <sys/refcount.h>
   63 #include <sys/namei.h>
   64 #include <sys/proc.h>
   65 #include <sys/procdesc.h>
   66 #include <sys/posix4.h>
   67 #include <sys/pioctl.h>
   68 #include <sys/racct.h>
   69 #include <sys/resourcevar.h>
   70 #include <sys/sdt.h>
   71 #include <sys/sbuf.h>
   72 #include <sys/sleepqueue.h>
   73 #include <sys/smp.h>
   74 #include <sys/stat.h>
   75 #include <sys/sx.h>
   76 #include <sys/syscallsubr.h>
   77 #include <sys/sysctl.h>
   78 #include <sys/sysent.h>
   79 #include <sys/syslog.h>
   80 #include <sys/sysproto.h>
   81 #include <sys/timers.h>
   82 #include <sys/unistd.h>
   83 #include <sys/wait.h>
   84 #include <vm/vm.h>
   85 #include <vm/vm_extern.h>
   86 #include <vm/uma.h>
   87 
   88 #include <sys/jail.h>
   89 
   90 #include <machine/cpu.h>
   91 
   92 #include <security/audit/audit.h>
   93 
   94 #define ONSIG   32              /* NSIG for osig* syscalls.  XXX. */
   95 
   96 SDT_PROVIDER_DECLARE(proc);
   97 SDT_PROBE_DEFINE3(proc, , , signal__send,
   98     "struct thread *", "struct proc *", "int");
   99 SDT_PROBE_DEFINE2(proc, , , signal__clear,
  100     "int", "ksiginfo_t *");
  101 SDT_PROBE_DEFINE3(proc, , , signal__discard,
  102     "struct thread *", "struct proc *", "int");
  103 
  104 static int      coredump(struct thread *);
  105 static int      killpg1(struct thread *td, int sig, int pgid, int all,
  106                     ksiginfo_t *ksi);
  107 static int      issignal(struct thread *td);
  108 static int      sigprop(int sig);
  109 static void     tdsigwakeup(struct thread *, int, sig_t, int);
  110 static int      sig_suspend_threads(struct thread *, struct proc *, int);
  111 static int      filt_sigattach(struct knote *kn);
  112 static void     filt_sigdetach(struct knote *kn);
  113 static int      filt_signal(struct knote *kn, long hint);
  114 static struct thread *sigtd(struct proc *p, int sig, int prop);
  115 static void     sigqueue_start(void);
  116 
  117 static uma_zone_t       ksiginfo_zone = NULL;
  118 struct filterops sig_filtops = {
  119         .f_isfd = 0,
  120         .f_attach = filt_sigattach,
  121         .f_detach = filt_sigdetach,
  122         .f_event = filt_signal,
  123 };
  124 
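The sig_filtops table above backs kqueue's EVFILT_SIGNAL filter (filt_sigattach(), filt_sigdetach() and filt_signal(), declared earlier and defined later in this file). A minimal userland sketch of the consumer side of that filter, not part of kern_sig.c; the choice of SIGUSR1 and the single-event wait are illustrative only:

/* Illustrative only: receive SIGUSR1 through kqueue instead of a handler. */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

#include <err.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        struct kevent kev;
        int kq;

        /* EVFILT_SIGNAL records delivery attempts even for ignored signals. */
        signal(SIGUSR1, SIG_IGN);

        if ((kq = kqueue()) == -1)
                err(1, "kqueue");
        EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
        if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
                err(1, "kevent register");

        printf("pid %d waiting for SIGUSR1 via kqueue\n", (int)getpid());
        if (kevent(kq, NULL, 0, &kev, 1, NULL) == -1)
                err(1, "kevent wait");
        /* kev.data counts deliveries since the filter last triggered. */
        printf("signal %d posted %d time(s)\n", (int)kev.ident, (int)kev.data);
        return (0);
}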
  125 static int      kern_logsigexit = 1;
  126 SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
  127     &kern_logsigexit, 0,
  128     "Log processes quitting on abnormal signals to syslog(3)");
  129 
  130 static int      kern_forcesigexit = 1;
  131 SYSCTL_INT(_kern, OID_AUTO, forcesigexit, CTLFLAG_RW,
  132     &kern_forcesigexit, 0, "Force trap signal to be handled");
  133 
  134 static SYSCTL_NODE(_kern, OID_AUTO, sigqueue, CTLFLAG_RW, 0,
  135     "POSIX real time signal");
  136 
  137 static int      max_pending_per_proc = 128;
  138 SYSCTL_INT(_kern_sigqueue, OID_AUTO, max_pending_per_proc, CTLFLAG_RW,
  139     &max_pending_per_proc, 0, "Max pending signals per proc");
  140 
  141 static int      preallocate_siginfo = 1024;
  142 SYSCTL_INT(_kern_sigqueue, OID_AUTO, preallocate, CTLFLAG_RDTUN,
  143     &preallocate_siginfo, 0, "Preallocated signal memory size");
  144 
  145 static int      signal_overflow = 0;
  146 SYSCTL_INT(_kern_sigqueue, OID_AUTO, overflow, CTLFLAG_RD,
   147     &signal_overflow, 0, "Number of signals that overflowed");
  148 
  149 static int      signal_alloc_fail = 0;
  150 SYSCTL_INT(_kern_sigqueue, OID_AUTO, alloc_fail, CTLFLAG_RD,
  151     &signal_alloc_fail, 0, "signals failed to be allocated");
  152 
  153 SYSINIT(signal, SI_SUB_P1003_1B, SI_ORDER_FIRST+3, sigqueue_start, NULL);
  154 
  155 /*
  156  * Policy -- Can ucred cr1 send SIGIO to process cr2?
  157  * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
  158  * in the right situations.
  159  */
  160 #define CANSIGIO(cr1, cr2) \
  161         ((cr1)->cr_uid == 0 || \
  162             (cr1)->cr_ruid == (cr2)->cr_ruid || \
  163             (cr1)->cr_uid == (cr2)->cr_ruid || \
  164             (cr1)->cr_ruid == (cr2)->cr_uid || \
  165             (cr1)->cr_uid == (cr2)->cr_uid)
  166 
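CANSIGIO() is the credential check applied when the kernel posts SIGIO or SIGURG on behalf of a file object, for example a descriptor placed in O_ASYNC mode with an owner set via F_SETOWN. An illustrative userland setup of that path, not part of kern_sig.c, assuming standard input is a tty or socket that supports O_ASYNC:

/* Illustrative only: arrange for SIGIO on standard input. */
#include <err.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_sigio;

static void
sigio_handler(int sig)
{
        (void)sig;
        got_sigio = 1;
}

int
main(void)
{
        int flags;

        signal(SIGIO, sigio_handler);

        /*
         * Make this process the SIGIO owner of stdin; whether the signal may
         * actually be posted to the owner is governed by the CANSIGIO()-style
         * credential check in the kernel.
         */
        if (fcntl(STDIN_FILENO, F_SETOWN, getpid()) == -1)
                err(1, "F_SETOWN");
        if ((flags = fcntl(STDIN_FILENO, F_GETFL)) == -1)
                err(1, "F_GETFL");
        if (fcntl(STDIN_FILENO, F_SETFL, flags | O_ASYNC) == -1)
                err(1, "F_SETFL O_ASYNC");

        pause();                /* returns with EINTR once SIGIO is handled */
        printf("got_sigio = %d\n", (int)got_sigio);
        return (0);
}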
  167 static int      sugid_coredump;
  168 SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RWTUN,
  169     &sugid_coredump, 0, "Allow setuid and setgid processes to dump core");
  170 
  171 static int      capmode_coredump;
  172 SYSCTL_INT(_kern, OID_AUTO, capmode_coredump, CTLFLAG_RWTUN,
  173     &capmode_coredump, 0, "Allow processes in capability mode to dump core");
  174 
  175 static int      do_coredump = 1;
  176 SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
  177         &do_coredump, 0, "Enable/Disable coredumps");
  178 
  179 static int      set_core_nodump_flag = 0;
  180 SYSCTL_INT(_kern, OID_AUTO, nodump_coredump, CTLFLAG_RW, &set_core_nodump_flag,
  181         0, "Enable setting the NODUMP flag on coredump files");
  182 
  183 static int      coredump_devctl = 0;
  184 SYSCTL_INT(_kern, OID_AUTO, coredump_devctl, CTLFLAG_RW, &coredump_devctl,
  185         0, "Generate a devctl notification when processes coredump");
  186 
  187 /*
  188  * Signal properties and actions.
  189  * The array below categorizes the signals and their default actions
  190  * according to the following properties:
  191  */
  192 #define SA_KILL         0x01            /* terminates process by default */
  193 #define SA_CORE         0x02            /* ditto and coredumps */
  194 #define SA_STOP         0x04            /* suspend process */
  195 #define SA_TTYSTOP      0x08            /* ditto, from tty */
  196 #define SA_IGNORE       0x10            /* ignore by default */
  197 #define SA_CONT         0x20            /* continue if suspended */
  198 #define SA_CANTMASK     0x40            /* non-maskable, catchable */
  199 
  200 static int sigproptbl[NSIG] = {
  201         SA_KILL,                        /* SIGHUP */
  202         SA_KILL,                        /* SIGINT */
  203         SA_KILL|SA_CORE,                /* SIGQUIT */
  204         SA_KILL|SA_CORE,                /* SIGILL */
  205         SA_KILL|SA_CORE,                /* SIGTRAP */
  206         SA_KILL|SA_CORE,                /* SIGABRT */
  207         SA_KILL|SA_CORE,                /* SIGEMT */
  208         SA_KILL|SA_CORE,                /* SIGFPE */
  209         SA_KILL,                        /* SIGKILL */
  210         SA_KILL|SA_CORE,                /* SIGBUS */
  211         SA_KILL|SA_CORE,                /* SIGSEGV */
  212         SA_KILL|SA_CORE,                /* SIGSYS */
  213         SA_KILL,                        /* SIGPIPE */
  214         SA_KILL,                        /* SIGALRM */
  215         SA_KILL,                        /* SIGTERM */
  216         SA_IGNORE,                      /* SIGURG */
  217         SA_STOP,                        /* SIGSTOP */
  218         SA_STOP|SA_TTYSTOP,             /* SIGTSTP */
  219         SA_IGNORE|SA_CONT,              /* SIGCONT */
  220         SA_IGNORE,                      /* SIGCHLD */
  221         SA_STOP|SA_TTYSTOP,             /* SIGTTIN */
  222         SA_STOP|SA_TTYSTOP,             /* SIGTTOU */
  223         SA_IGNORE,                      /* SIGIO */
  224         SA_KILL,                        /* SIGXCPU */
  225         SA_KILL,                        /* SIGXFSZ */
  226         SA_KILL,                        /* SIGVTALRM */
  227         SA_KILL,                        /* SIGPROF */
  228         SA_IGNORE,                      /* SIGWINCH  */
  229         SA_IGNORE,                      /* SIGINFO */
  230         SA_KILL,                        /* SIGUSR1 */
  231         SA_KILL,                        /* SIGUSR2 */
  232 };
  233 
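The SA_KILL|SA_CORE entries above are what make the default action for signals such as SIGQUIT terminate the process and write a core file. A small userland sketch, not part of kern_sig.c, that observes this default action through WCOREDUMP(); it assumes core dumps are not suppressed by resource limits or by the coredump sysctls above:

/* Illustrative only: observe the SA_KILL|SA_CORE default action of SIGQUIT. */
#include <sys/types.h>
#include <sys/wait.h>

#include <err.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        pid_t pid;
        int status;

        if ((pid = fork()) == -1)
                err(1, "fork");
        if (pid == 0) {
                /* Child: make sure SIGQUIT is at SIG_DFL, then raise it. */
                signal(SIGQUIT, SIG_DFL);
                raise(SIGQUIT);
                _exit(0);       /* not reached: the default action kills us */
        }
        if (waitpid(pid, &status, 0) == -1)
                err(1, "waitpid");
        if (WIFSIGNALED(status))
                printf("child killed by signal %d, core dumped: %s\n",
                    WTERMSIG(status), WCOREDUMP(status) ? "yes" : "no");
        return (0);
}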
  234 static void reschedule_signals(struct proc *p, sigset_t block, int flags);
  235 
  236 static void
  237 sigqueue_start(void)
  238 {
  239         ksiginfo_zone = uma_zcreate("ksiginfo", sizeof(ksiginfo_t),
  240                 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
  241         uma_prealloc(ksiginfo_zone, preallocate_siginfo);
  242         p31b_setcfg(CTL_P1003_1B_REALTIME_SIGNALS, _POSIX_REALTIME_SIGNALS);
  243         p31b_setcfg(CTL_P1003_1B_RTSIG_MAX, SIGRTMAX - SIGRTMIN + 1);
  244         p31b_setcfg(CTL_P1003_1B_SIGQUEUE_MAX, max_pending_per_proc);
  245 }
  246 
  247 ksiginfo_t *
  248 ksiginfo_alloc(int wait)
  249 {
  250         int flags;
  251 
  252         flags = M_ZERO;
  253         if (! wait)
  254                 flags |= M_NOWAIT;
  255         if (ksiginfo_zone != NULL)
  256                 return ((ksiginfo_t *)uma_zalloc(ksiginfo_zone, flags));
  257         return (NULL);
  258 }
  259 
  260 void
  261 ksiginfo_free(ksiginfo_t *ksi)
  262 {
  263         uma_zfree(ksiginfo_zone, ksi);
  264 }
  265 
  266 static __inline int
  267 ksiginfo_tryfree(ksiginfo_t *ksi)
  268 {
  269         if (!(ksi->ksi_flags & KSI_EXT)) {
  270                 uma_zfree(ksiginfo_zone, ksi);
  271                 return (1);
  272         }
  273         return (0);
  274 }
  275 
  276 void
  277 sigqueue_init(sigqueue_t *list, struct proc *p)
  278 {
  279         SIGEMPTYSET(list->sq_signals);
  280         SIGEMPTYSET(list->sq_kill);
  281         SIGEMPTYSET(list->sq_ptrace);
  282         TAILQ_INIT(&list->sq_list);
  283         list->sq_proc = p;
  284         list->sq_flags = SQ_INIT;
  285 }
  286 
  287 /*
  288  * Get a signal's ksiginfo.
  289  * Return:
  290  *      0       -       signal not found
  291  *      others  -       signal number
  292  */
  293 static int
  294 sigqueue_get(sigqueue_t *sq, int signo, ksiginfo_t *si)
  295 {
  296         struct proc *p = sq->sq_proc;
  297         struct ksiginfo *ksi, *next;
  298         int count = 0;
  299 
  300         KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
  301 
  302         if (!SIGISMEMBER(sq->sq_signals, signo))
  303                 return (0);
  304 
  305         if (SIGISMEMBER(sq->sq_ptrace, signo)) {
  306                 count++;
  307                 SIGDELSET(sq->sq_ptrace, signo);
  308                 si->ksi_flags |= KSI_PTRACE;
  309         }
  310         if (SIGISMEMBER(sq->sq_kill, signo)) {
  311                 count++;
  312                 if (count == 1)
  313                         SIGDELSET(sq->sq_kill, signo);
  314         }
  315 
  316         TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
  317                 if (ksi->ksi_signo == signo) {
  318                         if (count == 0) {
  319                                 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
  320                                 ksi->ksi_sigq = NULL;
  321                                 ksiginfo_copy(ksi, si);
  322                                 if (ksiginfo_tryfree(ksi) && p != NULL)
  323                                         p->p_pendingcnt--;
  324                         }
  325                         if (++count > 1)
  326                                 break;
  327                 }
  328         }
  329 
  330         if (count <= 1)
  331                 SIGDELSET(sq->sq_signals, signo);
  332         si->ksi_signo = signo;
  333         return (signo);
  334 }
  335 
  336 void
  337 sigqueue_take(ksiginfo_t *ksi)
  338 {
  339         struct ksiginfo *kp;
  340         struct proc     *p;
  341         sigqueue_t      *sq;
  342 
  343         if (ksi == NULL || (sq = ksi->ksi_sigq) == NULL)
  344                 return;
  345 
  346         p = sq->sq_proc;
  347         TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
  348         ksi->ksi_sigq = NULL;
  349         if (!(ksi->ksi_flags & KSI_EXT) && p != NULL)
  350                 p->p_pendingcnt--;
  351 
  352         for (kp = TAILQ_FIRST(&sq->sq_list); kp != NULL;
  353              kp = TAILQ_NEXT(kp, ksi_link)) {
  354                 if (kp->ksi_signo == ksi->ksi_signo)
  355                         break;
  356         }
  357         if (kp == NULL && !SIGISMEMBER(sq->sq_kill, ksi->ksi_signo) &&
  358             !SIGISMEMBER(sq->sq_ptrace, ksi->ksi_signo))
  359                 SIGDELSET(sq->sq_signals, ksi->ksi_signo);
  360 }
  361 
  362 static int
  363 sigqueue_add(sigqueue_t *sq, int signo, ksiginfo_t *si)
  364 {
  365         struct proc *p = sq->sq_proc;
  366         struct ksiginfo *ksi;
  367         int ret = 0;
  368 
  369         KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
  370 
  371         /*
  372          * SIGKILL/SIGSTOP cannot be caught or masked, so take the fast path
  373          * for these signals.
  374          */
  375         if (signo == SIGKILL || signo == SIGSTOP || si == NULL) {
  376                 SIGADDSET(sq->sq_kill, signo);
  377                 goto out_set_bit;
  378         }
  379 
  380         /* directly insert the ksi, don't copy it */
  381         if (si->ksi_flags & KSI_INS) {
  382                 if (si->ksi_flags & KSI_HEAD)
  383                         TAILQ_INSERT_HEAD(&sq->sq_list, si, ksi_link);
  384                 else
  385                         TAILQ_INSERT_TAIL(&sq->sq_list, si, ksi_link);
  386                 si->ksi_sigq = sq;
  387                 goto out_set_bit;
  388         }
  389 
  390         if (__predict_false(ksiginfo_zone == NULL)) {
  391                 SIGADDSET(sq->sq_kill, signo);
  392                 goto out_set_bit;
  393         }
  394 
  395         if (p != NULL && p->p_pendingcnt >= max_pending_per_proc) {
  396                 signal_overflow++;
  397                 ret = EAGAIN;
  398         } else if ((ksi = ksiginfo_alloc(0)) == NULL) {
  399                 signal_alloc_fail++;
  400                 ret = EAGAIN;
  401         } else {
  402                 if (p != NULL)
  403                         p->p_pendingcnt++;
  404                 ksiginfo_copy(si, ksi);
  405                 ksi->ksi_signo = signo;
  406                 if (si->ksi_flags & KSI_HEAD)
  407                         TAILQ_INSERT_HEAD(&sq->sq_list, ksi, ksi_link);
  408                 else
  409                         TAILQ_INSERT_TAIL(&sq->sq_list, ksi, ksi_link);
  410                 ksi->ksi_sigq = sq;
  411         }
  412 
  413         if (ret != 0) {
  414                 if ((si->ksi_flags & KSI_PTRACE) != 0) {
  415                         SIGADDSET(sq->sq_ptrace, signo);
  416                         ret = 0;
  417                         goto out_set_bit;
  418                 } else if ((si->ksi_flags & KSI_TRAP) != 0 ||
  419                     (si->ksi_flags & KSI_SIGQ) == 0) {
  420                         SIGADDSET(sq->sq_kill, signo);
  421                         ret = 0;
  422                         goto out_set_bit;
  423                 }
  424                 return (ret);
  425         }
  426 
  427 out_set_bit:
  428         SIGADDSET(sq->sq_signals, signo);
  429         return (ret);
  430 }
  431 
  432 void
  433 sigqueue_flush(sigqueue_t *sq)
  434 {
  435         struct proc *p = sq->sq_proc;
  436         ksiginfo_t *ksi;
  437 
  438         KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
  439 
  440         if (p != NULL)
  441                 PROC_LOCK_ASSERT(p, MA_OWNED);
  442 
  443         while ((ksi = TAILQ_FIRST(&sq->sq_list)) != NULL) {
  444                 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
  445                 ksi->ksi_sigq = NULL;
  446                 if (ksiginfo_tryfree(ksi) && p != NULL)
  447                         p->p_pendingcnt--;
  448         }
  449 
  450         SIGEMPTYSET(sq->sq_signals);
  451         SIGEMPTYSET(sq->sq_kill);
  452         SIGEMPTYSET(sq->sq_ptrace);
  453 }
  454 
  455 static void
  456 sigqueue_move_set(sigqueue_t *src, sigqueue_t *dst, const sigset_t *set)
  457 {
  458         sigset_t tmp;
  459         struct proc *p1, *p2;
  460         ksiginfo_t *ksi, *next;
  461 
  462         KASSERT(src->sq_flags & SQ_INIT, ("src sigqueue not inited"));
  463         KASSERT(dst->sq_flags & SQ_INIT, ("dst sigqueue not inited"));
  464         p1 = src->sq_proc;
  465         p2 = dst->sq_proc;
  466         /* Move siginfo to target list */
  467         TAILQ_FOREACH_SAFE(ksi, &src->sq_list, ksi_link, next) {
  468                 if (SIGISMEMBER(*set, ksi->ksi_signo)) {
  469                         TAILQ_REMOVE(&src->sq_list, ksi, ksi_link);
  470                         if (p1 != NULL)
  471                                 p1->p_pendingcnt--;
  472                         TAILQ_INSERT_TAIL(&dst->sq_list, ksi, ksi_link);
  473                         ksi->ksi_sigq = dst;
  474                         if (p2 != NULL)
  475                                 p2->p_pendingcnt++;
  476                 }
  477         }
  478 
  479         /* Move pending bits to target list */
  480         tmp = src->sq_kill;
  481         SIGSETAND(tmp, *set);
  482         SIGSETOR(dst->sq_kill, tmp);
  483         SIGSETNAND(src->sq_kill, tmp);
  484 
  485         tmp = src->sq_ptrace;
  486         SIGSETAND(tmp, *set);
  487         SIGSETOR(dst->sq_ptrace, tmp);
  488         SIGSETNAND(src->sq_ptrace, tmp);
  489 
  490         tmp = src->sq_signals;
  491         SIGSETAND(tmp, *set);
  492         SIGSETOR(dst->sq_signals, tmp);
  493         SIGSETNAND(src->sq_signals, tmp);
  494 }
  495 
  496 #if 0
  497 static void
  498 sigqueue_move(sigqueue_t *src, sigqueue_t *dst, int signo)
  499 {
  500         sigset_t set;
  501 
  502         SIGEMPTYSET(set);
  503         SIGADDSET(set, signo);
  504         sigqueue_move_set(src, dst, &set);
  505 }
  506 #endif
  507 
  508 static void
  509 sigqueue_delete_set(sigqueue_t *sq, const sigset_t *set)
  510 {
  511         struct proc *p = sq->sq_proc;
  512         ksiginfo_t *ksi, *next;
  513 
  514         KASSERT(sq->sq_flags & SQ_INIT, ("src sigqueue not inited"));
  515 
  516         /* Remove siginfo queue */
  517         TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
  518                 if (SIGISMEMBER(*set, ksi->ksi_signo)) {
  519                         TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
  520                         ksi->ksi_sigq = NULL;
  521                         if (ksiginfo_tryfree(ksi) && p != NULL)
  522                                 p->p_pendingcnt--;
  523                 }
  524         }
  525         SIGSETNAND(sq->sq_kill, *set);
  526         SIGSETNAND(sq->sq_ptrace, *set);
  527         SIGSETNAND(sq->sq_signals, *set);
  528 }
  529 
  530 void
  531 sigqueue_delete(sigqueue_t *sq, int signo)
  532 {
  533         sigset_t set;
  534 
  535         SIGEMPTYSET(set);
  536         SIGADDSET(set, signo);
  537         sigqueue_delete_set(sq, &set);
  538 }
  539 
  540 /* Remove a set of signals for a process */
  541 static void
  542 sigqueue_delete_set_proc(struct proc *p, const sigset_t *set)
  543 {
  544         sigqueue_t worklist;
  545         struct thread *td0;
  546 
  547         PROC_LOCK_ASSERT(p, MA_OWNED);
  548 
  549         sigqueue_init(&worklist, NULL);
  550         sigqueue_move_set(&p->p_sigqueue, &worklist, set);
  551 
  552         FOREACH_THREAD_IN_PROC(p, td0)
  553                 sigqueue_move_set(&td0->td_sigqueue, &worklist, set);
  554 
  555         sigqueue_flush(&worklist);
  556 }
  557 
  558 void
  559 sigqueue_delete_proc(struct proc *p, int signo)
  560 {
  561         sigset_t set;
  562 
  563         SIGEMPTYSET(set);
  564         SIGADDSET(set, signo);
  565         sigqueue_delete_set_proc(p, &set);
  566 }
  567 
  568 static void
  569 sigqueue_delete_stopmask_proc(struct proc *p)
  570 {
  571         sigset_t set;
  572 
  573         SIGEMPTYSET(set);
  574         SIGADDSET(set, SIGSTOP);
  575         SIGADDSET(set, SIGTSTP);
  576         SIGADDSET(set, SIGTTIN);
  577         SIGADDSET(set, SIGTTOU);
  578         sigqueue_delete_set_proc(p, &set);
  579 }
  580 
  581 /*
  582  * Determine signal that should be delivered to thread td, the current
  583  * thread, 0 if none.  If there is a pending stop signal with default
  584  * action, the process stops in issignal().
  585  */
  586 int
  587 cursig(struct thread *td)
  588 {
  589         PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
  590         mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED);
  591         THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
  592         return (SIGPENDING(td) ? issignal(td) : 0);
  593 }
  594 
  595 /*
  596  * Arrange for ast() to handle unmasked pending signals on return to user
  597  * mode.  This must be called whenever a signal is added to td_sigqueue or
  598  * unmasked in td_sigmask.
  599  */
  600 void
  601 signotify(struct thread *td)
  602 {
  603         struct proc *p;
  604 
  605         p = td->td_proc;
  606 
  607         PROC_LOCK_ASSERT(p, MA_OWNED);
  608 
  609         if (SIGPENDING(td)) {
  610                 thread_lock(td);
  611                 td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING;
  612                 thread_unlock(td);
  613         }
  614 }
  615 
  616 int
  617 sigonstack(size_t sp)
  618 {
  619         struct thread *td = curthread;
  620 
  621         return ((td->td_pflags & TDP_ALTSTACK) ?
  622 #if defined(COMPAT_43)
  623             ((td->td_sigstk.ss_size == 0) ?
  624                 (td->td_sigstk.ss_flags & SS_ONSTACK) :
  625                 ((sp - (size_t)td->td_sigstk.ss_sp) < td->td_sigstk.ss_size))
  626 #else
  627             ((sp - (size_t)td->td_sigstk.ss_sp) < td->td_sigstk.ss_size)
  628 #endif
  629             : 0);
  630 }
  631 
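sigonstack() above is what lets the kernel decide whether a signal frame should be built on the alternate stack registered with sigaltstack(2). An illustrative userland counterpart, not part of kern_sig.c; printf(3) inside a handler is not async-signal-safe and is used here purely for demonstration:

/* Illustrative only: run a handler on an alternate stack (SA_ONSTACK). */
#include <err.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void
handler(int sig)
{
        stack_t ss;

        /* SS_ONSTACK is reported while executing on the alternate stack. */
        if (sigaltstack(NULL, &ss) == 0)
                printf("handler for %d: on altstack = %s\n", sig,
                    (ss.ss_flags & SS_ONSTACK) ? "yes" : "no");
}

int
main(void)
{
        struct sigaction sa;
        stack_t ss;

        if ((ss.ss_sp = malloc(SIGSTKSZ)) == NULL)
                err(1, "malloc");
        ss.ss_size = SIGSTKSZ;
        ss.ss_flags = 0;
        if (sigaltstack(&ss, NULL) == -1)
                err(1, "sigaltstack");

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = handler;
        sa.sa_flags = SA_ONSTACK;       /* recorded in ps_sigonstack above */
        sigemptyset(&sa.sa_mask);
        if (sigaction(SIGUSR1, &sa, NULL) == -1)
                err(1, "sigaction");

        raise(SIGUSR1);
        return (0);
}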
  632 static __inline int
  633 sigprop(int sig)
  634 {
  635 
  636         if (sig > 0 && sig < NSIG)
  637                 return (sigproptbl[_SIG_IDX(sig)]);
  638         return (0);
  639 }
  640 
  641 int
  642 sig_ffs(sigset_t *set)
  643 {
  644         int i;
  645 
  646         for (i = 0; i < _SIG_WORDS; i++)
  647                 if (set->__bits[i])
  648                         return (ffs(set->__bits[i]) + (i * 32));
  649         return (0);
  650 }
  651 
  652 static bool
  653 sigact_flag_test(const struct sigaction *act, int flag)
  654 {
  655 
  656         /*
  657          * SA_SIGINFO is reset when signal disposition is set to
  658          * ignore or default.  Other flags are kept according to user
  659          * settings.
  660          */
  661         return ((act->sa_flags & flag) != 0 && (flag != SA_SIGINFO ||
  662             ((__sighandler_t *)act->sa_sigaction != SIG_IGN &&
  663             (__sighandler_t *)act->sa_sigaction != SIG_DFL)));
  664 }
  665 
  666 /*
  667  * kern_sigaction
  668  * sigaction
  669  * freebsd4_sigaction
  670  * osigaction
  671  */
  672 int
  673 kern_sigaction(struct thread *td, int sig, const struct sigaction *act,
  674     struct sigaction *oact, int flags)
  675 {
  676         struct sigacts *ps;
  677         struct proc *p = td->td_proc;
  678 
  679         if (!_SIG_VALID(sig))
  680                 return (EINVAL);
  681         if (act != NULL && act->sa_handler != SIG_DFL &&
  682             act->sa_handler != SIG_IGN && (act->sa_flags & ~(SA_ONSTACK |
  683             SA_RESTART | SA_RESETHAND | SA_NOCLDSTOP | SA_NODEFER |
  684             SA_NOCLDWAIT | SA_SIGINFO)) != 0)
  685                 return (EINVAL);
  686 
  687         PROC_LOCK(p);
  688         ps = p->p_sigacts;
  689         mtx_lock(&ps->ps_mtx);
  690         if (oact) {
  691                 oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
  692                 oact->sa_flags = 0;
  693                 if (SIGISMEMBER(ps->ps_sigonstack, sig))
  694                         oact->sa_flags |= SA_ONSTACK;
  695                 if (!SIGISMEMBER(ps->ps_sigintr, sig))
  696                         oact->sa_flags |= SA_RESTART;
  697                 if (SIGISMEMBER(ps->ps_sigreset, sig))
  698                         oact->sa_flags |= SA_RESETHAND;
  699                 if (SIGISMEMBER(ps->ps_signodefer, sig))
  700                         oact->sa_flags |= SA_NODEFER;
  701                 if (SIGISMEMBER(ps->ps_siginfo, sig)) {
  702                         oact->sa_flags |= SA_SIGINFO;
  703                         oact->sa_sigaction =
  704                             (__siginfohandler_t *)ps->ps_sigact[_SIG_IDX(sig)];
  705                 } else
  706                         oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
  707                 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP)
  708                         oact->sa_flags |= SA_NOCLDSTOP;
  709                 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT)
  710                         oact->sa_flags |= SA_NOCLDWAIT;
  711         }
  712         if (act) {
  713                 if ((sig == SIGKILL || sig == SIGSTOP) &&
  714                     act->sa_handler != SIG_DFL) {
  715                         mtx_unlock(&ps->ps_mtx);
  716                         PROC_UNLOCK(p);
  717                         return (EINVAL);
  718                 }
  719 
  720                 /*
  721                  * Change setting atomically.
  722                  */
  723 
  724                 ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
  725                 SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
  726                 if (sigact_flag_test(act, SA_SIGINFO)) {
  727                         ps->ps_sigact[_SIG_IDX(sig)] =
  728                             (__sighandler_t *)act->sa_sigaction;
  729                         SIGADDSET(ps->ps_siginfo, sig);
  730                 } else {
  731                         ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
  732                         SIGDELSET(ps->ps_siginfo, sig);
  733                 }
  734                 if (!sigact_flag_test(act, SA_RESTART))
  735                         SIGADDSET(ps->ps_sigintr, sig);
  736                 else
  737                         SIGDELSET(ps->ps_sigintr, sig);
  738                 if (sigact_flag_test(act, SA_ONSTACK))
  739                         SIGADDSET(ps->ps_sigonstack, sig);
  740                 else
  741                         SIGDELSET(ps->ps_sigonstack, sig);
  742                 if (sigact_flag_test(act, SA_RESETHAND))
  743                         SIGADDSET(ps->ps_sigreset, sig);
  744                 else
  745                         SIGDELSET(ps->ps_sigreset, sig);
  746                 if (sigact_flag_test(act, SA_NODEFER))
  747                         SIGADDSET(ps->ps_signodefer, sig);
  748                 else
  749                         SIGDELSET(ps->ps_signodefer, sig);
  750                 if (sig == SIGCHLD) {
  751                         if (act->sa_flags & SA_NOCLDSTOP)
  752                                 ps->ps_flag |= PS_NOCLDSTOP;
  753                         else
  754                                 ps->ps_flag &= ~PS_NOCLDSTOP;
  755                         if (act->sa_flags & SA_NOCLDWAIT) {
  756                                 /*
  757                                  * Paranoia: since SA_NOCLDWAIT is implemented
  758                                  * by reparenting the dying child to PID 1 (and
  759                                  * trust it to reap the zombie), PID 1 itself
  760                                  * is forbidden to set SA_NOCLDWAIT.
  761                                  */
  762                                 if (p->p_pid == 1)
  763                                         ps->ps_flag &= ~PS_NOCLDWAIT;
  764                                 else
  765                                         ps->ps_flag |= PS_NOCLDWAIT;
  766                         } else
  767                                 ps->ps_flag &= ~PS_NOCLDWAIT;
  768                         if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
  769                                 ps->ps_flag |= PS_CLDSIGIGN;
  770                         else
  771                                 ps->ps_flag &= ~PS_CLDSIGIGN;
  772                 }
  773                 /*
  774                  * Set bit in ps_sigignore for signals that are set to SIG_IGN,
  775                  * and for signals set to SIG_DFL where the default is to
  776                  * ignore. However, don't put SIGCONT in ps_sigignore, as we
  777                  * have to restart the process.
  778                  */
  779                 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
  780                     (sigprop(sig) & SA_IGNORE &&
  781                      ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
  782                         /* never to be seen again */
  783                         sigqueue_delete_proc(p, sig);
  784                         if (sig != SIGCONT)
  785                                 /* easier in psignal */
  786                                 SIGADDSET(ps->ps_sigignore, sig);
  787                         SIGDELSET(ps->ps_sigcatch, sig);
  788                 } else {
  789                         SIGDELSET(ps->ps_sigignore, sig);
  790                         if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
  791                                 SIGDELSET(ps->ps_sigcatch, sig);
  792                         else
  793                                 SIGADDSET(ps->ps_sigcatch, sig);
  794                 }
  795 #ifdef COMPAT_FREEBSD4
  796                 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
  797                     ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
  798                     (flags & KSA_FREEBSD4) == 0)
  799                         SIGDELSET(ps->ps_freebsd4, sig);
  800                 else
  801                         SIGADDSET(ps->ps_freebsd4, sig);
  802 #endif
  803 #ifdef COMPAT_43
  804                 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
  805                     ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
  806                     (flags & KSA_OSIGSET) == 0)
  807                         SIGDELSET(ps->ps_osigset, sig);
  808                 else
  809                         SIGADDSET(ps->ps_osigset, sig);
  810 #endif
  811         }
  812         mtx_unlock(&ps->ps_mtx);
  813         PROC_UNLOCK(p);
  814         return (0);
  815 }
  816 
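Seen from userland, the checks and bookkeeping in kern_sigaction() above (SA_SIGINFO recorded in ps_siginfo, SA_RESTART in ps_sigintr, the rejection of handlers for SIGKILL and SIGSTOP) correspond to ordinary sigaction(2) calls. An illustrative sketch, not part of kern_sig.c; SIGUSR1 is an arbitrary choice and printf(3) in the handler is for demonstration only:

/* Illustrative only: sigaction(2) calls exercising the checks made above. */
#include <err.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void
usr1_info(int sig, siginfo_t *si, void *ctx)
{
        (void)ctx;
        /* With SA_SIGINFO the kernel supplies sender details in *si. */
        printf("sig %d from pid %d uid %d\n", sig,
            (int)si->si_pid, (int)si->si_uid);
}

int
main(void)
{
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = usr1_info;
        sa.sa_flags = SA_SIGINFO | SA_RESTART;
        sigemptyset(&sa.sa_mask);
        if (sigaction(SIGUSR1, &sa, NULL) == -1)
                err(1, "sigaction");

        /* Any non-default handler for SIGKILL is rejected with EINVAL. */
        sa.sa_flags = 0;
        sa.sa_handler = SIG_IGN;
        if (sigaction(SIGKILL, &sa, NULL) == -1)
                warn("sigaction(SIGKILL)");

        kill(getpid(), SIGUSR1);
        return (0);
}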
  817 #ifndef _SYS_SYSPROTO_H_
  818 struct sigaction_args {
  819         int     sig;
  820         struct  sigaction *act;
  821         struct  sigaction *oact;
  822 };
  823 #endif
  824 int
  825 sys_sigaction(td, uap)
  826         struct thread *td;
  827         register struct sigaction_args *uap;
  828 {
  829         struct sigaction act, oact;
  830         register struct sigaction *actp, *oactp;
  831         int error;
  832 
  833         actp = (uap->act != NULL) ? &act : NULL;
  834         oactp = (uap->oact != NULL) ? &oact : NULL;
  835         if (actp) {
  836                 error = copyin(uap->act, actp, sizeof(act));
  837                 if (error)
  838                         return (error);
  839         }
  840         error = kern_sigaction(td, uap->sig, actp, oactp, 0);
  841         if (oactp && !error)
  842                 error = copyout(oactp, uap->oact, sizeof(oact));
  843         return (error);
  844 }
  845 
  846 #ifdef COMPAT_FREEBSD4
  847 #ifndef _SYS_SYSPROTO_H_
  848 struct freebsd4_sigaction_args {
  849         int     sig;
  850         struct  sigaction *act;
  851         struct  sigaction *oact;
  852 };
  853 #endif
  854 int
  855 freebsd4_sigaction(td, uap)
  856         struct thread *td;
  857         register struct freebsd4_sigaction_args *uap;
  858 {
  859         struct sigaction act, oact;
  860         register struct sigaction *actp, *oactp;
  861         int error;
  862 
  863 
  864         actp = (uap->act != NULL) ? &act : NULL;
  865         oactp = (uap->oact != NULL) ? &oact : NULL;
  866         if (actp) {
  867                 error = copyin(uap->act, actp, sizeof(act));
  868                 if (error)
  869                         return (error);
  870         }
  871         error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4);
  872         if (oactp && !error)
  873                 error = copyout(oactp, uap->oact, sizeof(oact));
  874         return (error);
  875 }
   876 #endif  /* COMPAT_FREEBSD4 */
  877 
  878 #ifdef COMPAT_43        /* XXX - COMPAT_FBSD3 */
  879 #ifndef _SYS_SYSPROTO_H_
  880 struct osigaction_args {
  881         int     signum;
  882         struct  osigaction *nsa;
  883         struct  osigaction *osa;
  884 };
  885 #endif
  886 int
  887 osigaction(td, uap)
  888         struct thread *td;
  889         register struct osigaction_args *uap;
  890 {
  891         struct osigaction sa;
  892         struct sigaction nsa, osa;
  893         register struct sigaction *nsap, *osap;
  894         int error;
  895 
  896         if (uap->signum <= 0 || uap->signum >= ONSIG)
  897                 return (EINVAL);
  898 
  899         nsap = (uap->nsa != NULL) ? &nsa : NULL;
  900         osap = (uap->osa != NULL) ? &osa : NULL;
  901 
  902         if (nsap) {
  903                 error = copyin(uap->nsa, &sa, sizeof(sa));
  904                 if (error)
  905                         return (error);
  906                 nsap->sa_handler = sa.sa_handler;
  907                 nsap->sa_flags = sa.sa_flags;
  908                 OSIG2SIG(sa.sa_mask, nsap->sa_mask);
  909         }
  910         error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
  911         if (osap && !error) {
  912                 sa.sa_handler = osap->sa_handler;
  913                 sa.sa_flags = osap->sa_flags;
  914                 SIG2OSIG(osap->sa_mask, sa.sa_mask);
  915                 error = copyout(&sa, uap->osa, sizeof(sa));
  916         }
  917         return (error);
  918 }
  919 
  920 #if !defined(__i386__)
  921 /* Avoid replicating the same stub everywhere */
  922 int
  923 osigreturn(td, uap)
  924         struct thread *td;
  925         struct osigreturn_args *uap;
  926 {
  927 
  928         return (nosys(td, (struct nosys_args *)uap));
  929 }
  930 #endif
  931 #endif /* COMPAT_43 */
  932 
  933 /*
  934  * Initialize signal state for process 0;
  935  * set to ignore signals that are ignored by default.
  936  */
  937 void
  938 siginit(p)
  939         struct proc *p;
  940 {
  941         register int i;
  942         struct sigacts *ps;
  943 
  944         PROC_LOCK(p);
  945         ps = p->p_sigacts;
  946         mtx_lock(&ps->ps_mtx);
  947         for (i = 1; i <= NSIG; i++) {
  948                 if (sigprop(i) & SA_IGNORE && i != SIGCONT) {
  949                         SIGADDSET(ps->ps_sigignore, i);
  950                 }
  951         }
  952         mtx_unlock(&ps->ps_mtx);
  953         PROC_UNLOCK(p);
  954 }
  955 
  956 /*
  957  * Reset specified signal to the default disposition.
  958  */
  959 static void
  960 sigdflt(struct sigacts *ps, int sig)
  961 {
  962 
  963         mtx_assert(&ps->ps_mtx, MA_OWNED);
  964         SIGDELSET(ps->ps_sigcatch, sig);
  965         if ((sigprop(sig) & SA_IGNORE) != 0 && sig != SIGCONT)
  966                 SIGADDSET(ps->ps_sigignore, sig);
  967         ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
  968         SIGDELSET(ps->ps_siginfo, sig);
  969 }
  970 
  971 /*
  972  * Reset signals for an exec of the specified process.
  973  */
  974 void
  975 execsigs(struct proc *p)
  976 {
  977         sigset_t osigignore;
  978         struct sigacts *ps;
  979         int sig;
  980         struct thread *td;
  981 
  982         /*
  983          * Reset caught signals.  Held signals remain held
  984          * through td_sigmask (unless they were caught,
  985          * and are now ignored by default).
  986          */
  987         PROC_LOCK_ASSERT(p, MA_OWNED);
  988         ps = p->p_sigacts;
  989         mtx_lock(&ps->ps_mtx);
  990         while (SIGNOTEMPTY(ps->ps_sigcatch)) {
  991                 sig = sig_ffs(&ps->ps_sigcatch);
  992                 sigdflt(ps, sig);
  993                 if ((sigprop(sig) & SA_IGNORE) != 0)
  994                         sigqueue_delete_proc(p, sig);
  995         }
  996 
  997         /*
  998          * As CloudABI processes cannot modify signal handlers, fully
  999          * reset all signals to their default behavior. Do ignore
 1000          * SIGPIPE, as it would otherwise be impossible to recover from
 1001          * writes to broken pipes and sockets.
 1002          */
 1003         if (SV_PROC_ABI(p) == SV_ABI_CLOUDABI) {
 1004                 osigignore = ps->ps_sigignore;
 1005                 while (SIGNOTEMPTY(osigignore)) {
 1006                         sig = sig_ffs(&osigignore);
 1007                         SIGDELSET(osigignore, sig);
 1008                         if (sig != SIGPIPE)
 1009                                 sigdflt(ps, sig);
 1010                 }
 1011                 SIGADDSET(ps->ps_sigignore, SIGPIPE);
 1012         }
 1013 
 1014         /*
 1015          * Reset stack state to the user stack.
 1016          * Clear set of signals caught on the signal stack.
 1017          */
 1018         td = curthread;
 1019         MPASS(td->td_proc == p);
 1020         td->td_sigstk.ss_flags = SS_DISABLE;
 1021         td->td_sigstk.ss_size = 0;
 1022         td->td_sigstk.ss_sp = 0;
 1023         td->td_pflags &= ~TDP_ALTSTACK;
 1024         /*
  1025          * Reset the "no zombies if child dies" flag, as Solaris does.
 1026          */
 1027         ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
 1028         if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
 1029                 ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
 1030         mtx_unlock(&ps->ps_mtx);
 1031 }
 1032 
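The effect of execsigs() is visible from userland: dispositions set to SIG_IGN survive execve(2), while caught handlers necessarily revert to SIG_DFL. A self-contained illustrative sketch, not part of kern_sig.c, which re-executes itself and assumes it was started with a usable path in argv[0]:

/* Illustrative only: signal dispositions across exec. */
#include <err.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void
dummy(int sig)
{
        (void)sig;
}

int
main(int argc, char *argv[])
{
        struct sigaction sa;

        if (argc > 1 && strcmp(argv[1], "after-exec") == 0) {
                /* New image: report what execsigs() left behind. */
                sigaction(SIGUSR1, NULL, &sa);
                printf("SIGUSR1: %s\n",
                    sa.sa_handler == SIG_IGN ? "still ignored" : "default");
                sigaction(SIGUSR2, NULL, &sa);
                printf("SIGUSR2: %s\n",
                    sa.sa_handler == SIG_DFL ? "reset to default" : "other");
                return (0);
        }

        signal(SIGUSR1, SIG_IGN);       /* ignored: preserved across exec */
        signal(SIGUSR2, dummy);         /* caught: reset to SIG_DFL by execsigs() */

        execl(argv[0], argv[0], "after-exec", (char *)NULL);
        err(1, "execl");
}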
 1033 /*
 1034  * kern_sigprocmask()
 1035  *
 1036  *      Manipulate signal mask.
 1037  */
 1038 int
 1039 kern_sigprocmask(struct thread *td, int how, sigset_t *set, sigset_t *oset,
 1040     int flags)
 1041 {
 1042         sigset_t new_block, oset1;
 1043         struct proc *p;
 1044         int error;
 1045 
 1046         p = td->td_proc;
 1047         if ((flags & SIGPROCMASK_PROC_LOCKED) != 0)
 1048                 PROC_LOCK_ASSERT(p, MA_OWNED);
 1049         else
 1050                 PROC_LOCK(p);
 1051         mtx_assert(&p->p_sigacts->ps_mtx, (flags & SIGPROCMASK_PS_LOCKED) != 0
 1052             ? MA_OWNED : MA_NOTOWNED);
 1053         if (oset != NULL)
 1054                 *oset = td->td_sigmask;
 1055 
 1056         error = 0;
 1057         if (set != NULL) {
 1058                 switch (how) {
 1059                 case SIG_BLOCK:
 1060                         SIG_CANTMASK(*set);
 1061                         oset1 = td->td_sigmask;
 1062                         SIGSETOR(td->td_sigmask, *set);
 1063                         new_block = td->td_sigmask;
 1064                         SIGSETNAND(new_block, oset1);
 1065                         break;
 1066                 case SIG_UNBLOCK:
 1067                         SIGSETNAND(td->td_sigmask, *set);
 1068                         signotify(td);
 1069                         goto out;
 1070                 case SIG_SETMASK:
 1071                         SIG_CANTMASK(*set);
 1072                         oset1 = td->td_sigmask;
 1073                         if (flags & SIGPROCMASK_OLD)
 1074                                 SIGSETLO(td->td_sigmask, *set);
 1075                         else
 1076                                 td->td_sigmask = *set;
 1077                         new_block = td->td_sigmask;
 1078                         SIGSETNAND(new_block, oset1);
 1079                         signotify(td);
 1080                         break;
 1081                 default:
 1082                         error = EINVAL;
 1083                         goto out;
 1084                 }
 1085 
 1086                 /*
 1087                  * The new_block set contains signals that were not previously
 1088                  * blocked, but are blocked now.
 1089                  *
 1090                  * In case we block any signal that was not previously blocked
 1091                  * for td, and process has the signal pending, try to schedule
 1092                  * signal delivery to some thread that does not block the
 1093                  * signal, possibly waking it up.
 1094                  */
 1095                 if (p->p_numthreads != 1)
 1096                         reschedule_signals(p, new_block, flags);
 1097         }
 1098 
 1099 out:
 1100         if (!(flags & SIGPROCMASK_PROC_LOCKED))
 1101                 PROC_UNLOCK(p);
 1102         return (error);
 1103 }
 1104 
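kern_sigprocmask() above is reached from userland through sigprocmask(2) and pthread_sigmask(3); note how SIG_CANTMASK silently strips SIGKILL and SIGSTOP from a requested block set. An illustrative sketch, not part of kern_sig.c:

/* Illustrative only: blocking signals with sigprocmask(2). */
#include <err.h>
#include <signal.h>
#include <stdio.h>

int
main(void)
{
        sigset_t set, oset;

        sigemptyset(&set);
        sigaddset(&set, SIGINT);
        sigaddset(&set, SIGKILL);       /* silently dropped by SIG_CANTMASK */

        if (sigprocmask(SIG_BLOCK, &set, &oset) == -1)
                err(1, "sigprocmask");

        /* SIGINT is now held pending; SIGKILL can never be blocked. */
        sigemptyset(&set);
        if (sigprocmask(SIG_SETMASK, NULL, &set) == 0)
                printf("SIGINT blocked: %d, SIGKILL blocked: %d\n",
                    sigismember(&set, SIGINT), sigismember(&set, SIGKILL));

        /* Restore the previous mask. */
        (void)sigprocmask(SIG_SETMASK, &oset, NULL);
        return (0);
}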
 1105 #ifndef _SYS_SYSPROTO_H_
 1106 struct sigprocmask_args {
 1107         int     how;
 1108         const sigset_t *set;
 1109         sigset_t *oset;
 1110 };
 1111 #endif
 1112 int
 1113 sys_sigprocmask(td, uap)
 1114         register struct thread *td;
 1115         struct sigprocmask_args *uap;
 1116 {
 1117         sigset_t set, oset;
 1118         sigset_t *setp, *osetp;
 1119         int error;
 1120 
 1121         setp = (uap->set != NULL) ? &set : NULL;
 1122         osetp = (uap->oset != NULL) ? &oset : NULL;
 1123         if (setp) {
 1124                 error = copyin(uap->set, setp, sizeof(set));
 1125                 if (error)
 1126                         return (error);
 1127         }
 1128         error = kern_sigprocmask(td, uap->how, setp, osetp, 0);
 1129         if (osetp && !error) {
 1130                 error = copyout(osetp, uap->oset, sizeof(oset));
 1131         }
 1132         return (error);
 1133 }
 1134 
 1135 #ifdef COMPAT_43        /* XXX - COMPAT_FBSD3 */
 1136 #ifndef _SYS_SYSPROTO_H_
 1137 struct osigprocmask_args {
 1138         int     how;
 1139         osigset_t mask;
 1140 };
 1141 #endif
 1142 int
 1143 osigprocmask(td, uap)
 1144         register struct thread *td;
 1145         struct osigprocmask_args *uap;
 1146 {
 1147         sigset_t set, oset;
 1148         int error;
 1149 
 1150         OSIG2SIG(uap->mask, set);
 1151         error = kern_sigprocmask(td, uap->how, &set, &oset, 1);
 1152         SIG2OSIG(oset, td->td_retval[0]);
 1153         return (error);
 1154 }
 1155 #endif /* COMPAT_43 */
 1156 
 1157 int
 1158 sys_sigwait(struct thread *td, struct sigwait_args *uap)
 1159 {
 1160         ksiginfo_t ksi;
 1161         sigset_t set;
 1162         int error;
 1163 
 1164         error = copyin(uap->set, &set, sizeof(set));
 1165         if (error) {
 1166                 td->td_retval[0] = error;
 1167                 return (0);
 1168         }
 1169 
 1170         error = kern_sigtimedwait(td, set, &ksi, NULL);
 1171         if (error) {
 1172                 if (error == EINTR && td->td_proc->p_osrel < P_OSREL_SIGWAIT)
 1173                         error = ERESTART;
 1174                 if (error == ERESTART)
 1175                         return (error);
 1176                 td->td_retval[0] = error;
 1177                 return (0);
 1178         }
 1179 
 1180         error = copyout(&ksi.ksi_signo, uap->sig, sizeof(ksi.ksi_signo));
 1181         td->td_retval[0] = error;
 1182         return (0);
 1183 }
 1184 
 1185 int
 1186 sys_sigtimedwait(struct thread *td, struct sigtimedwait_args *uap)
 1187 {
 1188         struct timespec ts;
 1189         struct timespec *timeout;
 1190         sigset_t set;
 1191         ksiginfo_t ksi;
 1192         int error;
 1193 
 1194         if (uap->timeout) {
 1195                 error = copyin(uap->timeout, &ts, sizeof(ts));
 1196                 if (error)
 1197                         return (error);
 1198 
 1199                 timeout = &ts;
 1200         } else
 1201                 timeout = NULL;
 1202 
 1203         error = copyin(uap->set, &set, sizeof(set));
 1204         if (error)
 1205                 return (error);
 1206 
 1207         error = kern_sigtimedwait(td, set, &ksi, timeout);
 1208         if (error)
 1209                 return (error);
 1210 
 1211         if (uap->info)
 1212                 error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));
 1213 
 1214         if (error == 0)
 1215                 td->td_retval[0] = ksi.ksi_signo;
 1216         return (error);
 1217 }
 1218 
 1219 int
 1220 sys_sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap)
 1221 {
 1222         ksiginfo_t ksi;
 1223         sigset_t set;
 1224         int error;
 1225 
 1226         error = copyin(uap->set, &set, sizeof(set));
 1227         if (error)
 1228                 return (error);
 1229 
 1230         error = kern_sigtimedwait(td, set, &ksi, NULL);
 1231         if (error)
 1232                 return (error);
 1233 
 1234         if (uap->info)
 1235                 error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));
 1236 
 1237         if (error == 0)
 1238                 td->td_retval[0] = ksi.ksi_signo;
 1239         return (error);
 1240 }
 1241 
 1242 int
 1243 kern_sigtimedwait(struct thread *td, sigset_t waitset, ksiginfo_t *ksi,
 1244         struct timespec *timeout)
 1245 {
 1246         struct sigacts *ps;
 1247         sigset_t saved_mask, new_block;
 1248         struct proc *p;
 1249         int error, sig, timo, timevalid = 0;
 1250         struct timespec rts, ets, ts;
 1251         struct timeval tv;
 1252 
 1253         p = td->td_proc;
 1254         error = 0;
 1255         ets.tv_sec = 0;
 1256         ets.tv_nsec = 0;
 1257 
 1258         if (timeout != NULL) {
 1259                 if (timeout->tv_nsec >= 0 && timeout->tv_nsec < 1000000000) {
 1260                         timevalid = 1;
 1261                         getnanouptime(&rts);
 1262                         ets = rts;
 1263                         timespecadd(&ets, timeout);
 1264                 }
 1265         }
 1266         ksiginfo_init(ksi);
 1267         /* Some signals can not be waited for. */
 1268         SIG_CANTMASK(waitset);
 1269         ps = p->p_sigacts;
 1270         PROC_LOCK(p);
 1271         saved_mask = td->td_sigmask;
 1272         SIGSETNAND(td->td_sigmask, waitset);
 1273         for (;;) {
 1274                 mtx_lock(&ps->ps_mtx);
 1275                 sig = cursig(td);
 1276                 mtx_unlock(&ps->ps_mtx);
 1277                 KASSERT(sig >= 0, ("sig %d", sig));
 1278                 if (sig != 0 && SIGISMEMBER(waitset, sig)) {
 1279                         if (sigqueue_get(&td->td_sigqueue, sig, ksi) != 0 ||
 1280                             sigqueue_get(&p->p_sigqueue, sig, ksi) != 0) {
 1281                                 error = 0;
 1282                                 break;
 1283                         }
 1284                 }
 1285 
 1286                 if (error != 0)
 1287                         break;
 1288 
 1289                 /*
 1290                  * POSIX says this must be checked after looking for pending
 1291                  * signals.
 1292                  */
 1293                 if (timeout != NULL) {
 1294                         if (!timevalid) {
 1295                                 error = EINVAL;
 1296                                 break;
 1297                         }
 1298                         getnanouptime(&rts);
 1299                         if (timespeccmp(&rts, &ets, >=)) {
 1300                                 error = EAGAIN;
 1301                                 break;
 1302                         }
 1303                         ts = ets;
 1304                         timespecsub(&ts, &rts);
 1305                         TIMESPEC_TO_TIMEVAL(&tv, &ts);
 1306                         timo = tvtohz(&tv);
 1307                 } else {
 1308                         timo = 0;
 1309                 }
 1310 
 1311                 error = msleep(ps, &p->p_mtx, PPAUSE|PCATCH, "sigwait", timo);
 1312 
 1313                 if (timeout != NULL) {
 1314                         if (error == ERESTART) {
 1315                                 /* Timeout can not be restarted. */
 1316                                 error = EINTR;
 1317                         } else if (error == EAGAIN) {
  1318                                 /* We will recompute the timeout ourselves. */
 1319                                 error = 0;
 1320                         }
 1321                 }
 1322         }
 1323 
 1324         new_block = saved_mask;
 1325         SIGSETNAND(new_block, td->td_sigmask);
 1326         td->td_sigmask = saved_mask;
 1327         /*
 1328          * Fewer signals can be delivered to us, reschedule signal
 1329          * notification.
 1330          */
 1331         if (p->p_numthreads != 1)
 1332                 reschedule_signals(p, new_block, 0);
 1333 
 1334         if (error == 0) {
 1335                 SDT_PROBE2(proc, , , signal__clear, sig, ksi);
 1336 
 1337                 if (ksi->ksi_code == SI_TIMER)
 1338                         itimer_accept(p, ksi->ksi_timerid, ksi);
 1339 
 1340 #ifdef KTRACE
 1341                 if (KTRPOINT(td, KTR_PSIG)) {
 1342                         sig_t action;
 1343 
 1344                         mtx_lock(&ps->ps_mtx);
 1345                         action = ps->ps_sigact[_SIG_IDX(sig)];
 1346                         mtx_unlock(&ps->ps_mtx);
 1347                         ktrpsig(sig, action, &td->td_sigmask, ksi->ksi_code);
 1348                 }
 1349 #endif
 1350                 if (sig == SIGKILL)
 1351                         sigexit(td, sig);
 1352         }
 1353         PROC_UNLOCK(p);
 1354         return (error);
 1355 }
 1356 
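kern_sigtimedwait() above backs sys_sigwait(), sys_sigtimedwait() and sys_sigwaitinfo(). An illustrative userland use of the timed variant, not part of kern_sig.c; the two-second timeout and SIGUSR1 are arbitrary:

/* Illustrative only: wait for a blocked signal with a timeout. */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>

int
main(void)
{
        struct timespec ts = { 2, 0 };  /* two-second timeout */
        siginfo_t si;
        sigset_t set;
        int sig;

        /* The signal must be blocked so it stays queued instead of delivered. */
        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);

        sig = sigtimedwait(&set, &si, &ts);
        if (sig == -1) {
                if (errno == EAGAIN)
                        printf("timed out, no SIGUSR1 pending\n");
                else
                        perror("sigtimedwait");
        } else {
                printf("got signal %d from pid %d\n", sig, (int)si.si_pid);
        }
        return (0);
}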
 1357 #ifndef _SYS_SYSPROTO_H_
 1358 struct sigpending_args {
 1359         sigset_t        *set;
 1360 };
 1361 #endif
 1362 int
 1363 sys_sigpending(td, uap)
 1364         struct thread *td;
 1365         struct sigpending_args *uap;
 1366 {
 1367         struct proc *p = td->td_proc;
 1368         sigset_t pending;
 1369 
 1370         PROC_LOCK(p);
 1371         pending = p->p_sigqueue.sq_signals;
 1372         SIGSETOR(pending, td->td_sigqueue.sq_signals);
 1373         PROC_UNLOCK(p);
 1374         return (copyout(&pending, uap->set, sizeof(sigset_t)));
 1375 }
 1376 
 1377 #ifdef COMPAT_43        /* XXX - COMPAT_FBSD3 */
 1378 #ifndef _SYS_SYSPROTO_H_
 1379 struct osigpending_args {
 1380         int     dummy;
 1381 };
 1382 #endif
 1383 int
 1384 osigpending(td, uap)
 1385         struct thread *td;
 1386         struct osigpending_args *uap;
 1387 {
 1388         struct proc *p = td->td_proc;
 1389         sigset_t pending;
 1390 
 1391         PROC_LOCK(p);
 1392         pending = p->p_sigqueue.sq_signals;
 1393         SIGSETOR(pending, td->td_sigqueue.sq_signals);
 1394         PROC_UNLOCK(p);
 1395         SIG2OSIG(pending, td->td_retval[0]);
 1396         return (0);
 1397 }
 1398 #endif /* COMPAT_43 */
 1399 
 1400 #if defined(COMPAT_43)
 1401 /*
 1402  * Generalized interface signal handler, 4.3-compatible.
 1403  */
 1404 #ifndef _SYS_SYSPROTO_H_
 1405 struct osigvec_args {
 1406         int     signum;
 1407         struct  sigvec *nsv;
 1408         struct  sigvec *osv;
 1409 };
 1410 #endif
 1411 /* ARGSUSED */
 1412 int
 1413 osigvec(td, uap)
 1414         struct thread *td;
 1415         register struct osigvec_args *uap;
 1416 {
 1417         struct sigvec vec;
 1418         struct sigaction nsa, osa;
 1419         register struct sigaction *nsap, *osap;
 1420         int error;
 1421 
 1422         if (uap->signum <= 0 || uap->signum >= ONSIG)
 1423                 return (EINVAL);
 1424         nsap = (uap->nsv != NULL) ? &nsa : NULL;
 1425         osap = (uap->osv != NULL) ? &osa : NULL;
 1426         if (nsap) {
 1427                 error = copyin(uap->nsv, &vec, sizeof(vec));
 1428                 if (error)
 1429                         return (error);
 1430                 nsap->sa_handler = vec.sv_handler;
 1431                 OSIG2SIG(vec.sv_mask, nsap->sa_mask);
 1432                 nsap->sa_flags = vec.sv_flags;
 1433                 nsap->sa_flags ^= SA_RESTART;   /* opposite of SV_INTERRUPT */
 1434         }
 1435         error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
 1436         if (osap && !error) {
 1437                 vec.sv_handler = osap->sa_handler;
 1438                 SIG2OSIG(osap->sa_mask, vec.sv_mask);
 1439                 vec.sv_flags = osap->sa_flags;
 1440                 vec.sv_flags &= ~SA_NOCLDWAIT;
 1441                 vec.sv_flags ^= SA_RESTART;
 1442                 error = copyout(&vec, uap->osv, sizeof(vec));
 1443         }
 1444         return (error);
 1445 }
 1446 
 1447 #ifndef _SYS_SYSPROTO_H_
 1448 struct osigblock_args {
 1449         int     mask;
 1450 };
 1451 #endif
 1452 int
 1453 osigblock(td, uap)
 1454         register struct thread *td;
 1455         struct osigblock_args *uap;
 1456 {
 1457         sigset_t set, oset;
 1458 
 1459         OSIG2SIG(uap->mask, set);
 1460         kern_sigprocmask(td, SIG_BLOCK, &set, &oset, 0);
 1461         SIG2OSIG(oset, td->td_retval[0]);
 1462         return (0);
 1463 }
 1464 
 1465 #ifndef _SYS_SYSPROTO_H_
 1466 struct osigsetmask_args {
 1467         int     mask;
 1468 };
 1469 #endif
 1470 int
 1471 osigsetmask(td, uap)
 1472         struct thread *td;
 1473         struct osigsetmask_args *uap;
 1474 {
 1475         sigset_t set, oset;
 1476 
 1477         OSIG2SIG(uap->mask, set);
 1478         kern_sigprocmask(td, SIG_SETMASK, &set, &oset, 0);
 1479         SIG2OSIG(oset, td->td_retval[0]);
 1480         return (0);
 1481 }
 1482 #endif /* COMPAT_43 */
 1483 
 1484 /*
 1485  * Suspend calling thread until signal, providing mask to be set in the
 1486  * meantime.
 1487  */
 1488 #ifndef _SYS_SYSPROTO_H_
 1489 struct sigsuspend_args {
 1490         const sigset_t *sigmask;
 1491 };
 1492 #endif
 1493 /* ARGSUSED */
 1494 int
 1495 sys_sigsuspend(td, uap)
 1496         struct thread *td;
 1497         struct sigsuspend_args *uap;
 1498 {
 1499         sigset_t mask;
 1500         int error;
 1501 
 1502         error = copyin(uap->sigmask, &mask, sizeof(mask));
 1503         if (error)
 1504                 return (error);
 1505         return (kern_sigsuspend(td, mask));
 1506 }
 1507 
 1508 int
 1509 kern_sigsuspend(struct thread *td, sigset_t mask)
 1510 {
 1511         struct proc *p = td->td_proc;
 1512         int has_sig, sig;
 1513 
 1514         /*
 1515          * When returning from sigsuspend, we want
 1516          * the old mask to be restored after the
 1517          * signal handler has finished.  Thus, we
 1518          * save it in td_oldsigmask and set TDP_OLDMASK
 1519          * on the thread to indicate this.
 1520          */
 1521         PROC_LOCK(p);
 1522         kern_sigprocmask(td, SIG_SETMASK, &mask, &td->td_oldsigmask,
 1523             SIGPROCMASK_PROC_LOCKED);
 1524         td->td_pflags |= TDP_OLDMASK;
 1525 
 1526         /*
 1527          * Process signals now.  Otherwise, we can get a spurious
 1528          * wakeup when a signal is placed on the process queue but
 1529          * is delivered to another thread, whereas sigsuspend should
 1530          * return only on actual signal delivery.
 1531          */
 1532         (p->p_sysent->sv_set_syscall_retval)(td, EINTR);
 1533         for (has_sig = 0; !has_sig;) {
 1534                 while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "pause",
 1535                         0) == 0)
 1536                         /* void */;
 1537                 thread_suspend_check(0);
 1538                 mtx_lock(&p->p_sigacts->ps_mtx);
 1539                 while ((sig = cursig(td)) != 0) {
 1540                         KASSERT(sig >= 0, ("sig %d", sig));
 1541                         has_sig += postsig(sig);
 1542                 }
 1543                 mtx_unlock(&p->p_sigacts->ps_mtx);
 1544         }
 1545         PROC_UNLOCK(p);
 1546         td->td_errno = EINTR;
 1547         td->td_pflags |= TDP_NERRNO;
 1548         return (EJUSTRETURN);
 1549 }
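
      /*
       * Illustrative userland usage of the interface implemented above
       * (a minimal sketch, not part of this file): the canonical pattern
       * blocks the signal, tests a flag set by its handler, and calls
       * sigsuspend() to atomically unblock the signal and wait, closing
       * the race between the test and the wait.  "got_usr1" stands for a
       * hypothetical volatile sig_atomic_t flag set by the handler.
       *
       *      sigset_t block, old;
       *
       *      sigemptyset(&block);
       *      sigaddset(&block, SIGUSR1);
       *      sigprocmask(SIG_BLOCK, &block, &old);
       *      while (!got_usr1)               // flag set by the handler
       *              sigsuspend(&old);       // returns -1 with errno == EINTR
       *      sigprocmask(SIG_SETMASK, &old, NULL);
       */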
 1550 
 1551 #ifdef COMPAT_43        /* XXX - COMPAT_FBSD3 */
 1552 /*
 1553  * Compatibility sigsuspend call for old binaries.  Note nonstandard calling
 1554  * convention: libc stub passes mask, not pointer, to save a copyin.
 1555  */
 1556 #ifndef _SYS_SYSPROTO_H_
 1557 struct osigsuspend_args {
 1558         osigset_t mask;
 1559 };
 1560 #endif
 1561 /* ARGSUSED */
 1562 int
 1563 osigsuspend(td, uap)
 1564         struct thread *td;
 1565         struct osigsuspend_args *uap;
 1566 {
 1567         sigset_t mask;
 1568 
 1569         OSIG2SIG(uap->mask, mask);
 1570         return (kern_sigsuspend(td, mask));
 1571 }
 1572 #endif /* COMPAT_43 */
 1573 
 1574 #if defined(COMPAT_43)
 1575 #ifndef _SYS_SYSPROTO_H_
 1576 struct osigstack_args {
 1577         struct  sigstack *nss;
 1578         struct  sigstack *oss;
 1579 };
 1580 #endif
 1581 /* ARGSUSED */
 1582 int
 1583 osigstack(td, uap)
 1584         struct thread *td;
 1585         struct osigstack_args *uap;
 1586 {
 1587         struct sigstack nss, oss;
 1588         int error = 0;
 1589 
 1590         if (uap->nss != NULL) {
 1591                 error = copyin(uap->nss, &nss, sizeof(nss));
 1592                 if (error)
 1593                         return (error);
 1594         }
 1595         oss.ss_sp = td->td_sigstk.ss_sp;
 1596         oss.ss_onstack = sigonstack(cpu_getstack(td));
 1597         if (uap->nss != NULL) {
 1598                 td->td_sigstk.ss_sp = nss.ss_sp;
 1599                 td->td_sigstk.ss_size = 0;
 1600                 td->td_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK;
 1601                 td->td_pflags |= TDP_ALTSTACK;
 1602         }
 1603         if (uap->oss != NULL)
 1604                 error = copyout(&oss, uap->oss, sizeof(oss));
 1605 
 1606         return (error);
 1607 }
 1608 #endif /* COMPAT_43 */
 1609 
 1610 #ifndef _SYS_SYSPROTO_H_
 1611 struct sigaltstack_args {
 1612         stack_t *ss;
 1613         stack_t *oss;
 1614 };
 1615 #endif
 1616 /* ARGSUSED */
 1617 int
 1618 sys_sigaltstack(td, uap)
 1619         struct thread *td;
 1620         struct sigaltstack_args *uap;
 1621 {
 1622         stack_t ss, oss;
 1623         int error;
 1624 
 1625         if (uap->ss != NULL) {
 1626                 error = copyin(uap->ss, &ss, sizeof(ss));
 1627                 if (error)
 1628                         return (error);
 1629         }
 1630         error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
 1631             (uap->oss != NULL) ? &oss : NULL);
 1632         if (error)
 1633                 return (error);
 1634         if (uap->oss != NULL)
 1635                 error = copyout(&oss, uap->oss, sizeof(stack_t));
 1636         return (error);
 1637 }
 1638 
 1639 int
 1640 kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
 1641 {
 1642         struct proc *p = td->td_proc;
 1643         int oonstack;
 1644 
 1645         oonstack = sigonstack(cpu_getstack(td));
 1646 
 1647         if (oss != NULL) {
 1648                 *oss = td->td_sigstk;
 1649                 oss->ss_flags = (td->td_pflags & TDP_ALTSTACK)
 1650                     ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
 1651         }
 1652 
 1653         if (ss != NULL) {
 1654                 if (oonstack)
 1655                         return (EPERM);
 1656                 if ((ss->ss_flags & ~SS_DISABLE) != 0)
 1657                         return (EINVAL);
 1658                 if (!(ss->ss_flags & SS_DISABLE)) {
 1659                         if (ss->ss_size < p->p_sysent->sv_minsigstksz)
 1660                                 return (ENOMEM);
 1661 
 1662                         td->td_sigstk = *ss;
 1663                         td->td_pflags |= TDP_ALTSTACK;
 1664                 } else {
 1665                         td->td_pflags &= ~TDP_ALTSTACK;
 1666                 }
 1667         }
 1668         return (0);
 1669 }
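
      /*
       * Illustrative userland usage of the sigaltstack() interface handled
       * above (a sketch using the standard POSIX calls, not part of this
       * file): install an alternate stack and ask for SIGSEGV delivery on
       * it, the usual way to handle stack overflow.  "segv_handler" is a
       * hypothetical handler of the form void segv_handler(int).
       *
       *      stack_t ss;
       *      struct sigaction sa;
       *
       *      ss.ss_sp = malloc(SIGSTKSZ);    // check for NULL in real code
       *      ss.ss_size = SIGSTKSZ;
       *      ss.ss_flags = 0;
       *      sigaltstack(&ss, NULL);
       *
       *      sa.sa_handler = segv_handler;
       *      sa.sa_flags = SA_ONSTACK;
       *      sigemptyset(&sa.sa_mask);
       *      sigaction(SIGSEGV, &sa, NULL);
       */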
 1670 
 1671 /*
 1672  * Common code for kill process group/broadcast kill.
 1673  * td is the calling thread.
 1674  */
 1675 static int
 1676 killpg1(struct thread *td, int sig, int pgid, int all, ksiginfo_t *ksi)
 1677 {
 1678         struct proc *p;
 1679         struct pgrp *pgrp;
 1680         int err;
 1681         int ret;
 1682 
 1683         ret = ESRCH;
 1684         if (all) {
 1685                 /*
 1686                  * broadcast
 1687                  */
 1688                 sx_slock(&allproc_lock);
 1689                 FOREACH_PROC_IN_SYSTEM(p) {
 1690                         PROC_LOCK(p);
 1691                         if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
 1692                             p == td->td_proc || p->p_state == PRS_NEW) {
 1693                                 PROC_UNLOCK(p);
 1694                                 continue;
 1695                         }
 1696                         err = p_cansignal(td, p, sig);
 1697                         if (err == 0) {
 1698                                 if (sig)
 1699                                         pksignal(p, sig, ksi);
 1700                                 ret = err;
 1701                         }
 1702                         else if (ret == ESRCH)
 1703                                 ret = err;
 1704                         PROC_UNLOCK(p);
 1705                 }
 1706                 sx_sunlock(&allproc_lock);
 1707         } else {
 1708                 sx_slock(&proctree_lock);
 1709                 if (pgid == 0) {
 1710                         /*
 1711                          * A zero pgid means send to the caller's process group.
 1712                          */
 1713                         pgrp = td->td_proc->p_pgrp;
 1714                         PGRP_LOCK(pgrp);
 1715                 } else {
 1716                         pgrp = pgfind(pgid);
 1717                         if (pgrp == NULL) {
 1718                                 sx_sunlock(&proctree_lock);
 1719                                 return (ESRCH);
 1720                         }
 1721                 }
 1722                 sx_sunlock(&proctree_lock);
 1723                 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
 1724                         PROC_LOCK(p);
 1725                         if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
 1726                             p->p_state == PRS_NEW) {
 1727                                 PROC_UNLOCK(p);
 1728                                 continue;
 1729                         }
 1730                         err = p_cansignal(td, p, sig);
 1731                         if (err == 0) {
 1732                                 if (sig)
 1733                                         pksignal(p, sig, ksi);
 1734                                 ret = err;
 1735                         }
 1736                         else if (ret == ESRCH)
 1737                                 ret = err;
 1738                         PROC_UNLOCK(p);
 1739                 }
 1740                 PGRP_UNLOCK(pgrp);
 1741         }
 1742         return (ret);
 1743 }
 1744 
 1745 #ifndef _SYS_SYSPROTO_H_
 1746 struct kill_args {
 1747         int     pid;
 1748         int     signum;
 1749 };
 1750 #endif
 1751 /* ARGSUSED */
 1752 int
 1753 sys_kill(struct thread *td, struct kill_args *uap)
 1754 {
 1755         ksiginfo_t ksi;
 1756         struct proc *p;
 1757         int error;
 1758 
 1759         /*
 1760          * A process in capability mode can send signals only to itself.
 1761          * The main rationale behind this is that abort(3) is implemented as
 1762          * kill(getpid(), SIGABRT).
 1763          */
 1764         if (IN_CAPABILITY_MODE(td) && uap->pid != td->td_proc->p_pid)
 1765                 return (ECAPMODE);
 1766 
 1767         AUDIT_ARG_SIGNUM(uap->signum);
 1768         AUDIT_ARG_PID(uap->pid);
 1769         if ((u_int)uap->signum > _SIG_MAXSIG)
 1770                 return (EINVAL);
 1771 
 1772         ksiginfo_init(&ksi);
 1773         ksi.ksi_signo = uap->signum;
 1774         ksi.ksi_code = SI_USER;
 1775         ksi.ksi_pid = td->td_proc->p_pid;
 1776         ksi.ksi_uid = td->td_ucred->cr_ruid;
 1777 
 1778         if (uap->pid > 0) {
 1779                 /* kill single process */
 1780                 if ((p = pfind(uap->pid)) == NULL) {
 1781                         if ((p = zpfind(uap->pid)) == NULL)
 1782                                 return (ESRCH);
 1783                 }
 1784                 AUDIT_ARG_PROCESS(p);
 1785                 error = p_cansignal(td, p, uap->signum);
 1786                 if (error == 0 && uap->signum)
 1787                         pksignal(p, uap->signum, &ksi);
 1788                 PROC_UNLOCK(p);
 1789                 return (error);
 1790         }
 1791         switch (uap->pid) {
 1792         case -1:                /* broadcast signal */
 1793                 return (killpg1(td, uap->signum, 0, 1, &ksi));
 1794         case 0:                 /* signal own process group */
 1795                 return (killpg1(td, uap->signum, 0, 0, &ksi));
 1796         default:                /* negative explicit process group */
 1797                 return (killpg1(td, uap->signum, -uap->pid, 0, &ksi));
 1798         }
 1799         /* NOTREACHED */
 1800 }
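
      /*
       * For reference, the pid argument decoded above follows the kill(2)
       * convention; a userland sketch (not part of this file) of how the
       * cases map onto pksignal() and killpg1():
       *
       *      kill(1234, SIGTERM);    // pid > 0: signal one process
       *      kill(0, SIGTERM);       // pid == 0: caller's process group
       *      kill(-1, SIGTERM);      // pid == -1: broadcast
       *      kill(-1234, SIGTERM);   // pid < -1: process group 1234
       */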
 1801 
 1802 int
 1803 sys_pdkill(td, uap)
 1804         struct thread *td;
 1805         struct pdkill_args *uap;
 1806 {
 1807         struct proc *p;
 1808         cap_rights_t rights;
 1809         int error;
 1810 
 1811         AUDIT_ARG_SIGNUM(uap->signum);
 1812         AUDIT_ARG_FD(uap->fd);
 1813         if ((u_int)uap->signum > _SIG_MAXSIG)
 1814                 return (EINVAL);
 1815 
 1816         error = procdesc_find(td, uap->fd,
 1817             cap_rights_init(&rights, CAP_PDKILL), &p);
 1818         if (error)
 1819                 return (error);
 1820         AUDIT_ARG_PROCESS(p);
 1821         error = p_cansignal(td, p, uap->signum);
 1822         if (error == 0 && uap->signum)
 1823                 kern_psignal(p, uap->signum);
 1824         PROC_UNLOCK(p);
 1825         return (error);
 1826 }
 1827 
 1828 #if defined(COMPAT_43)
 1829 #ifndef _SYS_SYSPROTO_H_
 1830 struct okillpg_args {
 1831         int     pgid;
 1832         int     signum;
 1833 };
 1834 #endif
 1835 /* ARGSUSED */
 1836 int
 1837 okillpg(struct thread *td, struct okillpg_args *uap)
 1838 {
 1839         ksiginfo_t ksi;
 1840 
 1841         AUDIT_ARG_SIGNUM(uap->signum);
 1842         AUDIT_ARG_PID(uap->pgid);
 1843         if ((u_int)uap->signum > _SIG_MAXSIG)
 1844                 return (EINVAL);
 1845 
 1846         ksiginfo_init(&ksi);
 1847         ksi.ksi_signo = uap->signum;
 1848         ksi.ksi_code = SI_USER;
 1849         ksi.ksi_pid = td->td_proc->p_pid;
 1850         ksi.ksi_uid = td->td_ucred->cr_ruid;
 1851         return (killpg1(td, uap->signum, uap->pgid, 0, &ksi));
 1852 }
 1853 #endif /* COMPAT_43 */
 1854 
 1855 #ifndef _SYS_SYSPROTO_H_
 1856 struct sigqueue_args {
 1857         pid_t pid;
 1858         int signum;
 1859         /* union sigval */ void *value;
 1860 };
 1861 #endif
 1862 int
 1863 sys_sigqueue(struct thread *td, struct sigqueue_args *uap)
 1864 {
 1865         union sigval sv;
 1866 
 1867         sv.sival_ptr = uap->value;
 1868 
 1869         return (kern_sigqueue(td, uap->pid, uap->signum, &sv));
 1870 }
 1871 
 1872 int
 1873 kern_sigqueue(struct thread *td, pid_t pid, int signum, union sigval *value)
 1874 {
 1875         ksiginfo_t ksi;
 1876         struct proc *p;
 1877         int error;
 1878 
 1879         if ((u_int)signum > _SIG_MAXSIG)
 1880                 return (EINVAL);
 1881 
 1882         /*
 1883          * The specification says sigqueue() can send a signal
 1884          * only to a single process.
 1885          */
 1886         if (pid <= 0)
 1887                 return (EINVAL);
 1888 
 1889         if ((p = pfind(pid)) == NULL) {
 1890                 if ((p = zpfind(pid)) == NULL)
 1891                         return (ESRCH);
 1892         }
 1893         error = p_cansignal(td, p, signum);
 1894         if (error == 0 && signum != 0) {
 1895                 ksiginfo_init(&ksi);
 1896                 ksi.ksi_flags = KSI_SIGQ;
 1897                 ksi.ksi_signo = signum;
 1898                 ksi.ksi_code = SI_QUEUE;
 1899                 ksi.ksi_pid = td->td_proc->p_pid;
 1900                 ksi.ksi_uid = td->td_ucred->cr_ruid;
 1901                 ksi.ksi_value = *value;
 1902                 error = pksignal(p, ksi.ksi_signo, &ksi);
 1903         }
 1904         PROC_UNLOCK(p);
 1905         return (error);
 1906 }
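
      /*
       * Illustrative userland counterpart of the queueing done above
       * (a minimal sketch, not part of this file): the sender attaches a
       * value with sigqueue(2), and an SA_SIGINFO handler in the target
       * reads it back from si_value.  SIGUSR2, "target_pid", and the
       * handler name are arbitrary choices for the example.
       *
       *      union sigval sv;
       *
       *      sv.sival_int = 42;
       *      sigqueue(target_pid, SIGUSR2, sv);
       *
       *      // in the target, installed with sa_flags = SA_SIGINFO:
       *      void
       *      usr2_handler(int sig, siginfo_t *si, void *ctx)
       *      {
       *              int v = si->si_value.sival_int;
       *
       *              (void)v;                // use the queued value here
       *      }
       */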
 1907 
 1908 /*
 1909  * Send a signal to a process group.
 1910  */
 1911 void
 1912 gsignal(int pgid, int sig, ksiginfo_t *ksi)
 1913 {
 1914         struct pgrp *pgrp;
 1915 
 1916         if (pgid != 0) {
 1917                 sx_slock(&proctree_lock);
 1918                 pgrp = pgfind(pgid);
 1919                 sx_sunlock(&proctree_lock);
 1920                 if (pgrp != NULL) {
 1921                         pgsignal(pgrp, sig, 0, ksi);
 1922                         PGRP_UNLOCK(pgrp);
 1923                 }
 1924         }
 1925 }
 1926 
 1927 /*
 1928  * Send a signal to a process group.  If checkctty is 1,
 1929  * limit delivery to members that have a controlling terminal.
 1930  */
 1931 void
 1932 pgsignal(struct pgrp *pgrp, int sig, int checkctty, ksiginfo_t *ksi)
 1933 {
 1934         struct proc *p;
 1935 
 1936         if (pgrp) {
 1937                 PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
 1938                 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
 1939                         PROC_LOCK(p);
 1940                         if (p->p_state == PRS_NORMAL &&
 1941                             (checkctty == 0 || p->p_flag & P_CONTROLT))
 1942                                 pksignal(p, sig, ksi);
 1943                         PROC_UNLOCK(p);
 1944                 }
 1945         }
 1946 }
 1947 
 1948 
 1949 /*
 1950  * Recalculate the signal mask and reset the signal disposition after
 1951  * usermode frame for delivery is formed.  Should be called after
 1952  * mach-specific routine, because sysent->sv_sendsig() needs correct
 1953  * ps_siginfo and signal mask.
 1954  */
 1955 static void
 1956 postsig_done(int sig, struct thread *td, struct sigacts *ps)
 1957 {
 1958         sigset_t mask;
 1959 
 1960         mtx_assert(&ps->ps_mtx, MA_OWNED);
 1961         td->td_ru.ru_nsignals++;
 1962         mask = ps->ps_catchmask[_SIG_IDX(sig)];
 1963         if (!SIGISMEMBER(ps->ps_signodefer, sig))
 1964                 SIGADDSET(mask, sig);
 1965         kern_sigprocmask(td, SIG_BLOCK, &mask, NULL,
 1966             SIGPROCMASK_PROC_LOCKED | SIGPROCMASK_PS_LOCKED);
 1967         if (SIGISMEMBER(ps->ps_sigreset, sig))
 1968                 sigdflt(ps, sig);
 1969 }
 1970 
 1971 
 1972 /*
 1973  * Send a signal caused by a trap to the current thread.  If it will be
 1974  * caught immediately, deliver it with correct code.  Otherwise, post it
 1975  * normally.
 1976  */
 1977 void
 1978 trapsignal(struct thread *td, ksiginfo_t *ksi)
 1979 {
 1980         struct sigacts *ps;
 1981         struct proc *p;
 1982         int sig;
 1983         int code;
 1984 
 1985         p = td->td_proc;
 1986         sig = ksi->ksi_signo;
 1987         code = ksi->ksi_code;
 1988         KASSERT(_SIG_VALID(sig), ("invalid signal"));
 1989 
 1990         PROC_LOCK(p);
 1991         ps = p->p_sigacts;
 1992         mtx_lock(&ps->ps_mtx);
 1993         if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
 1994             !SIGISMEMBER(td->td_sigmask, sig)) {
 1995 #ifdef KTRACE
 1996                 if (KTRPOINT(curthread, KTR_PSIG))
 1997                         ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
 1998                             &td->td_sigmask, code);
 1999 #endif
 2000                 (*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)],
 2001                                 ksi, &td->td_sigmask);
 2002                 postsig_done(sig, td, ps);
 2003                 mtx_unlock(&ps->ps_mtx);
 2004         } else {
 2005                 /*
 2006                  * Avoid a possible infinite loop if the thread
 2007                  * is masking the signal or the process is
 2008                  * ignoring the signal.
 2009                  */
 2010                 if (kern_forcesigexit &&
 2011                     (SIGISMEMBER(td->td_sigmask, sig) ||
 2012                      ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN)) {
 2013                         SIGDELSET(td->td_sigmask, sig);
 2014                         SIGDELSET(ps->ps_sigcatch, sig);
 2015                         SIGDELSET(ps->ps_sigignore, sig);
 2016                         ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
 2017                 }
 2018                 mtx_unlock(&ps->ps_mtx);
 2019                 p->p_code = code;       /* XXX for core dump/debugger */
 2020                 p->p_sig = sig;         /* XXX to verify code */
 2021                 tdsendsignal(p, td, sig, ksi);
 2022         }
 2023         PROC_UNLOCK(p);
 2024 }
 2025 
 2026 static struct thread *
 2027 sigtd(struct proc *p, int sig, int prop)
 2028 {
 2029         struct thread *td, *signal_td;
 2030 
 2031         PROC_LOCK_ASSERT(p, MA_OWNED);
 2032 
 2033         /*
 2034          * Check if current thread can handle the signal without
 2035          * switching context to another thread.
 2036          */
 2037         if (curproc == p && !SIGISMEMBER(curthread->td_sigmask, sig))
 2038                 return (curthread);
 2039         signal_td = NULL;
 2040         FOREACH_THREAD_IN_PROC(p, td) {
 2041                 if (!SIGISMEMBER(td->td_sigmask, sig)) {
 2042                         signal_td = td;
 2043                         break;
 2044                 }
 2045         }
 2046         if (signal_td == NULL)
 2047                 signal_td = FIRST_THREAD_IN_PROC(p);
 2048         return (signal_td);
 2049 }
 2050 
 2051 /*
 2052  * Send the signal to the process.  If the signal has an action, the action
 2053  * is usually performed by the target process rather than the caller; we add
 2054  * the signal to the set of pending signals for the process.
 2055  *
 2056  * Exceptions:
 2057  *   o When a stop signal is sent to a sleeping process that takes the
 2058  *     default action, the process is stopped without awakening it.
 2059  *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 2060  *     regardless of the signal action (e.g., blocked or ignored).
 2061  *
 2062  * Other ignored signals are discarded immediately.
 2063  *
 2064  * NB: This function may be entered from the debugger via the "kill" DDB
 2065  * command.  There is little that can be done to mitigate the possibly messy
 2066  * side effects of this unwise possibility.
 2067  */
 2068 void
 2069 kern_psignal(struct proc *p, int sig)
 2070 {
 2071         ksiginfo_t ksi;
 2072 
 2073         ksiginfo_init(&ksi);
 2074         ksi.ksi_signo = sig;
 2075         ksi.ksi_code = SI_KERNEL;
 2076         (void) tdsendsignal(p, NULL, sig, &ksi);
 2077 }
 2078 
 2079 int
 2080 pksignal(struct proc *p, int sig, ksiginfo_t *ksi)
 2081 {
 2082 
 2083         return (tdsendsignal(p, NULL, sig, ksi));
 2084 }
 2085 
 2086 /* Utility function for finding a thread to send a signal event to. */
 2087 int
 2088 sigev_findtd(struct proc *p, struct sigevent *sigev, struct thread **ttd)
 2089 {
 2090         struct thread *td;
 2091 
 2092         if (sigev->sigev_notify == SIGEV_THREAD_ID) {
 2093                 td = tdfind(sigev->sigev_notify_thread_id, p->p_pid);
 2094                 if (td == NULL)
 2095                         return (ESRCH);
 2096                 *ttd = td;
 2097         } else {
 2098                 *ttd = NULL;
 2099                 PROC_LOCK(p);
 2100         }
 2101         return (0);
 2102 }
 2103 
 2104 void
 2105 tdsignal(struct thread *td, int sig)
 2106 {
 2107         ksiginfo_t ksi;
 2108 
 2109         ksiginfo_init(&ksi);
 2110         ksi.ksi_signo = sig;
 2111         ksi.ksi_code = SI_KERNEL;
 2112         (void) tdsendsignal(td->td_proc, td, sig, &ksi);
 2113 }
 2114 
 2115 void
 2116 tdksignal(struct thread *td, int sig, ksiginfo_t *ksi)
 2117 {
 2118 
 2119         (void) tdsendsignal(td->td_proc, td, sig, ksi);
 2120 }
 2121 
 2122 int
 2123 tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
 2124 {
 2125         sig_t action;
 2126         sigqueue_t *sigqueue;
 2127         int prop;
 2128         struct sigacts *ps;
 2129         int intrval;
 2130         int ret = 0;
 2131         int wakeup_swapper;
 2132 
 2133         MPASS(td == NULL || p == td->td_proc);
 2134         PROC_LOCK_ASSERT(p, MA_OWNED);
 2135 
 2136         if (!_SIG_VALID(sig))
 2137                 panic("%s(): invalid signal %d", __func__, sig);
 2138 
 2139         KASSERT(ksi == NULL || !KSI_ONQ(ksi), ("%s: ksi on queue", __func__));
 2140 
 2141         /*
 2142          * IEEE Std 1003.1-2001: return success when killing a zombie.
 2143          */
 2144         if (p->p_state == PRS_ZOMBIE) {
 2145                 if (ksi && (ksi->ksi_flags & KSI_INS))
 2146                         ksiginfo_tryfree(ksi);
 2147                 return (ret);
 2148         }
 2149 
 2150         ps = p->p_sigacts;
 2151         KNOTE_LOCKED(p->p_klist, NOTE_SIGNAL | sig);
 2152         prop = sigprop(sig);
 2153 
 2154         if (td == NULL) {
 2155                 td = sigtd(p, sig, prop);
 2156                 sigqueue = &p->p_sigqueue;
 2157         } else
 2158                 sigqueue = &td->td_sigqueue;
 2159 
 2160         SDT_PROBE3(proc, , , signal__send, td, p, sig);
 2161 
 2162         /*
 2163          * If the signal is being ignored,
 2164          * then we forget about it immediately.
 2165          * (Note: we don't set SIGCONT in ps_sigignore,
 2166          * and if it is set to SIG_IGN,
 2167          * action will be SIG_DFL here.)
 2168          */
 2169         mtx_lock(&ps->ps_mtx);
 2170         if (SIGISMEMBER(ps->ps_sigignore, sig)) {
 2171                 SDT_PROBE3(proc, , , signal__discard, td, p, sig);
 2172 
 2173                 mtx_unlock(&ps->ps_mtx);
 2174                 if (ksi && (ksi->ksi_flags & KSI_INS))
 2175                         ksiginfo_tryfree(ksi);
 2176                 return (ret);
 2177         }
 2178         if (SIGISMEMBER(td->td_sigmask, sig))
 2179                 action = SIG_HOLD;
 2180         else if (SIGISMEMBER(ps->ps_sigcatch, sig))
 2181                 action = SIG_CATCH;
 2182         else
 2183                 action = SIG_DFL;
 2184         if (SIGISMEMBER(ps->ps_sigintr, sig))
 2185                 intrval = EINTR;
 2186         else
 2187                 intrval = ERESTART;
 2188         mtx_unlock(&ps->ps_mtx);
 2189 
 2190         if (prop & SA_CONT)
 2191                 sigqueue_delete_stopmask_proc(p);
 2192         else if (prop & SA_STOP) {
 2193                 /*
 2194                  * If sending a tty stop signal to a member of an orphaned
 2195                  * process group, discard the signal here if the action
 2196                  * is default; don't stop the process below if sleeping,
 2197                  * and don't clear any pending SIGCONT.
 2198                  */
 2199                 if ((prop & SA_TTYSTOP) &&
 2200                     (p->p_pgrp->pg_jobc == 0) &&
 2201                     (action == SIG_DFL)) {
 2202                         if (ksi && (ksi->ksi_flags & KSI_INS))
 2203                                 ksiginfo_tryfree(ksi);
 2204                         return (ret);
 2205                 }
 2206                 sigqueue_delete_proc(p, SIGCONT);
 2207                 if (p->p_flag & P_CONTINUED) {
 2208                         p->p_flag &= ~P_CONTINUED;
 2209                         PROC_LOCK(p->p_pptr);
 2210                         sigqueue_take(p->p_ksi);
 2211                         PROC_UNLOCK(p->p_pptr);
 2212                 }
 2213         }
 2214 
 2215         ret = sigqueue_add(sigqueue, sig, ksi);
 2216         if (ret != 0)
 2217                 return (ret);
 2218         signotify(td);
 2219         /*
 2220          * Defer further processing for signals which are held,
 2221          * except that stopped processes must be continued by SIGCONT.
 2222          */
 2223         if (action == SIG_HOLD &&
 2224             !((prop & SA_CONT) && (p->p_flag & P_STOPPED_SIG)))
 2225                 return (ret);
 2226 
 2227         /* SIGKILL: Remove procfs STOPEVENTs. */
 2228         if (sig == SIGKILL) {
 2229                 /* from procfs_ioctl.c: PIOCBIC */
 2230                 p->p_stops = 0;
 2231                 /* from procfs_ioctl.c: PIOCCONT */
 2232                 p->p_step = 0;
 2233                 wakeup(&p->p_step);
 2234         }
 2235         /*
 2236          * Some signals have a process-wide effect and a per-thread
 2237          * component.  Most processing occurs when the process next
 2238          * tries to cross the user boundary; however, there are some
 2239          * times when processing needs to be done immediately, such as
 2240          * waking up threads so that they can cross the user boundary.
 2241          * We try to do the per-process part here.
 2242          */
 2243         if (P_SHOULDSTOP(p)) {
 2244                 KASSERT(!(p->p_flag & P_WEXIT),
 2245                     ("signal to stopped but exiting process"));
 2246                 if (sig == SIGKILL) {
 2247                         /*
 2248                          * If traced process is already stopped,
 2249                          * then no further action is necessary.
 2250                          */
 2251                         if (p->p_flag & P_TRACED)
 2252                                 goto out;
 2253                         /*
 2254                          * SIGKILL sets process running.
 2255                          * It will die elsewhere.
 2256                          * All threads must be restarted.
 2257                          */
 2258                         p->p_flag &= ~P_STOPPED_SIG;
 2259                         goto runfast;
 2260                 }
 2261 
 2262                 if (prop & SA_CONT) {
 2263                         /*
 2264                          * If traced process is already stopped,
 2265                          * then no further action is necessary.
 2266                          */
 2267                         if (p->p_flag & P_TRACED)
 2268                                 goto out;
 2269                         /*
 2270                          * If SIGCONT is default (or ignored), we continue the
 2271                          * process but don't leave the signal in sigqueue as
 2272                          * it has no further action.  If SIGCONT is held, we
 2273                          * continue the process and leave the signal in
 2274                          * sigqueue.  If the process catches SIGCONT, let it
 2275                          * handle the signal itself.  If it isn't waiting on
 2276                          * an event, it goes back to run state.
 2277                          * Otherwise, process goes back to sleep state.
 2278                          */
 2279                         p->p_flag &= ~P_STOPPED_SIG;
 2280                         PROC_SLOCK(p);
 2281                         if (p->p_numthreads == p->p_suspcount) {
 2282                                 PROC_SUNLOCK(p);
 2283                                 p->p_flag |= P_CONTINUED;
 2284                                 p->p_xsig = SIGCONT;
 2285                                 PROC_LOCK(p->p_pptr);
 2286                                 childproc_continued(p);
 2287                                 PROC_UNLOCK(p->p_pptr);
 2288                                 PROC_SLOCK(p);
 2289                         }
 2290                         if (action == SIG_DFL) {
 2291                                 thread_unsuspend(p);
 2292                                 PROC_SUNLOCK(p);
 2293                                 sigqueue_delete(sigqueue, sig);
 2294                                 goto out;
 2295                         }
 2296                         if (action == SIG_CATCH) {
 2297                                 /*
 2298                                  * The process wants to catch it so it needs
 2299                                  * to run at least one thread, but which one?
 2300                                  */
 2301                                 PROC_SUNLOCK(p);
 2302                                 goto runfast;
 2303                         }
 2304                         /*
 2305                          * The signal is not ignored or caught.
 2306                          */
 2307                         thread_unsuspend(p);
 2308                         PROC_SUNLOCK(p);
 2309                         goto out;
 2310                 }
 2311 
 2312                 if (prop & SA_STOP) {
 2313                         /*
 2314                          * If traced process is already stopped,
 2315                          * then no further action is necessary.
 2316                          */
 2317                         if (p->p_flag & P_TRACED)
 2318                                 goto out;
 2319                         /*
 2320                          * Already stopped; no need to stop again
 2321                          * (if we did, the shell could get confused).
 2322                          * Just make sure the P_STOPPED_SIG flag is set.
 2323                          */
 2324                         p->p_flag |= P_STOPPED_SIG;
 2325                         sigqueue_delete(sigqueue, sig);
 2326                         goto out;
 2327                 }
 2328 
 2329                 /*
 2330                  * All other kinds of signals:
 2331                  * If a thread is sleeping interruptibly, simulate a
 2332                  * wakeup so that when it is continued it will be made
 2333                  * runnable and can look at the signal.  However, don't make
 2334                  * the PROCESS runnable; leave it stopped.
 2335                  * It may run a bit until it hits a thread_suspend_check().
 2336                  */
 2337                 wakeup_swapper = 0;
 2338                 PROC_SLOCK(p);
 2339                 thread_lock(td);
 2340                 if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR))
 2341                         wakeup_swapper = sleepq_abort(td, intrval);
 2342                 thread_unlock(td);
 2343                 PROC_SUNLOCK(p);
 2344                 if (wakeup_swapper)
 2345                         kick_proc0();
 2346                 goto out;
 2347                 /*
 2348                  * Mutexes are short lived. Threads waiting on them will
 2349                  * hit thread_suspend_check() soon.
 2350                  */
 2351         } else if (p->p_state == PRS_NORMAL) {
 2352                 if (p->p_flag & P_TRACED || action == SIG_CATCH) {
 2353                         tdsigwakeup(td, sig, action, intrval);
 2354                         goto out;
 2355                 }
 2356 
 2357                 MPASS(action == SIG_DFL);
 2358 
 2359                 if (prop & SA_STOP) {
 2360                         if (p->p_flag & (P_PPWAIT|P_WEXIT))
 2361                                 goto out;
 2362                         p->p_flag |= P_STOPPED_SIG;
 2363                         p->p_xsig = sig;
 2364                         PROC_SLOCK(p);
 2365                         wakeup_swapper = sig_suspend_threads(td, p, 1);
 2366                         if (p->p_numthreads == p->p_suspcount) {
 2367                                 /*
 2368                                  * Only a thread sending a signal to another
 2369                                  * process can reach here.  A thread sending a
 2370                                  * signal to its own process does not suspend
 2371                                  * itself here, so in that case p_numthreads
 2372                                  * can never equal p_suspcount.
 2373                                  */
 2374                                 thread_stopped(p);
 2375                                 PROC_SUNLOCK(p);
 2376                                 sigqueue_delete_proc(p, p->p_xsig);
 2377                         } else
 2378                                 PROC_SUNLOCK(p);
 2379                         if (wakeup_swapper)
 2380                                 kick_proc0();
 2381                         goto out;
 2382                 }
 2383         } else {
 2384                 /* Not in "NORMAL" state. discard the signal. */
 2385                 sigqueue_delete(sigqueue, sig);
 2386                 goto out;
 2387         }
 2388 
 2389         /*
 2390          * The process is not stopped so we need to apply the signal to all the
 2391          * running threads.
 2392          */
 2393 runfast:
 2394         tdsigwakeup(td, sig, action, intrval);
 2395         PROC_SLOCK(p);
 2396         thread_unsuspend(p);
 2397         PROC_SUNLOCK(p);
 2398 out:
 2399         /* If we jump here, proc slock should not be owned. */
 2400         PROC_SLOCK_ASSERT(p, MA_NOTOWNED);
 2401         return (ret);
 2402 }
 2403 
 2404 /*
 2405  * The force of a signal has been directed against a single
 2406  * thread.  We need to see what we can do about knocking it
 2407  * out of any sleep it may be in etc.
 2408  */
 2409 static void
 2410 tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
 2411 {
 2412         struct proc *p = td->td_proc;
 2413         int prop;
 2414         int wakeup_swapper;
 2415 
 2416         wakeup_swapper = 0;
 2417         PROC_LOCK_ASSERT(p, MA_OWNED);
 2418         prop = sigprop(sig);
 2419 
 2420         PROC_SLOCK(p);
 2421         thread_lock(td);
 2422         /*
 2423          * Bring the priority of a thread up if we want it to get
 2424          * killed in this lifetime.  Be careful to avoid bumping the
 2425          * priority of the idle thread, since signalling kernel
 2426          * processes is still allowed.
 2427          */
 2428         if (action == SIG_DFL && (prop & SA_KILL) != 0 &&
 2429             td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
 2430                 sched_prio(td, PUSER);
 2431         if (TD_ON_SLEEPQ(td)) {
 2432                 /*
 2433                  * If the thread is sleeping uninterruptibly,
 2434                  * we can't interrupt the sleep... the signal will
 2435                  * be noticed when the thread returns through
 2436                  * trap() or syscall().
 2437                  */
 2438                 if ((td->td_flags & TDF_SINTR) == 0)
 2439                         goto out;
 2440                 /*
 2441                  * If SIGCONT is default (or ignored) and process is
 2442                  * asleep, we are finished; the process should not
 2443                  * be awakened.
 2444                  */
 2445                 if ((prop & SA_CONT) && action == SIG_DFL) {
 2446                         thread_unlock(td);
 2447                         PROC_SUNLOCK(p);
 2448                         sigqueue_delete(&p->p_sigqueue, sig);
 2449                         /*
 2450                          * It may be on either list in this state.
 2451                          * Remove from both for now.
 2452                          */
 2453                         sigqueue_delete(&td->td_sigqueue, sig);
 2454                         return;
 2455                 }
 2456 
 2457                 /*
 2458                  * Don't awaken a sleeping thread for SIGSTOP if the
 2459                  * STOP signal is deferred.
 2460                  */
 2461                 if ((prop & SA_STOP) != 0 && (td->td_flags & (TDF_SBDRY |
 2462                     TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY)
 2463                         goto out;
 2464 
 2465                 /*
 2466                  * Give low priority threads a better chance to run.
 2467                  */
 2468                 if (td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
 2469                         sched_prio(td, PUSER);
 2470 
 2471                 wakeup_swapper = sleepq_abort(td, intrval);
 2472         } else {
 2473                 /*
 2474                  * Other states do nothing with the signal immediately,
 2475                  * other than kicking ourselves if we are running.
 2476                  * It will either never be noticed, or noticed very soon.
 2477                  */
 2478 #ifdef SMP
 2479                 if (TD_IS_RUNNING(td) && td != curthread)
 2480                         forward_signal(td);
 2481 #endif
 2482         }
 2483 out:
 2484         PROC_SUNLOCK(p);
 2485         thread_unlock(td);
 2486         if (wakeup_swapper)
 2487                 kick_proc0();
 2488 }
 2489 
 2490 static int
 2491 sig_suspend_threads(struct thread *td, struct proc *p, int sending)
 2492 {
 2493         struct thread *td2;
 2494         int wakeup_swapper;
 2495 
 2496         PROC_LOCK_ASSERT(p, MA_OWNED);
 2497         PROC_SLOCK_ASSERT(p, MA_OWNED);
 2498         MPASS(sending || td == curthread);
 2499 
 2500         wakeup_swapper = 0;
 2501         FOREACH_THREAD_IN_PROC(p, td2) {
 2502                 thread_lock(td2);
 2503                 td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
 2504                 if ((TD_IS_SLEEPING(td2) || TD_IS_SWAPPED(td2)) &&
 2505                     (td2->td_flags & TDF_SINTR)) {
 2506                         if (td2->td_flags & TDF_SBDRY) {
 2507                                 /*
 2508                                  * Once a thread is asleep with
 2509                                  * TDF_SBDRY and without TDF_SERESTART
 2510                                  * or TDF_SEINTR set, it should never
 2511                                  * become suspended due to this check.
 2512                                  */
 2513                                 KASSERT(!TD_IS_SUSPENDED(td2),
 2514                                     ("thread with deferred stops suspended"));
 2515                                 if (TD_SBDRY_INTR(td2))
 2516                                         wakeup_swapper |= sleepq_abort(td2,
 2517                                             TD_SBDRY_ERRNO(td2));
 2518                         } else if (!TD_IS_SUSPENDED(td2)) {
 2519                                 thread_suspend_one(td2);
 2520                         }
 2521                 } else if (!TD_IS_SUSPENDED(td2)) {
 2522                         if (sending || td != td2)
 2523                                 td2->td_flags |= TDF_ASTPENDING;
 2524 #ifdef SMP
 2525                         if (TD_IS_RUNNING(td2) && td2 != td)
 2526                                 forward_signal(td2);
 2527 #endif
 2528                 }
 2529                 thread_unlock(td2);
 2530         }
 2531         return (wakeup_swapper);
 2532 }
 2533 
 2534 /*
 2535  * Stop the process for an event deemed interesting to the debugger. If si is
 2536  * non-NULL, this is a signal exchange; the new signal requested by the
 2537  * debugger will be returned for handling. If si is NULL, this is some other
 2538  * type of interesting event. The debugger may request a signal be delivered in
 2539  * that case as well, however it will be deferred until it can be handled.
 2540  */
 2541 int
 2542 ptracestop(struct thread *td, int sig, ksiginfo_t *si)
 2543 {
 2544         struct proc *p = td->td_proc;
 2545         struct thread *td2;
 2546         ksiginfo_t ksi;
 2547         int prop;
 2548 
 2549         PROC_LOCK_ASSERT(p, MA_OWNED);
 2550         KASSERT(!(p->p_flag & P_WEXIT), ("Stopping exiting process"));
 2551         WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
 2552             &p->p_mtx.lock_object, "Stopping for traced signal");
 2553 
 2554         td->td_xsig = sig;
 2555 
 2556         if (si == NULL || (si->ksi_flags & KSI_PTRACE) == 0) {
 2557                 td->td_dbgflags |= TDB_XSIG;
 2558                 CTR4(KTR_PTRACE, "ptracestop: tid %d (pid %d) flags %#x sig %d",
 2559                     td->td_tid, p->p_pid, td->td_dbgflags, sig);
 2560                 PROC_SLOCK(p);
 2561                 while ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_XSIG)) {
 2562                         if (P_KILLED(p)) {
 2563                                 /*
 2564                                  * Ensure that, if we've been PT_KILLed, the
 2565                                  * exit status reflects that. Another thread
 2566                                  * may also be in ptracestop(), having just
 2567                                  * received the SIGKILL, but this thread was
 2568                                  * unsuspended first.
 2569                                  */
 2570                                 td->td_dbgflags &= ~TDB_XSIG;
 2571                                 td->td_xsig = SIGKILL;
 2572                                 p->p_ptevents = 0;
 2573                                 break;
 2574                         }
 2575                         if (p->p_flag & P_SINGLE_EXIT &&
 2576                             !(td->td_dbgflags & TDB_EXIT)) {
 2577                                 /*
 2578                                  * Ignore ptrace stops except for thread exit
 2579                                  * events when the process exits.
 2580                                  */
 2581                                 td->td_dbgflags &= ~TDB_XSIG;
 2582                                 PROC_SUNLOCK(p);
 2583                                 return (0);
 2584                         }
 2585 
 2586                         /*
 2587                          * Make wait(2) work.  Ensure that right after the
 2588                          * attach, the thread that was chosen to become the
 2589                          * leader of the attach gets reported to the waiter.
 2590                          * Otherwise, just avoid overwriting another thread's
 2591                          * assignment to p_xthread.  If another thread has
 2592                          * already set p_xthread, the current thread will get
 2593                          * a chance to report itself upon the next iteration.
 2594                          */
 2595                         if ((td->td_dbgflags & TDB_FSTP) != 0 ||
 2596                             ((p->p_flag2 & P2_PTRACE_FSTP) == 0 &&
 2597                             p->p_xthread == NULL)) {
 2598                                 p->p_xsig = sig;
 2599                                 p->p_xthread = td;
 2600                                 td->td_dbgflags &= ~TDB_FSTP;
 2601                                 p->p_flag2 &= ~P2_PTRACE_FSTP;
 2602                                 p->p_flag |= P_STOPPED_SIG | P_STOPPED_TRACE;
 2603                                 sig_suspend_threads(td, p, 0);
 2604                         }
 2605                         if ((td->td_dbgflags & TDB_STOPATFORK) != 0) {
 2606                                 td->td_dbgflags &= ~TDB_STOPATFORK;
 2607                                 cv_broadcast(&p->p_dbgwait);
 2608                         }
 2609 stopme:
 2610                         thread_suspend_switch(td, p);
 2611                         if (p->p_xthread == td)
 2612                                 p->p_xthread = NULL;
 2613                         if (!(p->p_flag & P_TRACED))
 2614                                 break;
 2615                         if (td->td_dbgflags & TDB_SUSPEND) {
 2616                                 if (p->p_flag & P_SINGLE_EXIT)
 2617                                         break;
 2618                                 goto stopme;
 2619                         }
 2620                 }
 2621                 PROC_SUNLOCK(p);
 2622         }
 2623 
 2624         if (si != NULL && sig == td->td_xsig) {
 2625                 /* Parent wants us to take the original signal unchanged. */
 2626                 si->ksi_flags |= KSI_HEAD;
 2627                 if (sigqueue_add(&td->td_sigqueue, sig, si) != 0)
 2628                         si->ksi_signo = 0;
 2629         } else if (td->td_xsig != 0) {
 2630                 /*
 2631                  * If parent wants us to take a new signal, then it will leave
 2632                  * it in td->td_xsig; otherwise we just look for signals again.
 2633                  */
 2634                 ksiginfo_init(&ksi);
 2635                 ksi.ksi_signo = td->td_xsig;
 2636                 ksi.ksi_flags |= KSI_PTRACE;
 2637                 prop = sigprop(td->td_xsig);
 2638                 td2 = sigtd(p, td->td_xsig, prop);
 2639                 tdsendsignal(p, td2, td->td_xsig, &ksi);
 2640                 if (td != td2)
 2641                         return (0);
 2642         }
 2643 
 2644         return (td->td_xsig);
 2645 }
 2646 
 2647 static void
 2648 reschedule_signals(struct proc *p, sigset_t block, int flags)
 2649 {
 2650         struct sigacts *ps;
 2651         struct thread *td;
 2652         int sig;
 2653 
 2654         PROC_LOCK_ASSERT(p, MA_OWNED);
 2655         ps = p->p_sigacts;
 2656         mtx_assert(&ps->ps_mtx, (flags & SIGPROCMASK_PS_LOCKED) != 0 ?
 2657             MA_OWNED : MA_NOTOWNED);
 2658         if (SIGISEMPTY(p->p_siglist))
 2659                 return;
 2660         SIGSETAND(block, p->p_siglist);
 2661         while ((sig = sig_ffs(&block)) != 0) {
 2662                 SIGDELSET(block, sig);
 2663                 td = sigtd(p, sig, 0);
 2664                 signotify(td);
 2665                 if (!(flags & SIGPROCMASK_PS_LOCKED))
 2666                         mtx_lock(&ps->ps_mtx);
 2667                 if (p->p_flag & P_TRACED ||
 2668                     (SIGISMEMBER(ps->ps_sigcatch, sig) &&
 2669                     !SIGISMEMBER(td->td_sigmask, sig)))
 2670                         tdsigwakeup(td, sig, SIG_CATCH,
 2671                             (SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR :
 2672                              ERESTART));
 2673                 if (!(flags & SIGPROCMASK_PS_LOCKED))
 2674                         mtx_unlock(&ps->ps_mtx);
 2675         }
 2676 }
 2677 
 2678 void
 2679 tdsigcleanup(struct thread *td)
 2680 {
 2681         struct proc *p;
 2682         sigset_t unblocked;
 2683 
 2684         p = td->td_proc;
 2685         PROC_LOCK_ASSERT(p, MA_OWNED);
 2686 
 2687         sigqueue_flush(&td->td_sigqueue);
 2688         if (p->p_numthreads == 1)
 2689                 return;
 2690 
 2691         /*
 2692          * Since we cannot handle signals, notify signal post code
 2693          * about this by filling the sigmask.
 2694          *
 2695          * Also, if needed, wake up thread(s) that do not block the
 2696          * same signals as the exiting thread, since the thread might
 2697          * have been selected for delivery and woken up.
 2698          */
 2699         SIGFILLSET(unblocked);
 2700         SIGSETNAND(unblocked, td->td_sigmask);
 2701         SIGFILLSET(td->td_sigmask);
 2702         reschedule_signals(p, unblocked, 0);
 2703 
 2704 }
 2705 
 2706 static int
 2707 sigdeferstop_curr_flags(int cflags)
 2708 {
 2709 
 2710         MPASS((cflags & (TDF_SEINTR | TDF_SERESTART)) == 0 ||
 2711             (cflags & TDF_SBDRY) != 0);
 2712         return (cflags & (TDF_SBDRY | TDF_SEINTR | TDF_SERESTART));
 2713 }
 2714 
 2715 /*
 2716  * Defer the delivery of SIGSTOP for the current thread, according to
 2717  * the requested mode.  Returns previous flags, which must be restored
 2718  * by sigallowstop().
 2719  *
 2720  * The TDF_SBDRY, TDF_SEINTR, and TDF_SERESTART flags are only set and
 2721  * cleared by the current thread, which allows the lock-less read-only
 2722  * accesses below.
 2723  */
 2724 int
 2725 sigdeferstop_impl(int mode)
 2726 {
 2727         struct thread *td;
 2728         int cflags, nflags;
 2729 
 2730         td = curthread;
 2731         cflags = sigdeferstop_curr_flags(td->td_flags);
 2732         switch (mode) {
 2733         case SIGDEFERSTOP_NOP:
 2734                 nflags = cflags;
 2735                 break;
 2736         case SIGDEFERSTOP_OFF:
 2737                 nflags = 0;
 2738                 break;
 2739         case SIGDEFERSTOP_SILENT:
 2740                 nflags = (cflags | TDF_SBDRY) & ~(TDF_SEINTR | TDF_SERESTART);
 2741                 break;
 2742         case SIGDEFERSTOP_EINTR:
 2743                 nflags = (cflags | TDF_SBDRY | TDF_SEINTR) & ~TDF_SERESTART;
 2744                 break;
 2745         case SIGDEFERSTOP_ERESTART:
 2746                 nflags = (cflags | TDF_SBDRY | TDF_SERESTART) & ~TDF_SEINTR;
 2747                 break;
 2748         default:
 2749                 panic("sigdeferstop: invalid mode %x", mode);
 2750                 break;
 2751         }
 2752         if (cflags == nflags)
 2753                 return (SIGDEFERSTOP_VAL_NCHG);
 2754         thread_lock(td);
 2755         td->td_flags = (td->td_flags & ~cflags) | nflags;
 2756         thread_unlock(td);
 2757         return (cflags);
 2758 }
 2759 
 2760 /*
 2761  * Restores the STOP handling mode, typically permitting the delivery
 2762  * of SIGSTOP for the current thread.  This does not immediately
 2763  * suspend if a stop was posted.  Instead, the thread will suspend
 2764  * either via ast() or a subsequent interruptible sleep.
 2765  */
 2766 void
 2767 sigallowstop_impl(int prev)
 2768 {
 2769         struct thread *td;
 2770         int cflags;
 2771 
 2772         KASSERT(prev != SIGDEFERSTOP_VAL_NCHG, ("failed sigallowstop"));
 2773         KASSERT((prev & ~(TDF_SBDRY | TDF_SEINTR | TDF_SERESTART)) == 0,
 2774             ("sigallowstop: incorrect previous mode %x", prev));
 2775         td = curthread;
 2776         cflags = sigdeferstop_curr_flags(td->td_flags);
 2777         if (cflags != prev) {
 2778                 thread_lock(td);
 2779                 td->td_flags = (td->td_flags & ~cflags) | prev;
 2780                 thread_unlock(td);
 2781         }
 2782 }
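
      /*
       * Typical in-kernel bracket around code that must not be stopped by
       * SIGSTOP delivery (a sketch; it assumes the sigdeferstop() and
       * sigallowstop() wrappers around the _impl functions above, which
       * are provided by <sys/signalvar.h>):
       *
       *      int stop;
       *
       *      stop = sigdeferstop(SIGDEFERSTOP_SILENT);
       *      error = some_sleepable_operation();     // hypothetical
       *      sigallowstop(stop);
       */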
 2783 
 2784 /*
 2785  * If the current process has received a signal that should be caught,
 2786  * should cause termination, or should interrupt the current syscall,
 2787  * return the signal number.  Stop signals with default action are
 2788  * processed immediately, then cleared; they aren't returned.  This is
 2789  * checked after each entry to the system for a syscall or trap (though
 2790  * this can usually be done without calling issignal by checking the
 2791  * pending signal masks in cursig).  The normal call sequence is
 2792  *
 2793  *      while (sig = cursig(curthread))
 2794  *              postsig(sig);
 2795  */
 2796 static int
 2797 issignal(struct thread *td)
 2798 {
 2799         struct proc *p;
 2800         struct sigacts *ps;
 2801         struct sigqueue *queue;
 2802         sigset_t sigpending;
 2803         int sig, prop;
 2804 
 2805         p = td->td_proc;
 2806         ps = p->p_sigacts;
 2807         mtx_assert(&ps->ps_mtx, MA_OWNED);
 2808         PROC_LOCK_ASSERT(p, MA_OWNED);
 2809         for (;;) {
 2810                 int traced = (p->p_flag & P_TRACED) || (p->p_stops & S_SIG);
 2811 
 2812                 sigpending = td->td_sigqueue.sq_signals;
 2813                 SIGSETOR(sigpending, p->p_sigqueue.sq_signals);
 2814                 SIGSETNAND(sigpending, td->td_sigmask);
 2815 
 2816                 if ((p->p_flag & P_PPWAIT) != 0 || (td->td_flags &
 2817                     (TDF_SBDRY | TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY)
 2818                         SIG_STOPSIGMASK(sigpending);
 2819                 if (SIGISEMPTY(sigpending))     /* no signal to send */
 2820                         return (0);
 2821                 if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED &&
 2822                     (p->p_flag2 & P2_PTRACE_FSTP) != 0 &&
 2823                     SIGISMEMBER(sigpending, SIGSTOP)) {
 2824                         /*
 2825                          * If debugger just attached, always consume
 2826                          * SIGSTOP from ptrace(PT_ATTACH) first, to
 2827                          * execute the debugger attach ritual in
 2828                          * order.
 2829                          */
 2830                         sig = SIGSTOP;
 2831                         td->td_dbgflags |= TDB_FSTP;
 2832                 } else {
 2833                         sig = sig_ffs(&sigpending);
 2834                 }
 2835 
 2836                 if (p->p_stops & S_SIG) {
 2837                         mtx_unlock(&ps->ps_mtx);
 2838                         stopevent(p, S_SIG, sig);
 2839                         mtx_lock(&ps->ps_mtx);
 2840                 }
 2841 
 2842                 /*
 2843                  * We should see pending but ignored signals
 2844                  * only if P_TRACED was on when they were posted.
 2845                  */
 2846                 if (SIGISMEMBER(ps->ps_sigignore, sig) && (traced == 0)) {
 2847                         sigqueue_delete(&td->td_sigqueue, sig);
 2848                         sigqueue_delete(&p->p_sigqueue, sig);
 2849                         continue;
 2850                 }
 2851                 if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED) {
 2852                         /*
 2853                          * If traced, always stop.
 2854                          * Remove old signal from queue before the stop.
 2855                          * XXX shrug off debugger, it causes siginfo to
 2856                          * be thrown away.
 2857                          */
 2858                         queue = &td->td_sigqueue;
 2859                         td->td_dbgksi.ksi_signo = 0;
 2860                         if (sigqueue_get(queue, sig, &td->td_dbgksi) == 0) {
 2861                                 queue = &p->p_sigqueue;
 2862                                 sigqueue_get(queue, sig, &td->td_dbgksi);
 2863                         }
 2864 
 2865                         mtx_unlock(&ps->ps_mtx);
 2866                         sig = ptracestop(td, sig, &td->td_dbgksi);
 2867                         mtx_lock(&ps->ps_mtx);
 2868 
 2869                         /* 
 2870                          * Keep looking if the debugger discarded the signal
 2871                          * or replaced it with a masked signal.
 2872                          *
 2873                          * If the traced bit got turned off, go back up
 2874                          * to the top to rescan signals.  This ensures
 2875                          * that p_sig* and p_sigact are consistent.
 2876                          */
 2877                         if (sig == 0 || (p->p_flag & P_TRACED) == 0)
 2878                                 continue;
 2879                 }
 2880 
 2881                 prop = sigprop(sig);
 2882 
 2883                 /*
 2884                  * Decide whether the signal should be returned.
 2885                  * Return the signal's number, or fall through
 2886                  * to clear it from the pending mask.
 2887                  */
 2888                 switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {
 2889 
 2890                 case (intptr_t)SIG_DFL:
 2891                         /*
 2892                          * Don't take default actions on system processes.
 2893                          */
 2894                         if (p->p_pid <= 1) {
 2895 #ifdef DIAGNOSTIC
 2896                                 /*
 2897                                  * Are you sure you want to ignore SIGSEGV
 2898                                  * in init? XXX
 2899                                  */
 2900                                 printf("Process (pid %lu) got signal %d\n",
 2901                                         (u_long)p->p_pid, sig);
 2902 #endif
 2903                                 break;          /* == ignore */
 2904                         }
 2905                         /*
 2906                          * If there is a pending stop signal to process with
 2907                          * default action, stop here, then clear the signal.
 2908                          * Traced or exiting processes should ignore stops.
 2909                          * Additionally, a member of an orphaned process group
 2910                          * should ignore tty stops.
 2911                          */
 2912                         if (prop & SA_STOP) {
 2913                                 if (p->p_flag &
 2914                                     (P_TRACED | P_WEXIT | P_SINGLE_EXIT) ||
 2915                                     (p->p_pgrp->pg_jobc == 0 &&
 2916                                      prop & SA_TTYSTOP))
 2917                                         break;  /* == ignore */
 2918                                 if (TD_SBDRY_INTR(td)) {
 2919                                         KASSERT((td->td_flags & TDF_SBDRY) != 0,
 2920                                             ("lost TDF_SBDRY"));
 2921                                         return (-1);
 2922                                 }
 2923                                 mtx_unlock(&ps->ps_mtx);
 2924                                 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
 2925                                     &p->p_mtx.lock_object, "Catching SIGSTOP");
 2926                                 sigqueue_delete(&td->td_sigqueue, sig);
 2927                                 sigqueue_delete(&p->p_sigqueue, sig);
 2928                                 p->p_flag |= P_STOPPED_SIG;
 2929                                 p->p_xsig = sig;
 2930                                 PROC_SLOCK(p);
 2931                                 sig_suspend_threads(td, p, 0);
 2932                                 thread_suspend_switch(td, p);
 2933                                 PROC_SUNLOCK(p);
 2934                                 mtx_lock(&ps->ps_mtx);
 2935                                 goto next;
 2936                         } else if (prop & SA_IGNORE) {
 2937                                 /*
 2938                                  * Except for SIGCONT, shouldn't get here.
 2939                                  * Default action is to ignore; drop it.
 2940                                  */
 2941                                 break;          /* == ignore */
 2942                         } else
 2943                                 return (sig);
 2944                         /*NOTREACHED*/
 2945 
 2946                 case (intptr_t)SIG_IGN:
 2947                         /*
 2948                          * Masking above should prevent us ever trying
 2949                          * to take action on an ignored signal other
 2950                          * than SIGCONT, unless process is traced.
 2951                          */
 2952                         if ((prop & SA_CONT) == 0 &&
 2953                             (p->p_flag & P_TRACED) == 0)
 2954                                 printf("issignal\n");
 2955                         break;          /* == ignore */
 2956 
 2957                 default:
 2958                         /*
 2959                          * This signal has an action, let
 2960                          * postsig() process it.
 2961                          */
 2962                         return (sig);
 2963                 }
 2964                 sigqueue_delete(&td->td_sigqueue, sig); /* take the signal! */
 2965                 sigqueue_delete(&p->p_sigqueue, sig);
 2966 next:;
 2967         }
 2968         /* NOTREACHED */
 2969 }
 2970 
 2971 void
 2972 thread_stopped(struct proc *p)
 2973 {
 2974         int n;
 2975 
 2976         PROC_LOCK_ASSERT(p, MA_OWNED);
 2977         PROC_SLOCK_ASSERT(p, MA_OWNED);
 2978         n = p->p_suspcount;
 2979         if (p == curproc)
 2980                 n++;
 2981         if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) {
 2982                 PROC_SUNLOCK(p);
 2983                 p->p_flag &= ~P_WAITED;
 2984                 PROC_LOCK(p->p_pptr);
 2985                 childproc_stopped(p, (p->p_flag & P_TRACED) ?
 2986                         CLD_TRAPPED : CLD_STOPPED);
 2987                 PROC_UNLOCK(p->p_pptr);
 2988                 PROC_SLOCK(p);
 2989         }
 2990 }
 2991 
 2992 /*
 2993  * Take the action for the specified signal
 2994  * from the current set of pending signals.
 2995  */
 2996 int
 2997 postsig(sig)
 2998         register int sig;
 2999 {
 3000         struct thread *td = curthread;
 3001         register struct proc *p = td->td_proc;
 3002         struct sigacts *ps;
 3003         sig_t action;
 3004         ksiginfo_t ksi;
 3005         sigset_t returnmask;
 3006 
 3007         KASSERT(sig != 0, ("postsig"));
 3008 
 3009         PROC_LOCK_ASSERT(p, MA_OWNED);
 3010         ps = p->p_sigacts;
 3011         mtx_assert(&ps->ps_mtx, MA_OWNED);
 3012         ksiginfo_init(&ksi);
 3013         if (sigqueue_get(&td->td_sigqueue, sig, &ksi) == 0 &&
 3014             sigqueue_get(&p->p_sigqueue, sig, &ksi) == 0)
 3015                 return (0);
 3016         ksi.ksi_signo = sig;
 3017         if (ksi.ksi_code == SI_TIMER)
 3018                 itimer_accept(p, ksi.ksi_timerid, &ksi);
 3019         action = ps->ps_sigact[_SIG_IDX(sig)];
 3020 #ifdef KTRACE
 3021         if (KTRPOINT(td, KTR_PSIG))
 3022                 ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ?
 3023                     &td->td_oldsigmask : &td->td_sigmask, ksi.ksi_code);
 3024 #endif
 3025         if (p->p_stops & S_SIG) {
 3026                 mtx_unlock(&ps->ps_mtx);
 3027                 stopevent(p, S_SIG, sig);
 3028                 mtx_lock(&ps->ps_mtx);
 3029         }
 3030 
 3031         if (action == SIG_DFL) {
 3032                 /*
 3033                  * Default action, where the default is to kill
 3034                  * the process.  (Other cases were ignored above.)
 3035                  */
 3036                 mtx_unlock(&ps->ps_mtx);
 3037                 sigexit(td, sig);
 3038                 /* NOTREACHED */
 3039         } else {
 3040                 /*
 3041                  * If we get here, the signal must be caught.
 3042                  */
 3043                 KASSERT(action != SIG_IGN && !SIGISMEMBER(td->td_sigmask, sig),
 3044                     ("postsig action"));
 3045                 /*
 3046                  * Set the new mask value and also defer further
 3047                  * occurrences of this signal.
 3048                  *
 3049                  * Special case: user has done a sigsuspend.  Here the
 3050                  * current mask is not of interest, but rather the
 3051                  * mask from before the sigsuspend is what we want
 3052                  * restored after the signal processing is completed.
 3053                  */
 3054                 if (td->td_pflags & TDP_OLDMASK) {
 3055                         returnmask = td->td_oldsigmask;
 3056                         td->td_pflags &= ~TDP_OLDMASK;
 3057                 } else
 3058                         returnmask = td->td_sigmask;
 3059 
 3060                 if (p->p_sig == sig) {
 3061                         p->p_code = 0;
 3062                         p->p_sig = 0;
 3063                 }
 3064                 (*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask);
 3065                 postsig_done(sig, td, ps);
 3066         }
 3067         return (1);
 3068 }
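
The TDP_OLDMASK branch above is what makes sigsuspend(2) behave as documented: the mask saved before the suspension, not the temporary suspend mask, is handed to sv_sendsig and restored when the handler returns. A minimal userland sketch of the pattern that relies on this (not part of kern_sig.c; SIGUSR1 and the handler name are chosen only for illustration):

#include <signal.h>
#include <string.h>

static volatile sig_atomic_t got_usr1;

static void
usr1_handler(int sig)
{
        (void)sig;
        got_usr1 = 1;
}

int
main(void)
{
        struct sigaction sa;
        sigset_t block, oldmask;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = usr1_handler;
        sigaction(SIGUSR1, &sa, NULL);

        /* Block SIGUSR1 while testing the flag, to avoid a lost wakeup. */
        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);
        sigprocmask(SIG_BLOCK, &block, &oldmask);

        while (!got_usr1) {
                /*
                 * Atomically install the pre-block mask and wait; when the
                 * handler returns, the kernel restores the mask in effect
                 * before sigsuspend() (the TDP_OLDMASK case above).
                 */
                sigsuspend(&oldmask);
        }

        sigprocmask(SIG_SETMASK, &oldmask, NULL);
        return (0);
}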
 3069 
 3070 /*
 3071  * Kill the current process for stated reason.
 3072  */
 3073 void
 3074 killproc(p, why)
 3075         struct proc *p;
 3076         char *why;
 3077 {
 3078 
 3079         PROC_LOCK_ASSERT(p, MA_OWNED);
 3080         CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)", p, p->p_pid,
 3081             p->p_comm);
 3082         log(LOG_ERR, "pid %d (%s), uid %d, was killed: %s\n", p->p_pid,
 3083             p->p_comm, p->p_ucred ? p->p_ucred->cr_uid : -1, why);
 3084         p->p_flag |= P_WKILLED;
 3085         kern_psignal(p, SIGKILL);
 3086 }
 3087 
 3088 /*
 3089  * Force the current process to exit with the specified signal, dumping core
 3090  * if appropriate.  We bypass the normal tests for masked and caught signals,
 3091  * allowing unrecoverable failures to terminate the process without changing
 3092  * signal state.  Mark the accounting record with the signal termination.
 3093  * If dumping core, save the signal number for the debugger.  Calls exit and
 3094  * does not return.
 3095  */
 3096 void
 3097 sigexit(td, sig)
 3098         struct thread *td;
 3099         int sig;
 3100 {
 3101         struct proc *p = td->td_proc;
 3102 
 3103         PROC_LOCK_ASSERT(p, MA_OWNED);
 3104         p->p_acflag |= AXSIG;
 3105         /*
 3106          * We must be single-threading to generate a core dump.  This
 3107          * ensures that the registers in the core file are up-to-date.
 3108          * Also, the ELF dump handler assumes that the thread list doesn't
 3109          * change out from under it.
 3110          *
 3111          * XXX If another thread attempts to single-thread before us
 3112          *     (e.g. via fork()), we won't get a dump at all.
 3113          */
 3114         if ((sigprop(sig) & SA_CORE) && thread_single(p, SINGLE_NO_EXIT) == 0) {
 3115                 p->p_sig = sig;
 3116                 /*
 3117                  * Log signals which would cause core dumps
 3118                  * (Log as LOG_INFO to appease those who don't want
 3119                  * these messages.)
 3120          * XXX: TODO: write out the ruid as well as the euid.
 3121                  * Note that coredump() drops proc lock.
 3122                  */
 3123                 if (coredump(td) == 0)
 3124                         sig |= WCOREFLAG;
 3125                 if (kern_logsigexit)
 3126                         log(LOG_INFO,
 3127                             "pid %d (%s), uid %d: exited on signal %d%s\n",
 3128                             p->p_pid, p->p_comm,
 3129                             td->td_ucred ? td->td_ucred->cr_uid : -1,
 3130                             sig &~ WCOREFLAG,
 3131                             sig & WCOREFLAG ? " (core dumped)" : "");
 3132         } else
 3133                 PROC_UNLOCK(p);
 3134         exit1(td, 0, sig);
 3135         /* NOTREACHED */
 3136 }
 3137 
 3138 /*
 3139  * Send a queued SIGCHLD to the parent when the child process's
 3140  * state changes.
 3141  */
 3142 static void
 3143 sigparent(struct proc *p, int reason, int status)
 3144 {
 3145         PROC_LOCK_ASSERT(p, MA_OWNED);
 3146         PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
 3147 
 3148         if (p->p_ksi != NULL) {
 3149                 p->p_ksi->ksi_signo  = SIGCHLD;
 3150                 p->p_ksi->ksi_code   = reason;
 3151                 p->p_ksi->ksi_status = status;
 3152                 p->p_ksi->ksi_pid    = p->p_pid;
 3153                 p->p_ksi->ksi_uid    = p->p_ucred->cr_ruid;
 3154                 if (KSI_ONQ(p->p_ksi))
 3155                         return;
 3156         }
 3157         pksignal(p->p_pptr, SIGCHLD, p->p_ksi);
 3158 }
 3159 
 3160 static void
 3161 childproc_jobstate(struct proc *p, int reason, int sig)
 3162 {
 3163         struct sigacts *ps;
 3164 
 3165         PROC_LOCK_ASSERT(p, MA_OWNED);
 3166         PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
 3167 
 3168         /*
 3169          * Wake up the parent sleeping in kern_wait() and also send
 3170          * SIGCHLD to the parent; SIGCHLD alone does not guarantee
 3171          * that the parent will wake up, because the parent may have
 3172          * masked the signal.
 3173          */
 3174         p->p_pptr->p_flag |= P_STATCHILD;
 3175         wakeup(p->p_pptr);
 3176 
 3177         ps = p->p_pptr->p_sigacts;
 3178         mtx_lock(&ps->ps_mtx);
 3179         if ((ps->ps_flag & PS_NOCLDSTOP) == 0) {
 3180                 mtx_unlock(&ps->ps_mtx);
 3181                 sigparent(p, reason, sig);
 3182         } else
 3183                 mtx_unlock(&ps->ps_mtx);
 3184 }
 3185 
 3186 void
 3187 childproc_stopped(struct proc *p, int reason)
 3188 {
 3189 
 3190         childproc_jobstate(p, reason, p->p_xsig);
 3191 }
 3192 
 3193 void
 3194 childproc_continued(struct proc *p)
 3195 {
 3196         childproc_jobstate(p, CLD_CONTINUED, SIGCONT);
 3197 }
 3198 
 3199 void
 3200 childproc_exited(struct proc *p)
 3201 {
 3202         int reason, status;
 3203 
 3204         if (WCOREDUMP(p->p_xsig)) {
 3205                 reason = CLD_DUMPED;
 3206                 status = WTERMSIG(p->p_xsig);
 3207         } else if (WIFSIGNALED(p->p_xsig)) {
 3208                 reason = CLD_KILLED;
 3209                 status = WTERMSIG(p->p_xsig);
 3210         } else {
 3211                 reason = CLD_EXITED;
 3212                 status = p->p_xexit;
 3213         }
 3214         /*
 3215          * XXX avoid calling wakeup(p->p_pptr), the work is
 3216          * done in exit1().
 3217          */
 3218         sigparent(p, reason, status);
 3219 }
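
The CLD_EXITED / CLD_KILLED / CLD_DUMPED distinction computed above surfaces in the parent both through the SIGCHLD siginfo and through the wait status. A hedged userland sketch of decoding the same cases with waitpid(2) (illustrative only, not taken from this file):

#include <sys/types.h>
#include <sys/wait.h>
#include <stdio.h>

/* Report how the child identified by pid changed state. */
static void
report_child(pid_t pid)
{
        int status;

        if (waitpid(pid, &status, WUNTRACED | WCONTINUED) == -1) {
                perror("waitpid");
                return;
        }
        if (WIFEXITED(status))                  /* CLD_EXITED */
                printf("pid %d exited with %d\n", (int)pid,
                    WEXITSTATUS(status));
        else if (WIFSIGNALED(status))           /* CLD_KILLED or CLD_DUMPED */
                printf("pid %d killed by signal %d%s\n", (int)pid,
                    WTERMSIG(status),
                    WCOREDUMP(status) ? " (core dumped)" : "");
        else if (WIFSTOPPED(status))            /* CLD_STOPPED / CLD_TRAPPED */
                printf("pid %d stopped by signal %d\n", (int)pid,
                    WSTOPSIG(status));
        else if (WIFCONTINUED(status))          /* CLD_CONTINUED */
                printf("pid %d continued\n", (int)pid);
}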
 3220 
 3221 /*
 3222  * We only have 1 character for the core count in the format
 3223  * string, so the range will be 0-9
 3224  */
 3225 #define MAX_NUM_CORES 10
 3226 static int num_cores = 5;
 3227 
 3228 static int
 3229 sysctl_debug_num_cores_check (SYSCTL_HANDLER_ARGS)
 3230 {
 3231         int error;
 3232         int new_val;
 3233 
 3234         new_val = num_cores;
 3235         error = sysctl_handle_int(oidp, &new_val, 0, req);
 3236         if (error != 0 || req->newptr == NULL)
 3237                 return (error);
 3238         if (new_val > MAX_NUM_CORES)
 3239                 new_val = MAX_NUM_CORES;
 3240         if (new_val < 0)
 3241                 new_val = 0;
 3242         num_cores = new_val;
 3243         return (0);
 3244 }
 3245 SYSCTL_PROC(_debug, OID_AUTO, ncores, CTLTYPE_INT|CTLFLAG_RW,
 3246             0, sizeof(int), sysctl_debug_num_cores_check, "I", "");
 3247 
 3248 #define GZ_SUFFIX       ".gz"
 3249 
 3250 #ifdef GZIO
 3251 static int compress_user_cores = 1;
 3252 SYSCTL_INT(_kern, OID_AUTO, compress_user_cores, CTLFLAG_RWTUN,
 3253     &compress_user_cores, 0, "Compression of user corefiles");
 3254 
 3255 int compress_user_cores_gzlevel = 6;
 3256 SYSCTL_INT(_kern, OID_AUTO, compress_user_cores_gzlevel, CTLFLAG_RWTUN,
 3257     &compress_user_cores_gzlevel, 0, "Corefile gzip compression level");
 3258 #else
 3259 static int compress_user_cores = 0;
 3260 #endif
 3261 
 3262 /*
 3263  * Protect the access to corefilename[] by allproc_lock.
 3264  */
 3265 #define corefilename_lock       allproc_lock
 3266 
 3267 static char corefilename[MAXPATHLEN] = {"%N.core"};
 3268 TUNABLE_STR("kern.corefile", corefilename, sizeof(corefilename));
 3269 
 3270 static int
 3271 sysctl_kern_corefile(SYSCTL_HANDLER_ARGS)
 3272 {
 3273         int error;
 3274 
 3275         sx_xlock(&corefilename_lock);
 3276         error = sysctl_handle_string(oidp, corefilename, sizeof(corefilename),
 3277             req);
 3278         sx_xunlock(&corefilename_lock);
 3279 
 3280         return (error);
 3281 }
 3282 SYSCTL_PROC(_kern, OID_AUTO, corefile, CTLTYPE_STRING | CTLFLAG_RW |
 3283     CTLFLAG_MPSAFE, 0, 0, sysctl_kern_corefile, "A",
 3284     "Process corefile name format string");
 3285 
 3286 /*
 3287  * corefile_open(comm, uid, pid, td, compress, vpp, namep)
 3288  * Expand the name described in corefilename, using comm, uid, and pid,
 3289  * and open or create the core file.
 3290  * corefilename is a printf-like string, with these format specifiers:
 3291  *      %H hostname, %I autoincrementing index (see debug.ncores),
 3292  *      %N process name, %P process id, %U user id, %% a literal '%'.
 3293  * For example, "%N.core" is the default; core dumps can be disabled
 3294  * completely by using "/dev/null", or all core files can be stored
 3295  * in "/cores/%U/%N-%P".
 3296  * This is controlled by the sysctl variable kern.corefile (see above).
 3297  */
 3298 static int
 3299 corefile_open(const char *comm, uid_t uid, pid_t pid, struct thread *td,
 3300     int compress, struct vnode **vpp, char **namep)
 3301 {
 3302         struct nameidata nd;
 3303         struct sbuf sb;
 3304         const char *format;
 3305         char *hostname, *name;
 3306         int indexpos, i, error, cmode, flags, oflags;
 3307 
 3308         hostname = NULL;
 3309         format = corefilename;
 3310         name = malloc(MAXPATHLEN, M_TEMP, M_WAITOK | M_ZERO);
 3311         indexpos = -1;
 3312         (void)sbuf_new(&sb, name, MAXPATHLEN, SBUF_FIXEDLEN);
 3313         sx_slock(&corefilename_lock);
 3314         for (i = 0; format[i] != '\0'; i++) {
 3315                 switch (format[i]) {
 3316                 case '%':       /* Format character */
 3317                         i++;
 3318                         switch (format[i]) {
 3319                         case '%':
 3320                                 sbuf_putc(&sb, '%');
 3321                                 break;
 3322                         case 'H':       /* hostname */
 3323                                 if (hostname == NULL) {
 3324                                         hostname = malloc(MAXHOSTNAMELEN,
 3325                                             M_TEMP, M_WAITOK);
 3326                                 }
 3327                                 getcredhostname(td->td_ucred, hostname,
 3328                                     MAXHOSTNAMELEN);
 3329                                 sbuf_printf(&sb, "%s", hostname);
 3330                                 break;
 3331                         case 'I':       /* autoincrementing index */
 3332                                 sbuf_printf(&sb, "");
 3333                                 indexpos = sbuf_len(&sb) - 1;
 3334                                 break;
 3335                         case 'N':       /* process name */
 3336                                 sbuf_printf(&sb, "%s", comm);
 3337                                 break;
 3338                         case 'P':       /* process id */
 3339                                 sbuf_printf(&sb, "%u", pid);
 3340                                 break;
 3341                         case 'U':       /* user id */
 3342                                 sbuf_printf(&sb, "%u", uid);
 3343                                 break;
 3344                         default:
 3345                                 log(LOG_ERR,
 3346                                     "Unknown format character %c in "
 3347                                     "corename `%s'\n", format[i], format);
 3348                                 break;
 3349                         }
 3350                         break;
 3351                 default:
 3352                         sbuf_putc(&sb, format[i]);
 3353                         break;
 3354                 }
 3355         }
 3356         sx_sunlock(&corefilename_lock);
 3357         free(hostname, M_TEMP);
 3358         if (compress)
 3359                 sbuf_printf(&sb, GZ_SUFFIX);
 3360         if (sbuf_error(&sb) != 0) {
 3361                 log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too "
 3362                     "long\n", (long)pid, comm, (u_long)uid);
 3363                 sbuf_delete(&sb);
 3364                 free(name, M_TEMP);
 3365                 return (ENOMEM);
 3366         }
 3367         sbuf_finish(&sb);
 3368         sbuf_delete(&sb);
 3369 
 3370         cmode = S_IRUSR | S_IWUSR;
 3371         oflags = VN_OPEN_NOAUDIT | VN_OPEN_NAMECACHE |
 3372             (capmode_coredump ? VN_OPEN_NOCAPCHECK : 0);
 3373 
 3374         /*
 3375          * If the core format has a %I in it, then we need to check
 3376          * for existing corefiles before returning a name.
 3377          * To do this we iterate over 0..num_cores to find a
 3378          * non-existing core file name to use.
 3379          */
 3380         if (indexpos != -1) {
 3381                 for (i = 0; i < num_cores; i++) {
 3382                         flags = O_CREAT | O_EXCL | FWRITE | O_NOFOLLOW;
 3383                         name[indexpos] = '0' + i;
 3384                         NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td);
 3385                         error = vn_open_cred(&nd, &flags, cmode, oflags,
 3386                             td->td_ucred, NULL);
 3387                         if (error) {
 3388                                 if (error == EEXIST)
 3389                                         continue;
 3390                                 log(LOG_ERR,
 3391                                     "pid %d (%s), uid (%u):  Path `%s' failed "
 3392                                     "on initial open test, error = %d\n",
 3393                                     pid, comm, uid, name, error);
 3394                         }
 3395                         goto out;
 3396                 }
 3397         }
 3398 
 3399         flags = O_CREAT | FWRITE | O_NOFOLLOW;
 3400         NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td);
 3401         error = vn_open_cred(&nd, &flags, cmode, oflags, td->td_ucred, NULL);
 3402 out:
 3403         if (error) {
 3404 #ifdef AUDIT
 3405                 audit_proc_coredump(td, name, error);
 3406 #endif
 3407                 free(name, M_TEMP);
 3408                 return (error);
 3409         }
 3410         NDFREE(&nd, NDF_ONLY_PNBUF);
 3411         *vpp = nd.ni_vp;
 3412         *namep = name;
 3413         return (0);
 3414 }
 3415 
 3416 static int
 3417 coredump_sanitise_path(const char *path)
 3418 {
 3419         size_t i;
 3420 
 3421         /*
 3422          * Only send a subset of ASCII to devd(8) because it
 3423          * might pass these strings to sh -c.
 3424          */
 3425         for (i = 0; path[i]; i++)
 3426                 if (!(isalpha(path[i]) || isdigit(path[i])) &&
 3427                     path[i] != '/' && path[i] != '.' &&
 3428                     path[i] != '-')
 3429                         return (0);
 3430 
 3431         return (1);
 3432 }
 3433 
 3434 /*
 3435  * Dump a process' core.  The main routine does some
 3436  * policy checking, and creates the name of the coredump;
 3437  * then it passes on a vnode and a size limit to the process-specific
 3438  * coredump routine if there is one; if there _is not_ one, it returns
 3439  * ENOSYS; otherwise it returns the error from the process-specific routine.
 3440  */
 3441 
 3442 static int
 3443 coredump(struct thread *td)
 3444 {
 3445         struct proc *p = td->td_proc;
 3446         struct ucred *cred = td->td_ucred;
 3447         struct vnode *vp;
 3448         struct flock lf;
 3449         struct vattr vattr;
 3450         int error, error1, locked;
 3451         char *name;                     /* name of corefile */
 3452         void *rl_cookie;
 3453         off_t limit;
 3454         char *data = NULL;
 3455         char *fullpath, *freepath = NULL;
 3456         size_t len;
 3457         static const char comm_name[] = "comm=";
 3458         static const char core_name[] = "core=";
 3459 
 3460         PROC_LOCK_ASSERT(p, MA_OWNED);
 3461         MPASS((p->p_flag & P_HADTHREADS) == 0 || p->p_singlethread == td);
 3462         _STOPEVENT(p, S_CORE, 0);
 3463 
 3464         if (!do_coredump || (!sugid_coredump && (p->p_flag & P_SUGID) != 0) ||
 3465             (p->p_flag2 & P2_NOTRACE) != 0) {
 3466                 PROC_UNLOCK(p);
 3467                 return (EFAULT);
 3468         }
 3469 
 3470         /*
 3471          * Note that the bulk of limit checking is done after
 3472          * the corefile is created.  The exception is if the limit
 3473          * for corefiles is 0, in which case we don't bother
 3474          * creating the corefile at all.  This layout means that
 3475          * a corefile is truncated instead of not being created,
 3476          * if it is larger than the limit.
 3477          */
 3478         limit = (off_t)lim_cur(td, RLIMIT_CORE);
 3479         if (limit == 0 || racct_get_available(p, RACCT_CORE) == 0) {
 3480                 PROC_UNLOCK(p);
 3481                 return (EFBIG);
 3482         }
 3483         PROC_UNLOCK(p);
 3484 
 3485         error = corefile_open(p->p_comm, cred->cr_uid, p->p_pid, td,
 3486             compress_user_cores, &vp, &name);
 3487         if (error != 0)
 3488                 return (error);
 3489 
 3490         /*
 3491          * Don't dump to non-regular files or files with links.
 3492          * Do not dump into system files.
 3493          */
 3494         if (vp->v_type != VREG || VOP_GETATTR(vp, &vattr, cred) != 0 ||
 3495             vattr.va_nlink != 1 || (vp->v_vflag & VV_SYSTEM) != 0) {
 3496                 VOP_UNLOCK(vp, 0);
 3497                 error = EFAULT;
 3498                 goto out;
 3499         }
 3500 
 3501         VOP_UNLOCK(vp, 0);
 3502 
 3503         /* Postpone other writers, including core dumps of other processes. */
 3504         rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
 3505 
 3506         lf.l_whence = SEEK_SET;
 3507         lf.l_start = 0;
 3508         lf.l_len = 0;
 3509         lf.l_type = F_WRLCK;
 3510         locked = (VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK) == 0);
 3511 
 3512         VATTR_NULL(&vattr);
 3513         vattr.va_size = 0;
 3514         if (set_core_nodump_flag)
 3515                 vattr.va_flags = UF_NODUMP;
 3516         vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 3517         VOP_SETATTR(vp, &vattr, cred);
 3518         VOP_UNLOCK(vp, 0);
 3519         PROC_LOCK(p);
 3520         p->p_acflag |= ACORE;
 3521         PROC_UNLOCK(p);
 3522 
 3523         if (p->p_sysent->sv_coredump != NULL) {
 3524                 error = p->p_sysent->sv_coredump(td, vp, limit,
 3525                     compress_user_cores ? IMGACT_CORE_COMPRESS : 0);
 3526         } else {
 3527                 error = ENOSYS;
 3528         }
 3529 
 3530         if (locked) {
 3531                 lf.l_type = F_UNLCK;
 3532                 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
 3533         }
 3534         vn_rangelock_unlock(vp, rl_cookie);
 3535 
 3536         /*
 3537          * Notify the userland helper that a process triggered a core dump.
 3538          * This allows the helper to run an automated debugging session.
 3539          */
 3540         if (error != 0 || coredump_devctl == 0)
 3541                 goto out;
 3542         len = MAXPATHLEN * 2 + sizeof(comm_name) - 1 +
 3543             sizeof(' ') + sizeof(core_name) - 1;
 3544         data = malloc(len, M_TEMP, M_WAITOK);
 3545         if (vn_fullpath_global(td, p->p_textvp, &fullpath, &freepath) != 0)
 3546                 goto out;
 3547         if (!coredump_sanitise_path(fullpath))
 3548                 goto out;
 3549         snprintf(data, len, "%s%s ", comm_name, fullpath);
 3550         free(freepath, M_TEMP);
 3551         freepath = NULL;
 3552         if (vn_fullpath_global(td, vp, &fullpath, &freepath) != 0)
 3553                 goto out;
 3554         if (!coredump_sanitise_path(fullpath))
 3555                 goto out;
 3556         strlcat(data, core_name, len);
 3557         strlcat(data, fullpath, len);
 3558         devctl_notify("kernel", "signal", "coredump", data);
 3559 out:
 3560         error1 = vn_close(vp, FWRITE, cred, td);
 3561         if (error == 0)
 3562                 error = error1;
 3563 #ifdef AUDIT
 3564         audit_proc_coredump(td, name, error);
 3565 #endif
 3566         free(freepath, M_TEMP);
 3567         free(data, M_TEMP);
 3568         free(name, M_TEMP);
 3569         return (error);
 3570 }
 3571 
 3572 /*
 3573  * Nonexistent system call-- signal process (may want to handle it).  Flag
 3574  * error in case process won't see signal immediately (blocked or ignored).
 3575  */
 3576 #ifndef _SYS_SYSPROTO_H_
 3577 struct nosys_args {
 3578         int     dummy;
 3579 };
 3580 #endif
 3581 /* ARGSUSED */
 3582 int
 3583 nosys(td, args)
 3584         struct thread *td;
 3585         struct nosys_args *args;
 3586 {
 3587         struct proc *p = td->td_proc;
 3588 
 3589         PROC_LOCK(p);
 3590         tdsignal(td, SIGSYS);
 3591         PROC_UNLOCK(p);
 3592         return (ENOSYS);
 3593 }
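
The effect of nosys() is visible to any process that invokes an unimplemented system call number: the call fails with ENOSYS and SIGSYS is posted. A hedged sketch (the number 100000 is assumed to be unassigned and is used only for illustration):

#include <sys/syscall.h>
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_sigsys;

static void
sigsys_handler(int sig)
{
        (void)sig;
        got_sigsys = 1;
}

int
main(void)
{
        /* Catch SIGSYS so the default action does not terminate us. */
        signal(SIGSYS, sigsys_handler);

        if (syscall(100000) == -1 && errno == ENOSYS)
                printf("ENOSYS returned, SIGSYS %s\n",
                    got_sigsys ? "delivered" : "not observed");
        return (0);
}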
 3594 
 3595 /*
 3596  * Send a SIGIO or SIGURG signal to a process or process group using stored
 3597  * credentials rather than those of the current process.
 3598  */
 3599 void
 3600 pgsigio(sigiop, sig, checkctty)
 3601         struct sigio **sigiop;
 3602         int sig, checkctty;
 3603 {
 3604         ksiginfo_t ksi;
 3605         struct sigio *sigio;
 3606 
 3607         ksiginfo_init(&ksi);
 3608         ksi.ksi_signo = sig;
 3609         ksi.ksi_code = SI_KERNEL;
 3610 
 3611         SIGIO_LOCK();
 3612         sigio = *sigiop;
 3613         if (sigio == NULL) {
 3614                 SIGIO_UNLOCK();
 3615                 return;
 3616         }
 3617         if (sigio->sio_pgid > 0) {
 3618                 PROC_LOCK(sigio->sio_proc);
 3619                 if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
 3620                         kern_psignal(sigio->sio_proc, sig);
 3621                 PROC_UNLOCK(sigio->sio_proc);
 3622         } else if (sigio->sio_pgid < 0) {
 3623                 struct proc *p;
 3624 
 3625                 PGRP_LOCK(sigio->sio_pgrp);
 3626                 LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
 3627                         PROC_LOCK(p);
 3628                         if (p->p_state == PRS_NORMAL &&
 3629                             CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
 3630                             (checkctty == 0 || (p->p_flag & P_CONTROLT)))
 3631                                 kern_psignal(p, sig);
 3632                         PROC_UNLOCK(p);
 3633                 }
 3634                 PGRP_UNLOCK(sigio->sio_pgrp);
 3635         }
 3636         SIGIO_UNLOCK();
 3637 }
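
pgsigio() is only the delivery half of the SIGIO machinery; the owner it signals is registered earlier through fcntl(2) with F_SETOWN, and asynchronous notification is enabled with O_ASYNC. A hedged userland sketch of arming a descriptor this way (the helper name is illustrative):

#include <fcntl.h>
#include <signal.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t io_ready;

static void
sigio_handler(int sig)
{
        (void)sig;
        io_ready = 1;
}

/*
 * Request SIGIO for fd: the calling process becomes the owner that
 * pgsigio() will later look up through the descriptor's struct sigio.
 */
static int
arm_sigio(int fd)
{
        struct sigaction sa;
        int flags;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = sigio_handler;
        if (sigaction(SIGIO, &sa, NULL) == -1)
                return (-1);
        if (fcntl(fd, F_SETOWN, getpid()) == -1)
                return (-1);
        if ((flags = fcntl(fd, F_GETFL)) == -1)
                return (-1);
        return (fcntl(fd, F_SETFL, flags | O_ASYNC));
}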
 3638 
 3639 static int
 3640 filt_sigattach(struct knote *kn)
 3641 {
 3642         struct proc *p = curproc;
 3643 
 3644         kn->kn_ptr.p_proc = p;
 3645         kn->kn_flags |= EV_CLEAR;               /* automatically set */
 3646 
 3647         knlist_add(p->p_klist, kn, 0);
 3648 
 3649         return (0);
 3650 }
 3651 
 3652 static void
 3653 filt_sigdetach(struct knote *kn)
 3654 {
 3655         struct proc *p = kn->kn_ptr.p_proc;
 3656 
 3657         knlist_remove(p->p_klist, kn, 0);
 3658 }
 3659 
 3660 /*
 3661  * signal knotes are shared with proc knotes, so we apply a mask to
 3662  * the hint in order to differentiate them from process hints.  This
 3663  * could be avoided by using a signal-specific knote list, but probably
 3664  * isn't worth the trouble.
 3665  */
 3666 static int
 3667 filt_signal(struct knote *kn, long hint)
 3668 {
 3669 
 3670         if (hint & NOTE_SIGNAL) {
 3671                 hint &= ~NOTE_SIGNAL;
 3672 
 3673                 if (kn->kn_id == hint)
 3674                         kn->kn_data++;
 3675         }
 3676         return (kn->kn_data != 0);
 3677 }
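
These three routines back EVFILT_SIGNAL: the attach hook hangs the knote off the process's knote list, and filt_signal() counts NOTE_SIGNAL hints whose value matches the watched signal number. A sketch of the corresponding userland usage (error handling kept minimal):

#include <sys/types.h>
#include <sys/event.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        struct kevent ev;
        int kq;

        /*
         * EVFILT_SIGNAL records delivery attempts even for ignored
         * signals, so SIG_IGN keeps SIGUSR1 from affecting the process.
         */
        signal(SIGUSR1, SIG_IGN);

        kq = kqueue();
        EV_SET(&ev, SIGUSR1, EVFILT_SIGNAL, EV_ADD | EV_ENABLE, 0, 0, NULL);
        if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1) {
                perror("kevent");
                return (1);
        }
        for (;;) {
                if (kevent(kq, NULL, 0, &ev, 1, NULL) == 1)
                        printf("SIGUSR1 posted %jd time(s)\n",
                            (intmax_t)ev.data);
        }
}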
 3678 
 3679 struct sigacts *
 3680 sigacts_alloc(void)
 3681 {
 3682         struct sigacts *ps;
 3683 
 3684         ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO);
 3685         refcount_init(&ps->ps_refcnt, 1);
 3686         mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF);
 3687         return (ps);
 3688 }
 3689 
 3690 void
 3691 sigacts_free(struct sigacts *ps)
 3692 {
 3693 
 3694         if (refcount_release(&ps->ps_refcnt) == 0)
 3695                 return;
 3696         mtx_destroy(&ps->ps_mtx);
 3697         free(ps, M_SUBPROC);
 3698 }
 3699 
 3700 struct sigacts *
 3701 sigacts_hold(struct sigacts *ps)
 3702 {
 3703 
 3704         refcount_acquire(&ps->ps_refcnt);
 3705         return (ps);
 3706 }
 3707 
 3708 void
 3709 sigacts_copy(struct sigacts *dest, struct sigacts *src)
 3710 {
 3711 
 3712         KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest"));
 3713         mtx_lock(&src->ps_mtx);
 3714         bcopy(src, dest, offsetof(struct sigacts, ps_refcnt));
 3715         mtx_unlock(&src->ps_mtx);
 3716 }
 3717 
 3718 int
 3719 sigacts_shared(struct sigacts *ps)
 3720 {
 3721 
 3722         return (ps->ps_refcnt > 1);
 3723 }
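
The reference count managed by these helpers matters because a sigacts structure can be shared between processes, for example when a child is created with rfork(2) and RFSIGSHARE. A hedged sketch of how that sharing is visible from userland (the handler and the choice of SIGUSR1 are arbitrary):

#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void
noop_handler(int sig)
{
        (void)sig;
}

int
main(void)
{
        struct sigaction sa;
        pid_t pid;

        /*
         * RFSIGSHARE makes the child share the parent's signal actions
         * (the shared sigacts keeps ps_refcnt above 1), so a handler
         * installed by the child should also be visible to the parent.
         */
        pid = rfork(RFPROC | RFFDG | RFSIGSHARE);
        if (pid == 0) {
                signal(SIGUSR1, noop_handler);
                _exit(0);
        }
        waitpid(pid, NULL, 0);

        sigaction(SIGUSR1, NULL, &sa);
        printf("SIGUSR1 handler %s\n",
            sa.sa_handler == noop_handler ? "shared with the child" :
            "unchanged");
        return (0);
}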
