FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_sig.c


    1 /*-
    2  * Copyright (c) 1982, 1986, 1989, 1991, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  * (c) UNIX System Laboratories, Inc.
    5  * All or some portions of this file are derived from material licensed
    6  * to the University of California by American Telephone and Telegraph
    7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
    8  * the permission of UNIX System Laboratories, Inc.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  * 4. Neither the name of the University nor the names of its contributors
   19  *    may be used to endorse or promote products derived from this software
   20  *    without specific prior written permission.
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   32  * SUCH DAMAGE.
   33  *
   34  *      @(#)kern_sig.c  8.7 (Berkeley) 4/18/94
   35  */
   36 
   37 #include <sys/cdefs.h>
   38 __FBSDID("$FreeBSD: releng/11.0/sys/kern/kern_sig.c 302328 2016-07-03 18:19:48Z kib $");
   39 
   40 #include "opt_compat.h"
   41 #include "opt_gzio.h"
   42 #include "opt_ktrace.h"
   43 
   44 #include <sys/param.h>
   45 #include <sys/ctype.h>
   46 #include <sys/systm.h>
   47 #include <sys/signalvar.h>
   48 #include <sys/vnode.h>
   49 #include <sys/acct.h>
   50 #include <sys/bus.h>
   51 #include <sys/capsicum.h>
   52 #include <sys/condvar.h>
   53 #include <sys/event.h>
   54 #include <sys/fcntl.h>
   55 #include <sys/imgact.h>
   56 #include <sys/kernel.h>
   57 #include <sys/ktr.h>
   58 #include <sys/ktrace.h>
   59 #include <sys/lock.h>
   60 #include <sys/malloc.h>
   61 #include <sys/mutex.h>
   62 #include <sys/refcount.h>
   63 #include <sys/namei.h>
   64 #include <sys/proc.h>
   65 #include <sys/procdesc.h>
   66 #include <sys/posix4.h>
   67 #include <sys/pioctl.h>
   68 #include <sys/racct.h>
   69 #include <sys/resourcevar.h>
   70 #include <sys/sdt.h>
   71 #include <sys/sbuf.h>
   72 #include <sys/sleepqueue.h>
   73 #include <sys/smp.h>
   74 #include <sys/stat.h>
   75 #include <sys/sx.h>
   76 #include <sys/syscallsubr.h>
   77 #include <sys/sysctl.h>
   78 #include <sys/sysent.h>
   79 #include <sys/syslog.h>
   80 #include <sys/sysproto.h>
   81 #include <sys/timers.h>
   82 #include <sys/unistd.h>
   83 #include <sys/wait.h>
   84 #include <vm/vm.h>
   85 #include <vm/vm_extern.h>
   86 #include <vm/uma.h>
   87 
   88 #include <sys/jail.h>
   89 
   90 #include <machine/cpu.h>
   91 
   92 #include <security/audit/audit.h>
   93 
   94 #define ONSIG   32              /* NSIG for osig* syscalls.  XXX. */
   95 
   96 SDT_PROVIDER_DECLARE(proc);
   97 SDT_PROBE_DEFINE3(proc, , , signal__send,
   98     "struct thread *", "struct proc *", "int");
   99 SDT_PROBE_DEFINE2(proc, , , signal__clear,
  100     "int", "ksiginfo_t *");
  101 SDT_PROBE_DEFINE3(proc, , , signal__discard,
  102     "struct thread *", "struct proc *", "int");
  103 
  104 static int      coredump(struct thread *);
  105 static int      killpg1(struct thread *td, int sig, int pgid, int all,
  106                     ksiginfo_t *ksi);
  107 static int      issignal(struct thread *td);
  108 static int      sigprop(int sig);
  109 static void     tdsigwakeup(struct thread *, int, sig_t, int);
  110 static int      sig_suspend_threads(struct thread *, struct proc *, int);
  111 static int      filt_sigattach(struct knote *kn);
  112 static void     filt_sigdetach(struct knote *kn);
  113 static int      filt_signal(struct knote *kn, long hint);
  114 static struct thread *sigtd(struct proc *p, int sig, int prop);
  115 static void     sigqueue_start(void);
  116 
  117 static uma_zone_t       ksiginfo_zone = NULL;
  118 struct filterops sig_filtops = {
  119         .f_isfd = 0,
  120         .f_attach = filt_sigattach,
  121         .f_detach = filt_sigdetach,
  122         .f_event = filt_signal,
  123 };
  124 
  125 static int      kern_logsigexit = 1;
  126 SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
  127     &kern_logsigexit, 0,
  128     "Log processes quitting on abnormal signals to syslog(3)");
  129 
  130 static int      kern_forcesigexit = 1;
  131 SYSCTL_INT(_kern, OID_AUTO, forcesigexit, CTLFLAG_RW,
  132     &kern_forcesigexit, 0, "Force trap signal to be handled");
  133 
  134 static SYSCTL_NODE(_kern, OID_AUTO, sigqueue, CTLFLAG_RW, 0,
  135     "POSIX real time signal");
  136 
  137 static int      max_pending_per_proc = 128;
  138 SYSCTL_INT(_kern_sigqueue, OID_AUTO, max_pending_per_proc, CTLFLAG_RW,
  139     &max_pending_per_proc, 0, "Max pending signals per proc");
  140 
  141 static int      preallocate_siginfo = 1024;
  142 SYSCTL_INT(_kern_sigqueue, OID_AUTO, preallocate, CTLFLAG_RDTUN,
  143     &preallocate_siginfo, 0, "Preallocated signal memory size");
  144 
  145 static int      signal_overflow = 0;
  146 SYSCTL_INT(_kern_sigqueue, OID_AUTO, overflow, CTLFLAG_RD,
   147     &signal_overflow, 0, "Number of signals overflowed");
  148 
  149 static int      signal_alloc_fail = 0;
  150 SYSCTL_INT(_kern_sigqueue, OID_AUTO, alloc_fail, CTLFLAG_RD,
   151     &signal_alloc_fail, 0, "Number of signals that failed to be allocated");
  152 
  153 SYSINIT(signal, SI_SUB_P1003_1B, SI_ORDER_FIRST+3, sigqueue_start, NULL);
  154 
  155 /*
   156  * Policy -- Can ucred cr1 send SIGIO to a process with ucred cr2?
  157  * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
  158  * in the right situations.
  159  */
  160 #define CANSIGIO(cr1, cr2) \
  161         ((cr1)->cr_uid == 0 || \
  162             (cr1)->cr_ruid == (cr2)->cr_ruid || \
  163             (cr1)->cr_uid == (cr2)->cr_ruid || \
  164             (cr1)->cr_ruid == (cr2)->cr_uid || \
  165             (cr1)->cr_uid == (cr2)->cr_uid)
  166 
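
/*
 * Illustrative sketch (not part of the original file): how CANSIGIO()
 * is typically consulted before queueing SIGIO to a process, as the
 * SIGIO delivery path (e.g. pgsigio()) does.  The helper below and its
 * name are hypothetical.
 */
#if 0
static void
example_deliver_sigio(struct ucred *sender, struct proc *p)
{
        PROC_LOCK(p);
        if (CANSIGIO(sender, p->p_ucred))
                kern_psignal(p, SIGIO);         /* queue SIGIO if policy allows */
        PROC_UNLOCK(p);
}
#endif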
  167 static int      sugid_coredump;
  168 SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RWTUN,
  169     &sugid_coredump, 0, "Allow setuid and setgid processes to dump core");
  170 
  171 static int      capmode_coredump;
  172 SYSCTL_INT(_kern, OID_AUTO, capmode_coredump, CTLFLAG_RWTUN,
  173     &capmode_coredump, 0, "Allow processes in capability mode to dump core");
  174 
  175 static int      do_coredump = 1;
  176 SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
  177         &do_coredump, 0, "Enable/Disable coredumps");
  178 
  179 static int      set_core_nodump_flag = 0;
  180 SYSCTL_INT(_kern, OID_AUTO, nodump_coredump, CTLFLAG_RW, &set_core_nodump_flag,
  181         0, "Enable setting the NODUMP flag on coredump files");
  182 
  183 static int      coredump_devctl = 0;
  184 SYSCTL_INT(_kern, OID_AUTO, coredump_devctl, CTLFLAG_RW, &coredump_devctl,
  185         0, "Generate a devctl notification when processes coredump");
  186 
  187 /*
  188  * Signal properties and actions.
  189  * The array below categorizes the signals and their default actions
  190  * according to the following properties:
  191  */
  192 #define SA_KILL         0x01            /* terminates process by default */
  193 #define SA_CORE         0x02            /* ditto and coredumps */
  194 #define SA_STOP         0x04            /* suspend process */
  195 #define SA_TTYSTOP      0x08            /* ditto, from tty */
  196 #define SA_IGNORE       0x10            /* ignore by default */
  197 #define SA_CONT         0x20            /* continue if suspended */
  198 #define SA_CANTMASK     0x40            /* non-maskable, catchable */
  199 
  200 static int sigproptbl[NSIG] = {
  201         SA_KILL,                        /* SIGHUP */
  202         SA_KILL,                        /* SIGINT */
  203         SA_KILL|SA_CORE,                /* SIGQUIT */
  204         SA_KILL|SA_CORE,                /* SIGILL */
  205         SA_KILL|SA_CORE,                /* SIGTRAP */
  206         SA_KILL|SA_CORE,                /* SIGABRT */
  207         SA_KILL|SA_CORE,                /* SIGEMT */
  208         SA_KILL|SA_CORE,                /* SIGFPE */
  209         SA_KILL,                        /* SIGKILL */
  210         SA_KILL|SA_CORE,                /* SIGBUS */
  211         SA_KILL|SA_CORE,                /* SIGSEGV */
  212         SA_KILL|SA_CORE,                /* SIGSYS */
  213         SA_KILL,                        /* SIGPIPE */
  214         SA_KILL,                        /* SIGALRM */
  215         SA_KILL,                        /* SIGTERM */
  216         SA_IGNORE,                      /* SIGURG */
  217         SA_STOP,                        /* SIGSTOP */
  218         SA_STOP|SA_TTYSTOP,             /* SIGTSTP */
  219         SA_IGNORE|SA_CONT,              /* SIGCONT */
  220         SA_IGNORE,                      /* SIGCHLD */
  221         SA_STOP|SA_TTYSTOP,             /* SIGTTIN */
  222         SA_STOP|SA_TTYSTOP,             /* SIGTTOU */
  223         SA_IGNORE,                      /* SIGIO */
  224         SA_KILL,                        /* SIGXCPU */
  225         SA_KILL,                        /* SIGXFSZ */
  226         SA_KILL,                        /* SIGVTALRM */
  227         SA_KILL,                        /* SIGPROF */
  228         SA_IGNORE,                      /* SIGWINCH  */
  229         SA_IGNORE,                      /* SIGINFO */
  230         SA_KILL,                        /* SIGUSR1 */
  231         SA_KILL,                        /* SIGUSR2 */
  232 };
  233 
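
/*
 * Illustrative sketch (not part of the original file): how the SA_*
 * property bits above are typically tested via sigprop() to classify a
 * signal's default action.  The helper and its name are hypothetical.
 */
#if 0
static const char *
example_default_action(int sig)
{
        int prop = sigprop(sig);

        if (prop & SA_STOP)
                return ("stop the process");
        if (prop & SA_CORE)
                return ("terminate and dump core");
        if (prop & SA_KILL)
                return ("terminate");
        if (prop & SA_IGNORE)
                return ("ignore");
        return ("no default property");
}
#endif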
  234 static void reschedule_signals(struct proc *p, sigset_t block, int flags);
  235 
  236 static void
  237 sigqueue_start(void)
  238 {
  239         ksiginfo_zone = uma_zcreate("ksiginfo", sizeof(ksiginfo_t),
  240                 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
  241         uma_prealloc(ksiginfo_zone, preallocate_siginfo);
  242         p31b_setcfg(CTL_P1003_1B_REALTIME_SIGNALS, _POSIX_REALTIME_SIGNALS);
  243         p31b_setcfg(CTL_P1003_1B_RTSIG_MAX, SIGRTMAX - SIGRTMIN + 1);
  244         p31b_setcfg(CTL_P1003_1B_SIGQUEUE_MAX, max_pending_per_proc);
  245 }
  246 
  247 ksiginfo_t *
  248 ksiginfo_alloc(int wait)
  249 {
  250         int flags;
  251 
  252         flags = M_ZERO;
  253         if (! wait)
  254                 flags |= M_NOWAIT;
  255         if (ksiginfo_zone != NULL)
  256                 return ((ksiginfo_t *)uma_zalloc(ksiginfo_zone, flags));
  257         return (NULL);
  258 }
  259 
  260 void
  261 ksiginfo_free(ksiginfo_t *ksi)
  262 {
  263         uma_zfree(ksiginfo_zone, ksi);
  264 }
  265 
  266 static __inline int
  267 ksiginfo_tryfree(ksiginfo_t *ksi)
  268 {
  269         if (!(ksi->ksi_flags & KSI_EXT)) {
  270                 uma_zfree(ksiginfo_zone, ksi);
  271                 return (1);
  272         }
  273         return (0);
  274 }
  275 
  276 void
  277 sigqueue_init(sigqueue_t *list, struct proc *p)
  278 {
  279         SIGEMPTYSET(list->sq_signals);
  280         SIGEMPTYSET(list->sq_kill);
  281         TAILQ_INIT(&list->sq_list);
  282         list->sq_proc = p;
  283         list->sq_flags = SQ_INIT;
  284 }
  285 
  286 /*
  287  * Get a signal's ksiginfo.
  288  * Return:
  289  *      0       -       signal not found
  290  *      others  -       signal number
  291  */
  292 static int
  293 sigqueue_get(sigqueue_t *sq, int signo, ksiginfo_t *si)
  294 {
  295         struct proc *p = sq->sq_proc;
  296         struct ksiginfo *ksi, *next;
  297         int count = 0;
  298 
  299         KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
  300 
  301         if (!SIGISMEMBER(sq->sq_signals, signo))
  302                 return (0);
  303 
  304         if (SIGISMEMBER(sq->sq_kill, signo)) {
  305                 count++;
  306                 SIGDELSET(sq->sq_kill, signo);
  307         }
  308 
  309         TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
  310                 if (ksi->ksi_signo == signo) {
  311                         if (count == 0) {
  312                                 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
  313                                 ksi->ksi_sigq = NULL;
  314                                 ksiginfo_copy(ksi, si);
  315                                 if (ksiginfo_tryfree(ksi) && p != NULL)
  316                                         p->p_pendingcnt--;
  317                         }
  318                         if (++count > 1)
  319                                 break;
  320                 }
  321         }
  322 
  323         if (count <= 1)
  324                 SIGDELSET(sq->sq_signals, signo);
  325         si->ksi_signo = signo;
  326         return (signo);
  327 }
  328 
  329 void
  330 sigqueue_take(ksiginfo_t *ksi)
  331 {
  332         struct ksiginfo *kp;
  333         struct proc     *p;
  334         sigqueue_t      *sq;
  335 
  336         if (ksi == NULL || (sq = ksi->ksi_sigq) == NULL)
  337                 return;
  338 
  339         p = sq->sq_proc;
  340         TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
  341         ksi->ksi_sigq = NULL;
  342         if (!(ksi->ksi_flags & KSI_EXT) && p != NULL)
  343                 p->p_pendingcnt--;
  344 
  345         for (kp = TAILQ_FIRST(&sq->sq_list); kp != NULL;
  346              kp = TAILQ_NEXT(kp, ksi_link)) {
  347                 if (kp->ksi_signo == ksi->ksi_signo)
  348                         break;
  349         }
  350         if (kp == NULL && !SIGISMEMBER(sq->sq_kill, ksi->ksi_signo))
  351                 SIGDELSET(sq->sq_signals, ksi->ksi_signo);
  352 }
  353 
  354 static int
  355 sigqueue_add(sigqueue_t *sq, int signo, ksiginfo_t *si)
  356 {
  357         struct proc *p = sq->sq_proc;
  358         struct ksiginfo *ksi;
  359         int ret = 0;
  360 
  361         KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
  362 
  363         if (signo == SIGKILL || signo == SIGSTOP || si == NULL) {
  364                 SIGADDSET(sq->sq_kill, signo);
  365                 goto out_set_bit;
  366         }
  367 
  368         /* directly insert the ksi, don't copy it */
  369         if (si->ksi_flags & KSI_INS) {
  370                 if (si->ksi_flags & KSI_HEAD)
  371                         TAILQ_INSERT_HEAD(&sq->sq_list, si, ksi_link);
  372                 else
  373                         TAILQ_INSERT_TAIL(&sq->sq_list, si, ksi_link);
  374                 si->ksi_sigq = sq;
  375                 goto out_set_bit;
  376         }
  377 
  378         if (__predict_false(ksiginfo_zone == NULL)) {
  379                 SIGADDSET(sq->sq_kill, signo);
  380                 goto out_set_bit;
  381         }
  382 
  383         if (p != NULL && p->p_pendingcnt >= max_pending_per_proc) {
  384                 signal_overflow++;
  385                 ret = EAGAIN;
  386         } else if ((ksi = ksiginfo_alloc(0)) == NULL) {
  387                 signal_alloc_fail++;
  388                 ret = EAGAIN;
  389         } else {
  390                 if (p != NULL)
  391                         p->p_pendingcnt++;
  392                 ksiginfo_copy(si, ksi);
  393                 ksi->ksi_signo = signo;
  394                 if (si->ksi_flags & KSI_HEAD)
  395                         TAILQ_INSERT_HEAD(&sq->sq_list, ksi, ksi_link);
  396                 else
  397                         TAILQ_INSERT_TAIL(&sq->sq_list, ksi, ksi_link);
  398                 ksi->ksi_sigq = sq;
  399         }
  400 
  401         if ((si->ksi_flags & KSI_TRAP) != 0 ||
  402             (si->ksi_flags & KSI_SIGQ) == 0) {
  403                 if (ret != 0)
  404                         SIGADDSET(sq->sq_kill, signo);
  405                 ret = 0;
  406                 goto out_set_bit;
  407         }
  408 
  409         if (ret != 0)
  410                 return (ret);
  411 
  412 out_set_bit:
  413         SIGADDSET(sq->sq_signals, signo);
  414         return (ret);
  415 }
  416 
  417 void
  418 sigqueue_flush(sigqueue_t *sq)
  419 {
  420         struct proc *p = sq->sq_proc;
  421         ksiginfo_t *ksi;
  422 
  423         KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
  424 
  425         if (p != NULL)
  426                 PROC_LOCK_ASSERT(p, MA_OWNED);
  427 
  428         while ((ksi = TAILQ_FIRST(&sq->sq_list)) != NULL) {
  429                 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
  430                 ksi->ksi_sigq = NULL;
  431                 if (ksiginfo_tryfree(ksi) && p != NULL)
  432                         p->p_pendingcnt--;
  433         }
  434 
  435         SIGEMPTYSET(sq->sq_signals);
  436         SIGEMPTYSET(sq->sq_kill);
  437 }
  438 
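
/*
 * Illustrative sketch (not part of the original file): the sigqueue
 * lifecycle used throughout this file, on a private queue with no
 * owning process (the same pattern as the "worklist" queue in
 * sigqueue_delete_set_proc() below).  The helper is hypothetical.
 */
#if 0
static void
example_sigqueue_roundtrip(void)
{
        sigqueue_t sq;
        ksiginfo_t ksi, out;

        sigqueue_init(&sq, NULL);               /* no owning process */
        ksiginfo_init(&ksi);
        ksi.ksi_signo = SIGUSR1;
        ksi.ksi_code = SI_USER;
        (void)sigqueue_add(&sq, SIGUSR1, &ksi); /* queue signal + siginfo */
        if (sigqueue_get(&sq, SIGUSR1, &out) == SIGUSR1)
                ;                               /* "out" holds the siginfo */
        sigqueue_flush(&sq);                    /* drop anything left */
}
#endif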
  439 static void
  440 sigqueue_move_set(sigqueue_t *src, sigqueue_t *dst, const sigset_t *set)
  441 {
  442         sigset_t tmp;
  443         struct proc *p1, *p2;
  444         ksiginfo_t *ksi, *next;
  445 
  446         KASSERT(src->sq_flags & SQ_INIT, ("src sigqueue not inited"));
  447         KASSERT(dst->sq_flags & SQ_INIT, ("dst sigqueue not inited"));
  448         p1 = src->sq_proc;
  449         p2 = dst->sq_proc;
  450         /* Move siginfo to target list */
  451         TAILQ_FOREACH_SAFE(ksi, &src->sq_list, ksi_link, next) {
  452                 if (SIGISMEMBER(*set, ksi->ksi_signo)) {
  453                         TAILQ_REMOVE(&src->sq_list, ksi, ksi_link);
  454                         if (p1 != NULL)
  455                                 p1->p_pendingcnt--;
  456                         TAILQ_INSERT_TAIL(&dst->sq_list, ksi, ksi_link);
  457                         ksi->ksi_sigq = dst;
  458                         if (p2 != NULL)
  459                                 p2->p_pendingcnt++;
  460                 }
  461         }
  462 
  463         /* Move pending bits to target list */
  464         tmp = src->sq_kill;
  465         SIGSETAND(tmp, *set);
  466         SIGSETOR(dst->sq_kill, tmp);
  467         SIGSETNAND(src->sq_kill, tmp);
  468 
  469         tmp = src->sq_signals;
  470         SIGSETAND(tmp, *set);
  471         SIGSETOR(dst->sq_signals, tmp);
  472         SIGSETNAND(src->sq_signals, tmp);
  473 }
  474 
  475 #if 0
  476 static void
  477 sigqueue_move(sigqueue_t *src, sigqueue_t *dst, int signo)
  478 {
  479         sigset_t set;
  480 
  481         SIGEMPTYSET(set);
  482         SIGADDSET(set, signo);
  483         sigqueue_move_set(src, dst, &set);
  484 }
  485 #endif
  486 
  487 static void
  488 sigqueue_delete_set(sigqueue_t *sq, const sigset_t *set)
  489 {
  490         struct proc *p = sq->sq_proc;
  491         ksiginfo_t *ksi, *next;
  492 
   493         KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
  494 
  495         /* Remove siginfo queue */
  496         TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
  497                 if (SIGISMEMBER(*set, ksi->ksi_signo)) {
  498                         TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
  499                         ksi->ksi_sigq = NULL;
  500                         if (ksiginfo_tryfree(ksi) && p != NULL)
  501                                 p->p_pendingcnt--;
  502                 }
  503         }
  504         SIGSETNAND(sq->sq_kill, *set);
  505         SIGSETNAND(sq->sq_signals, *set);
  506 }
  507 
  508 void
  509 sigqueue_delete(sigqueue_t *sq, int signo)
  510 {
  511         sigset_t set;
  512 
  513         SIGEMPTYSET(set);
  514         SIGADDSET(set, signo);
  515         sigqueue_delete_set(sq, &set);
  516 }
  517 
  518 /* Remove a set of signals for a process */
  519 static void
  520 sigqueue_delete_set_proc(struct proc *p, const sigset_t *set)
  521 {
  522         sigqueue_t worklist;
  523         struct thread *td0;
  524 
  525         PROC_LOCK_ASSERT(p, MA_OWNED);
  526 
  527         sigqueue_init(&worklist, NULL);
  528         sigqueue_move_set(&p->p_sigqueue, &worklist, set);
  529 
  530         FOREACH_THREAD_IN_PROC(p, td0)
  531                 sigqueue_move_set(&td0->td_sigqueue, &worklist, set);
  532 
  533         sigqueue_flush(&worklist);
  534 }
  535 
  536 void
  537 sigqueue_delete_proc(struct proc *p, int signo)
  538 {
  539         sigset_t set;
  540 
  541         SIGEMPTYSET(set);
  542         SIGADDSET(set, signo);
  543         sigqueue_delete_set_proc(p, &set);
  544 }
  545 
  546 static void
  547 sigqueue_delete_stopmask_proc(struct proc *p)
  548 {
  549         sigset_t set;
  550 
  551         SIGEMPTYSET(set);
  552         SIGADDSET(set, SIGSTOP);
  553         SIGADDSET(set, SIGTSTP);
  554         SIGADDSET(set, SIGTTIN);
  555         SIGADDSET(set, SIGTTOU);
  556         sigqueue_delete_set_proc(p, &set);
  557 }
  558 
  559 /*
   560  * Determine the signal that should be delivered to thread td, the
   561  * current thread; return 0 if none.  If there is a pending stop signal
   562  * with default action, the process stops in issignal().
  563  */
  564 int
  565 cursig(struct thread *td)
  566 {
  567         PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
  568         mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED);
  569         THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
  570         return (SIGPENDING(td) ? issignal(td) : 0);
  571 }
  572 
  573 /*
  574  * Arrange for ast() to handle unmasked pending signals on return to user
  575  * mode.  This must be called whenever a signal is added to td_sigqueue or
  576  * unmasked in td_sigmask.
  577  */
  578 void
  579 signotify(struct thread *td)
  580 {
  581         struct proc *p;
  582 
  583         p = td->td_proc;
  584 
  585         PROC_LOCK_ASSERT(p, MA_OWNED);
  586 
  587         if (SIGPENDING(td)) {
  588                 thread_lock(td);
  589                 td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING;
  590                 thread_unlock(td);
  591         }
  592 }
  593 
  594 int
  595 sigonstack(size_t sp)
  596 {
  597         struct thread *td = curthread;
  598 
  599         return ((td->td_pflags & TDP_ALTSTACK) ?
  600 #if defined(COMPAT_43)
  601             ((td->td_sigstk.ss_size == 0) ?
  602                 (td->td_sigstk.ss_flags & SS_ONSTACK) :
  603                 ((sp - (size_t)td->td_sigstk.ss_sp) < td->td_sigstk.ss_size))
  604 #else
  605             ((sp - (size_t)td->td_sigstk.ss_sp) < td->td_sigstk.ss_size)
  606 #endif
  607             : 0);
  608 }
  609 
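
/*
 * Illustrative sketch (not part of the original file): how an MD
 * sendsig()-style routine might combine sigonstack() with the
 * per-signal SA_ONSTACK state to pick a stack pointer.  The helper,
 * its name, and its arguments are hypothetical.
 */
#if 0
static uintptr_t
example_pick_signal_sp(struct thread *td, int sig, uintptr_t sp)
{
        struct sigacts *ps = td->td_proc->p_sigacts;

        if ((td->td_pflags & TDP_ALTSTACK) != 0 &&
            !sigonstack(sp) && SIGISMEMBER(ps->ps_sigonstack, sig))
                return ((uintptr_t)td->td_sigstk.ss_sp +
                    td->td_sigstk.ss_size);     /* top of alternate stack */
        return (sp);                            /* stay on current stack */
}
#endif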
  610 static __inline int
  611 sigprop(int sig)
  612 {
  613 
  614         if (sig > 0 && sig < NSIG)
  615                 return (sigproptbl[_SIG_IDX(sig)]);
  616         return (0);
  617 }
  618 
  619 int
  620 sig_ffs(sigset_t *set)
  621 {
  622         int i;
  623 
  624         for (i = 0; i < _SIG_WORDS; i++)
  625                 if (set->__bits[i])
  626                         return (ffs(set->__bits[i]) + (i * 32));
  627         return (0);
  628 }
  629 
  630 static bool
  631 sigact_flag_test(const struct sigaction *act, int flag)
  632 {
  633 
  634         /*
  635          * SA_SIGINFO is reset when signal disposition is set to
  636          * ignore or default.  Other flags are kept according to user
  637          * settings.
  638          */
  639         return ((act->sa_flags & flag) != 0 && (flag != SA_SIGINFO ||
  640             ((__sighandler_t *)act->sa_sigaction != SIG_IGN &&
  641             (__sighandler_t *)act->sa_sigaction != SIG_DFL)));
  642 }
  643 
  644 /*
  645  * kern_sigaction
  646  * sigaction
  647  * freebsd4_sigaction
  648  * osigaction
  649  */
  650 int
  651 kern_sigaction(struct thread *td, int sig, const struct sigaction *act,
  652     struct sigaction *oact, int flags)
  653 {
  654         struct sigacts *ps;
  655         struct proc *p = td->td_proc;
  656 
  657         if (!_SIG_VALID(sig))
  658                 return (EINVAL);
  659         if (act != NULL && act->sa_handler != SIG_DFL &&
  660             act->sa_handler != SIG_IGN && (act->sa_flags & ~(SA_ONSTACK |
  661             SA_RESTART | SA_RESETHAND | SA_NOCLDSTOP | SA_NODEFER |
  662             SA_NOCLDWAIT | SA_SIGINFO)) != 0)
  663                 return (EINVAL);
  664 
  665         PROC_LOCK(p);
  666         ps = p->p_sigacts;
  667         mtx_lock(&ps->ps_mtx);
  668         if (oact) {
  669                 oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
  670                 oact->sa_flags = 0;
  671                 if (SIGISMEMBER(ps->ps_sigonstack, sig))
  672                         oact->sa_flags |= SA_ONSTACK;
  673                 if (!SIGISMEMBER(ps->ps_sigintr, sig))
  674                         oact->sa_flags |= SA_RESTART;
  675                 if (SIGISMEMBER(ps->ps_sigreset, sig))
  676                         oact->sa_flags |= SA_RESETHAND;
  677                 if (SIGISMEMBER(ps->ps_signodefer, sig))
  678                         oact->sa_flags |= SA_NODEFER;
  679                 if (SIGISMEMBER(ps->ps_siginfo, sig)) {
  680                         oact->sa_flags |= SA_SIGINFO;
  681                         oact->sa_sigaction =
  682                             (__siginfohandler_t *)ps->ps_sigact[_SIG_IDX(sig)];
  683                 } else
  684                         oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
  685                 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP)
  686                         oact->sa_flags |= SA_NOCLDSTOP;
  687                 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT)
  688                         oact->sa_flags |= SA_NOCLDWAIT;
  689         }
  690         if (act) {
  691                 if ((sig == SIGKILL || sig == SIGSTOP) &&
  692                     act->sa_handler != SIG_DFL) {
  693                         mtx_unlock(&ps->ps_mtx);
  694                         PROC_UNLOCK(p);
  695                         return (EINVAL);
  696                 }
  697 
  698                 /*
  699                  * Change setting atomically.
  700                  */
  701 
  702                 ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
  703                 SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
  704                 if (sigact_flag_test(act, SA_SIGINFO)) {
  705                         ps->ps_sigact[_SIG_IDX(sig)] =
  706                             (__sighandler_t *)act->sa_sigaction;
  707                         SIGADDSET(ps->ps_siginfo, sig);
  708                 } else {
  709                         ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
  710                         SIGDELSET(ps->ps_siginfo, sig);
  711                 }
  712                 if (!sigact_flag_test(act, SA_RESTART))
  713                         SIGADDSET(ps->ps_sigintr, sig);
  714                 else
  715                         SIGDELSET(ps->ps_sigintr, sig);
  716                 if (sigact_flag_test(act, SA_ONSTACK))
  717                         SIGADDSET(ps->ps_sigonstack, sig);
  718                 else
  719                         SIGDELSET(ps->ps_sigonstack, sig);
  720                 if (sigact_flag_test(act, SA_RESETHAND))
  721                         SIGADDSET(ps->ps_sigreset, sig);
  722                 else
  723                         SIGDELSET(ps->ps_sigreset, sig);
  724                 if (sigact_flag_test(act, SA_NODEFER))
  725                         SIGADDSET(ps->ps_signodefer, sig);
  726                 else
  727                         SIGDELSET(ps->ps_signodefer, sig);
  728                 if (sig == SIGCHLD) {
  729                         if (act->sa_flags & SA_NOCLDSTOP)
  730                                 ps->ps_flag |= PS_NOCLDSTOP;
  731                         else
  732                                 ps->ps_flag &= ~PS_NOCLDSTOP;
  733                         if (act->sa_flags & SA_NOCLDWAIT) {
  734                                 /*
  735                                  * Paranoia: since SA_NOCLDWAIT is implemented
  736                                  * by reparenting the dying child to PID 1 (and
   737                  * trusting it to reap the zombie), PID 1 itself
  738                                  * is forbidden to set SA_NOCLDWAIT.
  739                                  */
  740                                 if (p->p_pid == 1)
  741                                         ps->ps_flag &= ~PS_NOCLDWAIT;
  742                                 else
  743                                         ps->ps_flag |= PS_NOCLDWAIT;
  744                         } else
  745                                 ps->ps_flag &= ~PS_NOCLDWAIT;
  746                         if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
  747                                 ps->ps_flag |= PS_CLDSIGIGN;
  748                         else
  749                                 ps->ps_flag &= ~PS_CLDSIGIGN;
  750                 }
  751                 /*
  752                  * Set bit in ps_sigignore for signals that are set to SIG_IGN,
  753                  * and for signals set to SIG_DFL where the default is to
  754                  * ignore. However, don't put SIGCONT in ps_sigignore, as we
  755                  * have to restart the process.
  756                  */
  757                 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
  758                     (sigprop(sig) & SA_IGNORE &&
  759                      ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
  760                         /* never to be seen again */
  761                         sigqueue_delete_proc(p, sig);
  762                         if (sig != SIGCONT)
  763                                 /* easier in psignal */
  764                                 SIGADDSET(ps->ps_sigignore, sig);
  765                         SIGDELSET(ps->ps_sigcatch, sig);
  766                 } else {
  767                         SIGDELSET(ps->ps_sigignore, sig);
  768                         if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
  769                                 SIGDELSET(ps->ps_sigcatch, sig);
  770                         else
  771                                 SIGADDSET(ps->ps_sigcatch, sig);
  772                 }
  773 #ifdef COMPAT_FREEBSD4
  774                 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
  775                     ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
  776                     (flags & KSA_FREEBSD4) == 0)
  777                         SIGDELSET(ps->ps_freebsd4, sig);
  778                 else
  779                         SIGADDSET(ps->ps_freebsd4, sig);
  780 #endif
  781 #ifdef COMPAT_43
  782                 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
  783                     ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
  784                     (flags & KSA_OSIGSET) == 0)
  785                         SIGDELSET(ps->ps_osigset, sig);
  786                 else
  787                         SIGADDSET(ps->ps_osigset, sig);
  788 #endif
  789         }
  790         mtx_unlock(&ps->ps_mtx);
  791         PROC_UNLOCK(p);
  792         return (0);
  793 }
  794 
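
/*
 * Illustrative sketch (not part of the original file): an in-kernel
 * caller using kern_sigaction() to set a disposition for the current
 * process, here ignoring SIGPIPE.  The helper is hypothetical.
 */
#if 0
static int
example_ignore_sigpipe(struct thread *td)
{
        struct sigaction sa;

        bzero(&sa, sizeof(sa));
        sa.sa_handler = SIG_IGN;                /* empty mask, no flags */
        return (kern_sigaction(td, SIGPIPE, &sa, NULL, 0));
}
#endif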
  795 #ifndef _SYS_SYSPROTO_H_
  796 struct sigaction_args {
  797         int     sig;
  798         struct  sigaction *act;
  799         struct  sigaction *oact;
  800 };
  801 #endif
  802 int
  803 sys_sigaction(td, uap)
  804         struct thread *td;
  805         register struct sigaction_args *uap;
  806 {
  807         struct sigaction act, oact;
  808         register struct sigaction *actp, *oactp;
  809         int error;
  810 
  811         actp = (uap->act != NULL) ? &act : NULL;
  812         oactp = (uap->oact != NULL) ? &oact : NULL;
  813         if (actp) {
  814                 error = copyin(uap->act, actp, sizeof(act));
  815                 if (error)
  816                         return (error);
  817         }
  818         error = kern_sigaction(td, uap->sig, actp, oactp, 0);
  819         if (oactp && !error)
  820                 error = copyout(oactp, uap->oact, sizeof(oact));
  821         return (error);
  822 }
  823 
  824 #ifdef COMPAT_FREEBSD4
  825 #ifndef _SYS_SYSPROTO_H_
  826 struct freebsd4_sigaction_args {
  827         int     sig;
  828         struct  sigaction *act;
  829         struct  sigaction *oact;
  830 };
  831 #endif
  832 int
  833 freebsd4_sigaction(td, uap)
  834         struct thread *td;
  835         register struct freebsd4_sigaction_args *uap;
  836 {
  837         struct sigaction act, oact;
  838         register struct sigaction *actp, *oactp;
  839         int error;
  840 
  841 
  842         actp = (uap->act != NULL) ? &act : NULL;
  843         oactp = (uap->oact != NULL) ? &oact : NULL;
  844         if (actp) {
  845                 error = copyin(uap->act, actp, sizeof(act));
  846                 if (error)
  847                         return (error);
  848         }
  849         error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4);
  850         if (oactp && !error)
  851                 error = copyout(oactp, uap->oact, sizeof(oact));
  852         return (error);
  853 }
   854 #endif  /* COMPAT_FREEBSD4 */
  855 
  856 #ifdef COMPAT_43        /* XXX - COMPAT_FBSD3 */
  857 #ifndef _SYS_SYSPROTO_H_
  858 struct osigaction_args {
  859         int     signum;
  860         struct  osigaction *nsa;
  861         struct  osigaction *osa;
  862 };
  863 #endif
  864 int
  865 osigaction(td, uap)
  866         struct thread *td;
  867         register struct osigaction_args *uap;
  868 {
  869         struct osigaction sa;
  870         struct sigaction nsa, osa;
  871         register struct sigaction *nsap, *osap;
  872         int error;
  873 
  874         if (uap->signum <= 0 || uap->signum >= ONSIG)
  875                 return (EINVAL);
  876 
  877         nsap = (uap->nsa != NULL) ? &nsa : NULL;
  878         osap = (uap->osa != NULL) ? &osa : NULL;
  879 
  880         if (nsap) {
  881                 error = copyin(uap->nsa, &sa, sizeof(sa));
  882                 if (error)
  883                         return (error);
  884                 nsap->sa_handler = sa.sa_handler;
  885                 nsap->sa_flags = sa.sa_flags;
  886                 OSIG2SIG(sa.sa_mask, nsap->sa_mask);
  887         }
  888         error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
  889         if (osap && !error) {
  890                 sa.sa_handler = osap->sa_handler;
  891                 sa.sa_flags = osap->sa_flags;
  892                 SIG2OSIG(osap->sa_mask, sa.sa_mask);
  893                 error = copyout(&sa, uap->osa, sizeof(sa));
  894         }
  895         return (error);
  896 }
  897 
  898 #if !defined(__i386__)
  899 /* Avoid replicating the same stub everywhere */
  900 int
  901 osigreturn(td, uap)
  902         struct thread *td;
  903         struct osigreturn_args *uap;
  904 {
  905 
  906         return (nosys(td, (struct nosys_args *)uap));
  907 }
  908 #endif
  909 #endif /* COMPAT_43 */
  910 
  911 /*
  912  * Initialize signal state for process 0;
  913  * set to ignore signals that are ignored by default.
  914  */
  915 void
  916 siginit(p)
  917         struct proc *p;
  918 {
  919         register int i;
  920         struct sigacts *ps;
  921 
  922         PROC_LOCK(p);
  923         ps = p->p_sigacts;
  924         mtx_lock(&ps->ps_mtx);
  925         for (i = 1; i <= NSIG; i++) {
  926                 if (sigprop(i) & SA_IGNORE && i != SIGCONT) {
  927                         SIGADDSET(ps->ps_sigignore, i);
  928                 }
  929         }
  930         mtx_unlock(&ps->ps_mtx);
  931         PROC_UNLOCK(p);
  932 }
  933 
  934 /*
  935  * Reset specified signal to the default disposition.
  936  */
  937 static void
  938 sigdflt(struct sigacts *ps, int sig)
  939 {
  940 
  941         mtx_assert(&ps->ps_mtx, MA_OWNED);
  942         SIGDELSET(ps->ps_sigcatch, sig);
  943         if ((sigprop(sig) & SA_IGNORE) != 0 && sig != SIGCONT)
  944                 SIGADDSET(ps->ps_sigignore, sig);
  945         ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
  946         SIGDELSET(ps->ps_siginfo, sig);
  947 }
  948 
  949 /*
  950  * Reset signals for an exec of the specified process.
  951  */
  952 void
  953 execsigs(struct proc *p)
  954 {
  955         sigset_t osigignore;
  956         struct sigacts *ps;
  957         int sig;
  958         struct thread *td;
  959 
  960         /*
  961          * Reset caught signals.  Held signals remain held
  962          * through td_sigmask (unless they were caught,
  963          * and are now ignored by default).
  964          */
  965         PROC_LOCK_ASSERT(p, MA_OWNED);
  966         td = FIRST_THREAD_IN_PROC(p);
  967         ps = p->p_sigacts;
  968         mtx_lock(&ps->ps_mtx);
  969         while (SIGNOTEMPTY(ps->ps_sigcatch)) {
  970                 sig = sig_ffs(&ps->ps_sigcatch);
  971                 sigdflt(ps, sig);
  972                 if ((sigprop(sig) & SA_IGNORE) != 0)
  973                         sigqueue_delete_proc(p, sig);
  974         }
  975 
  976         /*
  977          * As CloudABI processes cannot modify signal handlers, fully
  978          * reset all signals to their default behavior. Do ignore
  979          * SIGPIPE, as it would otherwise be impossible to recover from
  980          * writes to broken pipes and sockets.
  981          */
  982         if (SV_PROC_ABI(p) == SV_ABI_CLOUDABI) {
  983                 osigignore = ps->ps_sigignore;
  984                 while (SIGNOTEMPTY(osigignore)) {
  985                         sig = sig_ffs(&osigignore);
  986                         SIGDELSET(osigignore, sig);
  987                         if (sig != SIGPIPE)
  988                                 sigdflt(ps, sig);
  989                 }
  990                 SIGADDSET(ps->ps_sigignore, SIGPIPE);
  991         }
  992 
  993         /*
  994          * Reset stack state to the user stack.
  995          * Clear set of signals caught on the signal stack.
  996          */
  997         td->td_sigstk.ss_flags = SS_DISABLE;
  998         td->td_sigstk.ss_size = 0;
  999         td->td_sigstk.ss_sp = 0;
 1000         td->td_pflags &= ~TDP_ALTSTACK;
 1001         /*
 1002          * Reset no zombies if child dies flag as Solaris does.
  1003          * Reset the "no zombies if child dies" flag, as Solaris does.
 1004         ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
 1005         if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
 1006                 ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
 1007         mtx_unlock(&ps->ps_mtx);
 1008 }
 1009 
 1010 /*
 1011  * kern_sigprocmask()
 1012  *
 1013  *      Manipulate signal mask.
 1014  */
 1015 int
 1016 kern_sigprocmask(struct thread *td, int how, sigset_t *set, sigset_t *oset,
 1017     int flags)
 1018 {
 1019         sigset_t new_block, oset1;
 1020         struct proc *p;
 1021         int error;
 1022 
 1023         p = td->td_proc;
 1024         if ((flags & SIGPROCMASK_PROC_LOCKED) != 0)
 1025                 PROC_LOCK_ASSERT(p, MA_OWNED);
 1026         else
 1027                 PROC_LOCK(p);
 1028         mtx_assert(&p->p_sigacts->ps_mtx, (flags & SIGPROCMASK_PS_LOCKED) != 0
 1029             ? MA_OWNED : MA_NOTOWNED);
 1030         if (oset != NULL)
 1031                 *oset = td->td_sigmask;
 1032 
 1033         error = 0;
 1034         if (set != NULL) {
 1035                 switch (how) {
 1036                 case SIG_BLOCK:
 1037                         SIG_CANTMASK(*set);
 1038                         oset1 = td->td_sigmask;
 1039                         SIGSETOR(td->td_sigmask, *set);
 1040                         new_block = td->td_sigmask;
 1041                         SIGSETNAND(new_block, oset1);
 1042                         break;
 1043                 case SIG_UNBLOCK:
 1044                         SIGSETNAND(td->td_sigmask, *set);
 1045                         signotify(td);
 1046                         goto out;
 1047                 case SIG_SETMASK:
 1048                         SIG_CANTMASK(*set);
 1049                         oset1 = td->td_sigmask;
 1050                         if (flags & SIGPROCMASK_OLD)
 1051                                 SIGSETLO(td->td_sigmask, *set);
 1052                         else
 1053                                 td->td_sigmask = *set;
 1054                         new_block = td->td_sigmask;
 1055                         SIGSETNAND(new_block, oset1);
 1056                         signotify(td);
 1057                         break;
 1058                 default:
 1059                         error = EINVAL;
 1060                         goto out;
 1061                 }
 1062 
 1063                 /*
 1064                  * The new_block set contains signals that were not previously
 1065                  * blocked, but are blocked now.
 1066                  *
 1067                  * In case we block any signal that was not previously blocked
  1068                  * for td, and the process has the signal pending, try to schedule
 1069                  * signal delivery to some thread that does not block the
 1070                  * signal, possibly waking it up.
 1071                  */
 1072                 if (p->p_numthreads != 1)
 1073                         reschedule_signals(p, new_block, flags);
 1074         }
 1075 
 1076 out:
 1077         if (!(flags & SIGPROCMASK_PROC_LOCKED))
 1078                 PROC_UNLOCK(p);
 1079         return (error);
 1080 }
 1081 
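
/*
 * Illustrative sketch (not part of the original file): blocking a
 * signal around a critical region with kern_sigprocmask() and then
 * restoring the saved mask.  The helper is hypothetical.
 */
#if 0
static void
example_block_sigchld(struct thread *td)
{
        sigset_t set, oset;

        SIGEMPTYSET(set);
        SIGADDSET(set, SIGCHLD);
        kern_sigprocmask(td, SIG_BLOCK, &set, &oset, 0);
        /* ... region where SIGCHLD must stay pending ... */
        kern_sigprocmask(td, SIG_SETMASK, &oset, NULL, 0);
}
#endif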
 1082 #ifndef _SYS_SYSPROTO_H_
 1083 struct sigprocmask_args {
 1084         int     how;
 1085         const sigset_t *set;
 1086         sigset_t *oset;
 1087 };
 1088 #endif
 1089 int
 1090 sys_sigprocmask(td, uap)
 1091         register struct thread *td;
 1092         struct sigprocmask_args *uap;
 1093 {
 1094         sigset_t set, oset;
 1095         sigset_t *setp, *osetp;
 1096         int error;
 1097 
 1098         setp = (uap->set != NULL) ? &set : NULL;
 1099         osetp = (uap->oset != NULL) ? &oset : NULL;
 1100         if (setp) {
 1101                 error = copyin(uap->set, setp, sizeof(set));
 1102                 if (error)
 1103                         return (error);
 1104         }
 1105         error = kern_sigprocmask(td, uap->how, setp, osetp, 0);
 1106         if (osetp && !error) {
 1107                 error = copyout(osetp, uap->oset, sizeof(oset));
 1108         }
 1109         return (error);
 1110 }
 1111 
 1112 #ifdef COMPAT_43        /* XXX - COMPAT_FBSD3 */
 1113 #ifndef _SYS_SYSPROTO_H_
 1114 struct osigprocmask_args {
 1115         int     how;
 1116         osigset_t mask;
 1117 };
 1118 #endif
 1119 int
 1120 osigprocmask(td, uap)
 1121         register struct thread *td;
 1122         struct osigprocmask_args *uap;
 1123 {
 1124         sigset_t set, oset;
 1125         int error;
 1126 
 1127         OSIG2SIG(uap->mask, set);
 1128         error = kern_sigprocmask(td, uap->how, &set, &oset, 1);
 1129         SIG2OSIG(oset, td->td_retval[0]);
 1130         return (error);
 1131 }
 1132 #endif /* COMPAT_43 */
 1133 
 1134 int
 1135 sys_sigwait(struct thread *td, struct sigwait_args *uap)
 1136 {
 1137         ksiginfo_t ksi;
 1138         sigset_t set;
 1139         int error;
 1140 
 1141         error = copyin(uap->set, &set, sizeof(set));
 1142         if (error) {
 1143                 td->td_retval[0] = error;
 1144                 return (0);
 1145         }
 1146 
 1147         error = kern_sigtimedwait(td, set, &ksi, NULL);
 1148         if (error) {
 1149                 if (error == EINTR && td->td_proc->p_osrel < P_OSREL_SIGWAIT)
 1150                         error = ERESTART;
 1151                 if (error == ERESTART)
 1152                         return (error);
 1153                 td->td_retval[0] = error;
 1154                 return (0);
 1155         }
 1156 
 1157         error = copyout(&ksi.ksi_signo, uap->sig, sizeof(ksi.ksi_signo));
 1158         td->td_retval[0] = error;
 1159         return (0);
 1160 }
 1161 
 1162 int
 1163 sys_sigtimedwait(struct thread *td, struct sigtimedwait_args *uap)
 1164 {
 1165         struct timespec ts;
 1166         struct timespec *timeout;
 1167         sigset_t set;
 1168         ksiginfo_t ksi;
 1169         int error;
 1170 
 1171         if (uap->timeout) {
 1172                 error = copyin(uap->timeout, &ts, sizeof(ts));
 1173                 if (error)
 1174                         return (error);
 1175 
 1176                 timeout = &ts;
 1177         } else
 1178                 timeout = NULL;
 1179 
 1180         error = copyin(uap->set, &set, sizeof(set));
 1181         if (error)
 1182                 return (error);
 1183 
 1184         error = kern_sigtimedwait(td, set, &ksi, timeout);
 1185         if (error)
 1186                 return (error);
 1187 
 1188         if (uap->info)
 1189                 error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));
 1190 
 1191         if (error == 0)
 1192                 td->td_retval[0] = ksi.ksi_signo;
 1193         return (error);
 1194 }
 1195 
 1196 int
 1197 sys_sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap)
 1198 {
 1199         ksiginfo_t ksi;
 1200         sigset_t set;
 1201         int error;
 1202 
 1203         error = copyin(uap->set, &set, sizeof(set));
 1204         if (error)
 1205                 return (error);
 1206 
 1207         error = kern_sigtimedwait(td, set, &ksi, NULL);
 1208         if (error)
 1209                 return (error);
 1210 
 1211         if (uap->info)
 1212                 error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));
 1213 
 1214         if (error == 0)
 1215                 td->td_retval[0] = ksi.ksi_signo;
 1216         return (error);
 1217 }
 1218 
 1219 int
 1220 kern_sigtimedwait(struct thread *td, sigset_t waitset, ksiginfo_t *ksi,
 1221         struct timespec *timeout)
 1222 {
 1223         struct sigacts *ps;
 1224         sigset_t saved_mask, new_block;
 1225         struct proc *p;
 1226         int error, sig, timo, timevalid = 0;
 1227         struct timespec rts, ets, ts;
 1228         struct timeval tv;
 1229 
 1230         p = td->td_proc;
 1231         error = 0;
 1232         ets.tv_sec = 0;
 1233         ets.tv_nsec = 0;
 1234 
 1235         if (timeout != NULL) {
 1236                 if (timeout->tv_nsec >= 0 && timeout->tv_nsec < 1000000000) {
 1237                         timevalid = 1;
 1238                         getnanouptime(&rts);
 1239                         ets = rts;
 1240                         timespecadd(&ets, timeout);
 1241                 }
 1242         }
 1243         ksiginfo_init(ksi);
  1244         /* Some signals cannot be waited for. */
 1245         SIG_CANTMASK(waitset);
 1246         ps = p->p_sigacts;
 1247         PROC_LOCK(p);
 1248         saved_mask = td->td_sigmask;
 1249         SIGSETNAND(td->td_sigmask, waitset);
 1250         for (;;) {
 1251                 mtx_lock(&ps->ps_mtx);
 1252                 sig = cursig(td);
 1253                 mtx_unlock(&ps->ps_mtx);
 1254                 KASSERT(sig >= 0, ("sig %d", sig));
 1255                 if (sig != 0 && SIGISMEMBER(waitset, sig)) {
 1256                         if (sigqueue_get(&td->td_sigqueue, sig, ksi) != 0 ||
 1257                             sigqueue_get(&p->p_sigqueue, sig, ksi) != 0) {
 1258                                 error = 0;
 1259                                 break;
 1260                         }
 1261                 }
 1262 
 1263                 if (error != 0)
 1264                         break;
 1265 
 1266                 /*
 1267                  * POSIX says this must be checked after looking for pending
 1268                  * signals.
 1269                  */
 1270                 if (timeout != NULL) {
 1271                         if (!timevalid) {
 1272                                 error = EINVAL;
 1273                                 break;
 1274                         }
 1275                         getnanouptime(&rts);
 1276                         if (timespeccmp(&rts, &ets, >=)) {
 1277                                 error = EAGAIN;
 1278                                 break;
 1279                         }
 1280                         ts = ets;
 1281                         timespecsub(&ts, &rts);
 1282                         TIMESPEC_TO_TIMEVAL(&tv, &ts);
 1283                         timo = tvtohz(&tv);
 1284                 } else {
 1285                         timo = 0;
 1286                 }
 1287 
 1288                 error = msleep(ps, &p->p_mtx, PPAUSE|PCATCH, "sigwait", timo);
 1289 
 1290                 if (timeout != NULL) {
 1291                         if (error == ERESTART) {
  1292                                 /* The timeout cannot be restarted. */
 1293                                 error = EINTR;
 1294                         } else if (error == EAGAIN) {
  1295                                 /* We will recalculate the timeout ourselves. */
 1296                                 error = 0;
 1297                         }
 1298                 }
 1299         }
 1300 
 1301         new_block = saved_mask;
 1302         SIGSETNAND(new_block, td->td_sigmask);
 1303         td->td_sigmask = saved_mask;
 1304         /*
 1305          * Fewer signals can be delivered to us, reschedule signal
 1306          * notification.
 1307          */
 1308         if (p->p_numthreads != 1)
 1309                 reschedule_signals(p, new_block, 0);
 1310 
 1311         if (error == 0) {
 1312                 SDT_PROBE2(proc, , , signal__clear, sig, ksi);
 1313 
 1314                 if (ksi->ksi_code == SI_TIMER)
 1315                         itimer_accept(p, ksi->ksi_timerid, ksi);
 1316 
 1317 #ifdef KTRACE
 1318                 if (KTRPOINT(td, KTR_PSIG)) {
 1319                         sig_t action;
 1320 
 1321                         mtx_lock(&ps->ps_mtx);
 1322                         action = ps->ps_sigact[_SIG_IDX(sig)];
 1323                         mtx_unlock(&ps->ps_mtx);
 1324                         ktrpsig(sig, action, &td->td_sigmask, ksi->ksi_code);
 1325                 }
 1326 #endif
 1327                 if (sig == SIGKILL)
 1328                         sigexit(td, sig);
 1329         }
 1330         PROC_UNLOCK(p);
 1331         return (error);
 1332 }
 1333 
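
/*
 * Illustrative sketch (not part of the original file): waiting for
 * SIGUSR1 with kern_sigtimedwait() and a one-second timeout, the same
 * path sys_sigtimedwait() takes after its copyin()s.  The helper is
 * hypothetical.
 */
#if 0
static int
example_wait_sigusr1(struct thread *td)
{
        struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
        ksiginfo_t ksi;
        sigset_t set;

        SIGEMPTYSET(set);
        SIGADDSET(set, SIGUSR1);
        return (kern_sigtimedwait(td, set, &ksi, &ts));
}
#endif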
 1334 #ifndef _SYS_SYSPROTO_H_
 1335 struct sigpending_args {
 1336         sigset_t        *set;
 1337 };
 1338 #endif
 1339 int
 1340 sys_sigpending(td, uap)
 1341         struct thread *td;
 1342         struct sigpending_args *uap;
 1343 {
 1344         struct proc *p = td->td_proc;
 1345         sigset_t pending;
 1346 
 1347         PROC_LOCK(p);
 1348         pending = p->p_sigqueue.sq_signals;
 1349         SIGSETOR(pending, td->td_sigqueue.sq_signals);
 1350         PROC_UNLOCK(p);
 1351         return (copyout(&pending, uap->set, sizeof(sigset_t)));
 1352 }
 1353 
 1354 #ifdef COMPAT_43        /* XXX - COMPAT_FBSD3 */
 1355 #ifndef _SYS_SYSPROTO_H_
 1356 struct osigpending_args {
 1357         int     dummy;
 1358 };
 1359 #endif
 1360 int
 1361 osigpending(td, uap)
 1362         struct thread *td;
 1363         struct osigpending_args *uap;
 1364 {
 1365         struct proc *p = td->td_proc;
 1366         sigset_t pending;
 1367 
 1368         PROC_LOCK(p);
 1369         pending = p->p_sigqueue.sq_signals;
 1370         SIGSETOR(pending, td->td_sigqueue.sq_signals);
 1371         PROC_UNLOCK(p);
 1372         SIG2OSIG(pending, td->td_retval[0]);
 1373         return (0);
 1374 }
 1375 #endif /* COMPAT_43 */
 1376 
 1377 #if defined(COMPAT_43)
 1378 /*
 1379  * Generalized interface signal handler, 4.3-compatible.
 1380  */
 1381 #ifndef _SYS_SYSPROTO_H_
 1382 struct osigvec_args {
 1383         int     signum;
 1384         struct  sigvec *nsv;
 1385         struct  sigvec *osv;
 1386 };
 1387 #endif
 1388 /* ARGSUSED */
 1389 int
 1390 osigvec(td, uap)
 1391         struct thread *td;
 1392         register struct osigvec_args *uap;
 1393 {
 1394         struct sigvec vec;
 1395         struct sigaction nsa, osa;
 1396         register struct sigaction *nsap, *osap;
 1397         int error;
 1398 
 1399         if (uap->signum <= 0 || uap->signum >= ONSIG)
 1400                 return (EINVAL);
 1401         nsap = (uap->nsv != NULL) ? &nsa : NULL;
 1402         osap = (uap->osv != NULL) ? &osa : NULL;
 1403         if (nsap) {
 1404                 error = copyin(uap->nsv, &vec, sizeof(vec));
 1405                 if (error)
 1406                         return (error);
 1407                 nsap->sa_handler = vec.sv_handler;
 1408                 OSIG2SIG(vec.sv_mask, nsap->sa_mask);
 1409                 nsap->sa_flags = vec.sv_flags;
 1410                 nsap->sa_flags ^= SA_RESTART;   /* opposite of SV_INTERRUPT */
 1411         }
 1412         error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
 1413         if (osap && !error) {
 1414                 vec.sv_handler = osap->sa_handler;
 1415                 SIG2OSIG(osap->sa_mask, vec.sv_mask);
 1416                 vec.sv_flags = osap->sa_flags;
 1417                 vec.sv_flags &= ~SA_NOCLDWAIT;
 1418                 vec.sv_flags ^= SA_RESTART;
 1419                 error = copyout(&vec, uap->osv, sizeof(vec));
 1420         }
 1421         return (error);
 1422 }
 1423 
 1424 #ifndef _SYS_SYSPROTO_H_
 1425 struct osigblock_args {
 1426         int     mask;
 1427 };
 1428 #endif
 1429 int
 1430 osigblock(td, uap)
 1431         register struct thread *td;
 1432         struct osigblock_args *uap;
 1433 {
 1434         sigset_t set, oset;
 1435 
 1436         OSIG2SIG(uap->mask, set);
 1437         kern_sigprocmask(td, SIG_BLOCK, &set, &oset, 0);
 1438         SIG2OSIG(oset, td->td_retval[0]);
 1439         return (0);
 1440 }
 1441 
 1442 #ifndef _SYS_SYSPROTO_H_
 1443 struct osigsetmask_args {
 1444         int     mask;
 1445 };
 1446 #endif
 1447 int
 1448 osigsetmask(td, uap)
 1449         struct thread *td;
 1450         struct osigsetmask_args *uap;
 1451 {
 1452         sigset_t set, oset;
 1453 
 1454         OSIG2SIG(uap->mask, set);
 1455         kern_sigprocmask(td, SIG_SETMASK, &set, &oset, 0);
 1456         SIG2OSIG(oset, td->td_retval[0]);
 1457         return (0);
 1458 }
 1459 #endif /* COMPAT_43 */
 1460 
 1461 /*
 1462  * Suspend calling thread until signal, providing mask to be set in the
 1463  * meantime.
 1464  */
 1465 #ifndef _SYS_SYSPROTO_H_
 1466 struct sigsuspend_args {
 1467         const sigset_t *sigmask;
 1468 };
 1469 #endif
 1470 /* ARGSUSED */
 1471 int
 1472 sys_sigsuspend(td, uap)
 1473         struct thread *td;
 1474         struct sigsuspend_args *uap;
 1475 {
 1476         sigset_t mask;
 1477         int error;
 1478 
 1479         error = copyin(uap->sigmask, &mask, sizeof(mask));
 1480         if (error)
 1481                 return (error);
 1482         return (kern_sigsuspend(td, mask));
 1483 }
 1484 
 1485 int
 1486 kern_sigsuspend(struct thread *td, sigset_t mask)
 1487 {
 1488         struct proc *p = td->td_proc;
 1489         int has_sig, sig;
 1490 
 1491         /*
 1492          * When returning from sigsuspend, we want
 1493          * the old mask to be restored after the
 1494          * signal handler has finished.  Thus, we
 1495          * save it here and set the TDP_OLDMASK flag on the
 1496          * thread to indicate this.
 1497          */
 1498         PROC_LOCK(p);
 1499         kern_sigprocmask(td, SIG_SETMASK, &mask, &td->td_oldsigmask,
 1500             SIGPROCMASK_PROC_LOCKED);
 1501         td->td_pflags |= TDP_OLDMASK;
 1502 
 1503         /*
 1504          * Process signals now.  Otherwise we can get a spurious
 1505          * wakeup when a signal is entered into the process queue but
 1506          * delivered to another thread, whereas sigsuspend should
 1507          * return only on actual signal delivery.
 1508          */
 1509         (p->p_sysent->sv_set_syscall_retval)(td, EINTR);
 1510         for (has_sig = 0; !has_sig;) {
 1511                 while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "pause",
 1512                         0) == 0)
 1513                         /* void */;
 1514                 thread_suspend_check(0);
 1515                 mtx_lock(&p->p_sigacts->ps_mtx);
 1516                 while ((sig = cursig(td)) != 0) {
 1517                         KASSERT(sig >= 0, ("sig %d", sig));
 1518                         has_sig += postsig(sig);
 1519                 }
 1520                 mtx_unlock(&p->p_sigacts->ps_mtx);
 1521         }
 1522         PROC_UNLOCK(p);
 1523         td->td_errno = EINTR;
 1524         td->td_pflags |= TDP_NERRNO;
 1525         return (EJUSTRETURN);
 1526 }
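
/*
 * Illustration (not part of the original kern_sig.c): a minimal userland
 * sketch of the pattern kern_sigsuspend() serves, i.e. atomically installing
 * a temporary mask and sleeping until a signal is actually delivered, with
 * the old mask restored afterwards.  The flag name got_sigusr1 and the use
 * of SIGUSR1 are arbitrary choices for the example.
 */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_sigusr1;

static void
handler(int sig)
{

        (void)sig;
        got_sigusr1 = 1;
}

int
main(void)
{
        struct sigaction sa;
        sigset_t block, oldmask;

        sa.sa_handler = handler;
        sa.sa_flags = 0;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, NULL);

        /* Block SIGUSR1 so it cannot fire between the test and the wait. */
        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);
        sigprocmask(SIG_BLOCK, &block, &oldmask);

        printf("waiting for SIGUSR1 (pid %d)\n", (int)getpid());
        while (!got_sigusr1)
                sigsuspend(&oldmask);   /* returns -1/EINTR after delivery */

        sigprocmask(SIG_SETMASK, &oldmask, NULL);
        printf("got it\n");
        return (0);
}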
 1527 
 1528 #ifdef COMPAT_43        /* XXX - COMPAT_FBSD3 */
 1529 /*
 1530  * Compatibility sigsuspend call for old binaries.  Note nonstandard calling
 1531  * convention: libc stub passes mask, not pointer, to save a copyin.
 1532  */
 1533 #ifndef _SYS_SYSPROTO_H_
 1534 struct osigsuspend_args {
 1535         osigset_t mask;
 1536 };
 1537 #endif
 1538 /* ARGSUSED */
 1539 int
 1540 osigsuspend(td, uap)
 1541         struct thread *td;
 1542         struct osigsuspend_args *uap;
 1543 {
 1544         sigset_t mask;
 1545 
 1546         OSIG2SIG(uap->mask, mask);
 1547         return (kern_sigsuspend(td, mask));
 1548 }
 1549 #endif /* COMPAT_43 */
 1550 
 1551 #if defined(COMPAT_43)
 1552 #ifndef _SYS_SYSPROTO_H_
 1553 struct osigstack_args {
 1554         struct  sigstack *nss;
 1555         struct  sigstack *oss;
 1556 };
 1557 #endif
 1558 /* ARGSUSED */
 1559 int
 1560 osigstack(td, uap)
 1561         struct thread *td;
 1562         register struct osigstack_args *uap;
 1563 {
 1564         struct sigstack nss, oss;
 1565         int error = 0;
 1566 
 1567         if (uap->nss != NULL) {
 1568                 error = copyin(uap->nss, &nss, sizeof(nss));
 1569                 if (error)
 1570                         return (error);
 1571         }
 1572         oss.ss_sp = td->td_sigstk.ss_sp;
 1573         oss.ss_onstack = sigonstack(cpu_getstack(td));
 1574         if (uap->nss != NULL) {
 1575                 td->td_sigstk.ss_sp = nss.ss_sp;
 1576                 td->td_sigstk.ss_size = 0;
 1577                 td->td_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK;
 1578                 td->td_pflags |= TDP_ALTSTACK;
 1579         }
 1580         if (uap->oss != NULL)
 1581                 error = copyout(&oss, uap->oss, sizeof(oss));
 1582 
 1583         return (error);
 1584 }
 1585 #endif /* COMPAT_43 */
 1586 
 1587 #ifndef _SYS_SYSPROTO_H_
 1588 struct sigaltstack_args {
 1589         stack_t *ss;
 1590         stack_t *oss;
 1591 };
 1592 #endif
 1593 /* ARGSUSED */
 1594 int
 1595 sys_sigaltstack(td, uap)
 1596         struct thread *td;
 1597         register struct sigaltstack_args *uap;
 1598 {
 1599         stack_t ss, oss;
 1600         int error;
 1601 
 1602         if (uap->ss != NULL) {
 1603                 error = copyin(uap->ss, &ss, sizeof(ss));
 1604                 if (error)
 1605                         return (error);
 1606         }
 1607         error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
 1608             (uap->oss != NULL) ? &oss : NULL);
 1609         if (error)
 1610                 return (error);
 1611         if (uap->oss != NULL)
 1612                 error = copyout(&oss, uap->oss, sizeof(stack_t));
 1613         return (error);
 1614 }
 1615 
 1616 int
 1617 kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
 1618 {
 1619         struct proc *p = td->td_proc;
 1620         int oonstack;
 1621 
 1622         oonstack = sigonstack(cpu_getstack(td));
 1623 
 1624         if (oss != NULL) {
 1625                 *oss = td->td_sigstk;
 1626                 oss->ss_flags = (td->td_pflags & TDP_ALTSTACK)
 1627                     ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
 1628         }
 1629 
 1630         if (ss != NULL) {
 1631                 if (oonstack)
 1632                         return (EPERM);
 1633                 if ((ss->ss_flags & ~SS_DISABLE) != 0)
 1634                         return (EINVAL);
 1635                 if (!(ss->ss_flags & SS_DISABLE)) {
 1636                         if (ss->ss_size < p->p_sysent->sv_minsigstksz)
 1637                                 return (ENOMEM);
 1638 
 1639                         td->td_sigstk = *ss;
 1640                         td->td_pflags |= TDP_ALTSTACK;
 1641                 } else {
 1642                         td->td_pflags &= ~TDP_ALTSTACK;
 1643                 }
 1644         }
 1645         return (0);
 1646 }
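
/*
 * Illustration (not part of the original kern_sig.c): a userland sketch of
 * the interface kern_sigaltstack() services.  An alternate stack is
 * installed with sigaltstack(2) and a handler is asked to run on it with
 * SA_ONSTACK; a stack smaller than the sv_minsigstksz checked above
 * (MINSIGSTKSZ from userland, for the native ABI) is rejected with ENOMEM.
 */
#include <err.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static char *volatile handler_sp;

static void
on_sig(int sig)
{
        char c;

        (void)sig;
        handler_sp = &c;        /* taken while running on the alternate stack */
}

int
main(void)
{
        stack_t ss;
        struct sigaction sa;

        ss.ss_sp = malloc(SIGSTKSZ);
        ss.ss_size = SIGSTKSZ;
        ss.ss_flags = 0;
        if (ss.ss_sp == NULL || sigaltstack(&ss, NULL) == -1)
                err(1, "sigaltstack");

        sa.sa_handler = on_sig;
        sa.sa_flags = SA_ONSTACK;       /* run the handler on the stack above */
        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, NULL);

        raise(SIGUSR1);
        printf("alternate stack %p-%p, handler ran near %p\n",
            (void *)ss.ss_sp, (void *)((char *)ss.ss_sp + ss.ss_size),
            (void *)handler_sp);
        return (0);
}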
 1647 
 1648 /*
 1649  * Common code for kill process group/broadcast kill.
 1650  * td is the calling thread.
 1651  */
 1652 static int
 1653 killpg1(struct thread *td, int sig, int pgid, int all, ksiginfo_t *ksi)
 1654 {
 1655         struct proc *p;
 1656         struct pgrp *pgrp;
 1657         int err;
 1658         int ret;
 1659 
 1660         ret = ESRCH;
 1661         if (all) {
 1662                 /*
 1663                  * broadcast
 1664                  */
 1665                 sx_slock(&allproc_lock);
 1666                 FOREACH_PROC_IN_SYSTEM(p) {
 1667                         PROC_LOCK(p);
 1668                         if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
 1669                             p == td->td_proc || p->p_state == PRS_NEW) {
 1670                                 PROC_UNLOCK(p);
 1671                                 continue;
 1672                         }
 1673                         err = p_cansignal(td, p, sig);
 1674                         if (err == 0) {
 1675                                 if (sig)
 1676                                         pksignal(p, sig, ksi);
 1677                                 ret = err;
 1678                         }
 1679                         else if (ret == ESRCH)
 1680                                 ret = err;
 1681                         PROC_UNLOCK(p);
 1682                 }
 1683                 sx_sunlock(&allproc_lock);
 1684         } else {
 1685                 sx_slock(&proctree_lock);
 1686                 if (pgid == 0) {
 1687                         /*
 1688                          * zero pgid means send to my process group.
 1689                          */
 1690                         pgrp = td->td_proc->p_pgrp;
 1691                         PGRP_LOCK(pgrp);
 1692                 } else {
 1693                         pgrp = pgfind(pgid);
 1694                         if (pgrp == NULL) {
 1695                                 sx_sunlock(&proctree_lock);
 1696                                 return (ESRCH);
 1697                         }
 1698                 }
 1699                 sx_sunlock(&proctree_lock);
 1700                 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
 1701                         PROC_LOCK(p);
 1702                         if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
 1703                             p->p_state == PRS_NEW) {
 1704                                 PROC_UNLOCK(p);
 1705                                 continue;
 1706                         }
 1707                         err = p_cansignal(td, p, sig);
 1708                         if (err == 0) {
 1709                                 if (sig)
 1710                                         pksignal(p, sig, ksi);
 1711                                 ret = err;
 1712                         }
 1713                         else if (ret == ESRCH)
 1714                                 ret = err;
 1715                         PROC_UNLOCK(p);
 1716                 }
 1717                 PGRP_UNLOCK(pgrp);
 1718         }
 1719         return (ret);
 1720 }
 1721 
 1722 #ifndef _SYS_SYSPROTO_H_
 1723 struct kill_args {
 1724         int     pid;
 1725         int     signum;
 1726 };
 1727 #endif
 1728 /* ARGSUSED */
 1729 int
 1730 sys_kill(struct thread *td, struct kill_args *uap)
 1731 {
 1732         ksiginfo_t ksi;
 1733         struct proc *p;
 1734         int error;
 1735 
 1736         /*
 1737          * A process in capability mode can send signals only to itself.
 1738          * The main rationale behind this is that abort(3) is implemented as
 1739          * kill(getpid(), SIGABRT).
 1740          */
 1741         if (IN_CAPABILITY_MODE(td) && uap->pid != td->td_proc->p_pid)
 1742                 return (ECAPMODE);
 1743 
 1744         AUDIT_ARG_SIGNUM(uap->signum);
 1745         AUDIT_ARG_PID(uap->pid);
 1746         if ((u_int)uap->signum > _SIG_MAXSIG)
 1747                 return (EINVAL);
 1748 
 1749         ksiginfo_init(&ksi);
 1750         ksi.ksi_signo = uap->signum;
 1751         ksi.ksi_code = SI_USER;
 1752         ksi.ksi_pid = td->td_proc->p_pid;
 1753         ksi.ksi_uid = td->td_ucred->cr_ruid;
 1754 
 1755         if (uap->pid > 0) {
 1756                 /* kill single process */
 1757                 if ((p = pfind(uap->pid)) == NULL) {
 1758                         if ((p = zpfind(uap->pid)) == NULL)
 1759                                 return (ESRCH);
 1760                 }
 1761                 AUDIT_ARG_PROCESS(p);
 1762                 error = p_cansignal(td, p, uap->signum);
 1763                 if (error == 0 && uap->signum)
 1764                         pksignal(p, uap->signum, &ksi);
 1765                 PROC_UNLOCK(p);
 1766                 return (error);
 1767         }
 1768         switch (uap->pid) {
 1769         case -1:                /* broadcast signal */
 1770                 return (killpg1(td, uap->signum, 0, 1, &ksi));
 1771         case 0:                 /* signal own process group */
 1772                 return (killpg1(td, uap->signum, 0, 0, &ksi));
 1773         default:                /* negative explicit process group */
 1774                 return (killpg1(td, uap->signum, -uap->pid, 0, &ksi));
 1775         }
 1776         /* NOTREACHED */
 1777 }
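
/*
 * Illustration (not part of the original kern_sig.c): the pid dispatch in
 * sys_kill() above implements the usual kill(2) conventions.  A small
 * userland sketch; the child/process-group setup is only an example.
 */
#include <sys/types.h>
#include <sys/wait.h>
#include <err.h>
#include <signal.h>
#include <unistd.h>

int
main(void)
{
        pid_t child;

        child = fork();
        if (child == -1)
                err(1, "fork");
        if (child == 0) {
                setpgid(0, 0);          /* put ourselves in a new group */
                pause();                /* wait to be signalled */
                _exit(0);
        }
        setpgid(child, child);          /* done by both sides to avoid a race */

        kill(-child, SIGTERM);          /* pid < -1: every member of pgrp |pid| */
        waitpid(child, NULL, 0);

        /*
         * Other forms dispatched by sys_kill():
         *   kill(pid, sig), pid > 0  - exactly one process (pfind/zpfind);
         *   kill(0, sig)             - the caller's own process group;
         *   kill(-1, sig)            - broadcast to every process we may signal.
         */
        return (0);
}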
 1778 
 1779 int
 1780 sys_pdkill(td, uap)
 1781         struct thread *td;
 1782         struct pdkill_args *uap;
 1783 {
 1784         struct proc *p;
 1785         cap_rights_t rights;
 1786         int error;
 1787 
 1788         AUDIT_ARG_SIGNUM(uap->signum);
 1789         AUDIT_ARG_FD(uap->fd);
 1790         if ((u_int)uap->signum > _SIG_MAXSIG)
 1791                 return (EINVAL);
 1792 
 1793         error = procdesc_find(td, uap->fd,
 1794             cap_rights_init(&rights, CAP_PDKILL), &p);
 1795         if (error)
 1796                 return (error);
 1797         AUDIT_ARG_PROCESS(p);
 1798         error = p_cansignal(td, p, uap->signum);
 1799         if (error == 0 && uap->signum)
 1800                 kern_psignal(p, uap->signum);
 1801         PROC_UNLOCK(p);
 1802         return (error);
 1803 }
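
/*
 * Illustration (not part of the original kern_sig.c): sys_pdkill() backs
 * pdkill(2), which names its target by a process descriptor obtained from
 * pdfork(2) rather than by pid.  A hedged userland sketch of that pairing;
 * error handling is mostly omitted.
 */
#include <sys/types.h>
#include <sys/procdesc.h>
#include <err.h>
#include <signal.h>
#include <unistd.h>

int
main(void)
{
        pid_t pid;
        int pd;

        pid = pdfork(&pd, 0);   /* like fork(), but also returns a descriptor */
        if (pid == -1)
                err(1, "pdfork");
        if (pid == 0) {
                pause();        /* child: wait to be signalled */
                _exit(0);
        }

        pdkill(pd, SIGTERM);    /* like kill(), but targets the descriptor */
        close(pd);              /* releases the child once it has exited */
        return (0);
}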
 1804 
 1805 #if defined(COMPAT_43)
 1806 #ifndef _SYS_SYSPROTO_H_
 1807 struct okillpg_args {
 1808         int     pgid;
 1809         int     signum;
 1810 };
 1811 #endif
 1812 /* ARGSUSED */
 1813 int
 1814 okillpg(struct thread *td, struct okillpg_args *uap)
 1815 {
 1816         ksiginfo_t ksi;
 1817 
 1818         AUDIT_ARG_SIGNUM(uap->signum);
 1819         AUDIT_ARG_PID(uap->pgid);
 1820         if ((u_int)uap->signum > _SIG_MAXSIG)
 1821                 return (EINVAL);
 1822 
 1823         ksiginfo_init(&ksi);
 1824         ksi.ksi_signo = uap->signum;
 1825         ksi.ksi_code = SI_USER;
 1826         ksi.ksi_pid = td->td_proc->p_pid;
 1827         ksi.ksi_uid = td->td_ucred->cr_ruid;
 1828         return (killpg1(td, uap->signum, uap->pgid, 0, &ksi));
 1829 }
 1830 #endif /* COMPAT_43 */
 1831 
 1832 #ifndef _SYS_SYSPROTO_H_
 1833 struct sigqueue_args {
 1834         pid_t pid;
 1835         int signum;
 1836         /* union sigval */ void *value;
 1837 };
 1838 #endif
 1839 int
 1840 sys_sigqueue(struct thread *td, struct sigqueue_args *uap)
 1841 {
 1842         ksiginfo_t ksi;
 1843         struct proc *p;
 1844         int error;
 1845 
 1846         if ((u_int)uap->signum > _SIG_MAXSIG)
 1847                 return (EINVAL);
 1848 
 1849         /*
 1850          * The specification says sigqueue can only send a signal to
 1851          * a single process.
 1852          */
 1853         if (uap->pid <= 0)
 1854                 return (EINVAL);
 1855 
 1856         if ((p = pfind(uap->pid)) == NULL) {
 1857                 if ((p = zpfind(uap->pid)) == NULL)
 1858                         return (ESRCH);
 1859         }
 1860         error = p_cansignal(td, p, uap->signum);
 1861         if (error == 0 && uap->signum != 0) {
 1862                 ksiginfo_init(&ksi);
 1863                 ksi.ksi_flags = KSI_SIGQ;
 1864                 ksi.ksi_signo = uap->signum;
 1865                 ksi.ksi_code = SI_QUEUE;
 1866                 ksi.ksi_pid = td->td_proc->p_pid;
 1867                 ksi.ksi_uid = td->td_ucred->cr_ruid;
 1868                 ksi.ksi_value.sival_ptr = uap->value;
 1869                 error = pksignal(p, ksi.ksi_signo, &ksi);
 1870         }
 1871         PROC_UNLOCK(p);
 1872         return (error);
 1873 }
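
/*
 * Illustration (not part of the original kern_sig.c): sys_sigqueue() is the
 * kernel side of sigqueue(2); the queued sival value shows up in the
 * handler's siginfo (with si_code SI_QUEUE, as set above) when the handler
 * is installed with SA_SIGINFO.  A minimal userland sketch:
 */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t seen_value;

static void
on_usr1(int sig, siginfo_t *si, void *ctx)
{

        (void)sig;
        (void)ctx;
        seen_value = si->si_value.sival_int;    /* the payload queued below */
}

int
main(void)
{
        struct sigaction sa;
        union sigval val;

        sa.sa_sigaction = on_usr1;
        sa.sa_flags = SA_SIGINFO;       /* deliver siginfo, including si_value */
        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, NULL);

        val.sival_int = 42;
        /* Unblocked signal to ourselves: delivered before sigqueue() returns. */
        sigqueue(getpid(), SIGUSR1, val);
        printf("handler saw value %d\n", (int)seen_value);
        return (0);
}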
 1874 
 1875 /*
 1876  * Send a signal to a process group.
 1877  */
 1878 void
 1879 gsignal(int pgid, int sig, ksiginfo_t *ksi)
 1880 {
 1881         struct pgrp *pgrp;
 1882 
 1883         if (pgid != 0) {
 1884                 sx_slock(&proctree_lock);
 1885                 pgrp = pgfind(pgid);
 1886                 sx_sunlock(&proctree_lock);
 1887                 if (pgrp != NULL) {
 1888                         pgsignal(pgrp, sig, 0, ksi);
 1889                         PGRP_UNLOCK(pgrp);
 1890                 }
 1891         }
 1892 }
 1893 
 1894 /*
 1895  * Send a signal to a process group.  If checkctty is 1,
 1896  * limit to members that have a controlling terminal.
 1897  */
 1898 void
 1899 pgsignal(struct pgrp *pgrp, int sig, int checkctty, ksiginfo_t *ksi)
 1900 {
 1901         struct proc *p;
 1902 
 1903         if (pgrp) {
 1904                 PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
 1905                 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
 1906                         PROC_LOCK(p);
 1907                         if (p->p_state == PRS_NORMAL &&
 1908                             (checkctty == 0 || p->p_flag & P_CONTROLT))
 1909                                 pksignal(p, sig, ksi);
 1910                         PROC_UNLOCK(p);
 1911                 }
 1912         }
 1913 }
 1914 
 1915 
 1916 /*
 1917  * Recalculate the signal mask and reset the signal disposition after
 1918  * the usermode frame for delivery has been formed.  Should be called
 1919  * after the machine-specific routine, because sysent->sv_sendsig()
 1920  * needs the correct ps_siginfo and signal mask.
 1921  */
 1922 static void
 1923 postsig_done(int sig, struct thread *td, struct sigacts *ps)
 1924 {
 1925         sigset_t mask;
 1926 
 1927         mtx_assert(&ps->ps_mtx, MA_OWNED);
 1928         td->td_ru.ru_nsignals++;
 1929         mask = ps->ps_catchmask[_SIG_IDX(sig)];
 1930         if (!SIGISMEMBER(ps->ps_signodefer, sig))
 1931                 SIGADDSET(mask, sig);
 1932         kern_sigprocmask(td, SIG_BLOCK, &mask, NULL,
 1933             SIGPROCMASK_PROC_LOCKED | SIGPROCMASK_PS_LOCKED);
 1934         if (SIGISMEMBER(ps->ps_sigreset, sig))
 1935                 sigdflt(ps, sig);
 1936 }
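
/*
 * Illustration (not part of the original kern_sig.c): postsig_done() is
 * where the userland-visible SA_NODEFER and SA_RESETHAND semantics come
 * from: the caught signal is blocked during handler execution unless it is
 * in ps_signodefer, and the disposition is reset to SIG_DFL when it is in
 * ps_sigreset.  A userland sketch observing the SA_RESETHAND reset:
 */
#include <signal.h>
#include <stdio.h>

static void
once(int sig)
{

        (void)sig;
}

int
main(void)
{
        struct sigaction sa, cur;

        sa.sa_handler = once;
        sa.sa_flags = SA_RESETHAND;     /* populates ps_sigreset for SIGUSR1 */
        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, NULL);

        raise(SIGUSR1);                 /* handler runs exactly once */
        sigaction(SIGUSR1, NULL, &cur);
        printf("disposition is now %s\n",
            cur.sa_handler == SIG_DFL ? "SIG_DFL" : "still caught");
        return (0);
}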
 1937 
 1938 
 1939 /*
 1940  * Send a signal caused by a trap to the current thread.  If it will be
 1941  * caught immediately, deliver it with correct code.  Otherwise, post it
 1942  * normally.
 1943  */
 1944 void
 1945 trapsignal(struct thread *td, ksiginfo_t *ksi)
 1946 {
 1947         struct sigacts *ps;
 1948         struct proc *p;
 1949         int sig;
 1950         int code;
 1951 
 1952         p = td->td_proc;
 1953         sig = ksi->ksi_signo;
 1954         code = ksi->ksi_code;
 1955         KASSERT(_SIG_VALID(sig), ("invalid signal"));
 1956 
 1957         PROC_LOCK(p);
 1958         ps = p->p_sigacts;
 1959         mtx_lock(&ps->ps_mtx);
 1960         if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
 1961             !SIGISMEMBER(td->td_sigmask, sig)) {
 1962 #ifdef KTRACE
 1963                 if (KTRPOINT(curthread, KTR_PSIG))
 1964                         ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
 1965                             &td->td_sigmask, code);
 1966 #endif
 1967                 (*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)],
 1968                                 ksi, &td->td_sigmask);
 1969                 postsig_done(sig, td, ps);
 1970                 mtx_unlock(&ps->ps_mtx);
 1971         } else {
 1972                 /*
 1973                  * Avoid a possible infinite loop if the thread is
 1974                  * masking the signal or the process is ignoring
 1975                  * the signal.
 1976                  */
 1977                 if (kern_forcesigexit &&
 1978                     (SIGISMEMBER(td->td_sigmask, sig) ||
 1979                      ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN)) {
 1980                         SIGDELSET(td->td_sigmask, sig);
 1981                         SIGDELSET(ps->ps_sigcatch, sig);
 1982                         SIGDELSET(ps->ps_sigignore, sig);
 1983                         ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
 1984                 }
 1985                 mtx_unlock(&ps->ps_mtx);
 1986                 p->p_code = code;       /* XXX for core dump/debugger */
 1987                 p->p_sig = sig;         /* XXX to verify code */
 1988                 tdsendsignal(p, td, sig, ksi);
 1989         }
 1990         PROC_UNLOCK(p);
 1991 }
 1992 
 1993 static struct thread *
 1994 sigtd(struct proc *p, int sig, int prop)
 1995 {
 1996         struct thread *td, *signal_td;
 1997 
 1998         PROC_LOCK_ASSERT(p, MA_OWNED);
 1999 
 2000         /*
 2001          * Check if current thread can handle the signal without
 2002          * switching context to another thread.
 2003          */
 2004         if (curproc == p && !SIGISMEMBER(curthread->td_sigmask, sig))
 2005                 return (curthread);
 2006         signal_td = NULL;
 2007         FOREACH_THREAD_IN_PROC(p, td) {
 2008                 if (!SIGISMEMBER(td->td_sigmask, sig)) {
 2009                         signal_td = td;
 2010                         break;
 2011                 }
 2012         }
 2013         if (signal_td == NULL)
 2014                 signal_td = FIRST_THREAD_IN_PROC(p);
 2015         return (signal_td);
 2016 }
 2017 
 2018 /*
 2019  * Send the signal to the process.  If the signal has an action, the action
 2020  * is usually performed by the target process rather than the caller; we add
 2021  * the signal to the set of pending signals for the process.
 2022  *
 2023  * Exceptions:
 2024  *   o When a stop signal is sent to a sleeping process that takes the
 2025  *     default action, the process is stopped without awakening it.
 2026  *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 2027  *     regardless of the signal action (e.g., blocked or ignored).
 2028  *
 2029  * Other ignored signals are discarded immediately.
 2030  *
 2031  * NB: This function may be entered from the debugger via the "kill" DDB
 2032  * command.  There is little that can be done to mitigate the possibly messy
 2033  * side effects of this unwise possibility.
 2034  */
 2035 void
 2036 kern_psignal(struct proc *p, int sig)
 2037 {
 2038         ksiginfo_t ksi;
 2039 
 2040         ksiginfo_init(&ksi);
 2041         ksi.ksi_signo = sig;
 2042         ksi.ksi_code = SI_KERNEL;
 2043         (void) tdsendsignal(p, NULL, sig, &ksi);
 2044 }
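
/*
 * Illustration (not part of the original kern_sig.c): the exceptions listed
 * above kern_psignal(), stop signals stopping a sleeping process and
 * SIGCONT restarting it regardless of disposition, can be observed from
 * userland with waitpid(2) and the WUNTRACED/WCONTINUED flags.  A sketch:
 */
#include <sys/types.h>
#include <sys/wait.h>
#include <err.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        pid_t pid;
        int status;

        pid = fork();
        if (pid == -1)
                err(1, "fork");
        if (pid == 0) {
                for (;;)
                        pause();        /* child: sleep until signalled */
        }

        kill(pid, SIGSTOP);
        waitpid(pid, &status, WUNTRACED);
        if (WIFSTOPPED(status))
                printf("child stopped by signal %d\n", WSTOPSIG(status));

        kill(pid, SIGCONT);             /* restarts it whatever its disposition */
        waitpid(pid, &status, WCONTINUED);
        if (WIFCONTINUED(status))
                printf("child continued\n");

        kill(pid, SIGKILL);
        waitpid(pid, NULL, 0);
        return (0);
}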
 2045 
 2046 int
 2047 pksignal(struct proc *p, int sig, ksiginfo_t *ksi)
 2048 {
 2049 
 2050         return (tdsendsignal(p, NULL, sig, ksi));
 2051 }
 2052 
 2053 /* Utility function for finding a thread to send a signal event to. */
 2054 int
 2055 sigev_findtd(struct proc *p, struct sigevent *sigev, struct thread **ttd)
 2056 {
 2057         struct thread *td;
 2058 
 2059         if (sigev->sigev_notify == SIGEV_THREAD_ID) {
 2060                 td = tdfind(sigev->sigev_notify_thread_id, p->p_pid);
 2061                 if (td == NULL)
 2062                         return (ESRCH);
 2063                 *ttd = td;
 2064         } else {
 2065                 *ttd = NULL;
 2066                 PROC_LOCK(p);
 2067         }
 2068         return (0);
 2069 }
 2070 
 2071 void
 2072 tdsignal(struct thread *td, int sig)
 2073 {
 2074         ksiginfo_t ksi;
 2075 
 2076         ksiginfo_init(&ksi);
 2077         ksi.ksi_signo = sig;
 2078         ksi.ksi_code = SI_KERNEL;
 2079         (void) tdsendsignal(td->td_proc, td, sig, &ksi);
 2080 }
 2081 
 2082 void
 2083 tdksignal(struct thread *td, int sig, ksiginfo_t *ksi)
 2084 {
 2085 
 2086         (void) tdsendsignal(td->td_proc, td, sig, ksi);
 2087 }
 2088 
 2089 int
 2090 tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
 2091 {
 2092         sig_t action;
 2093         sigqueue_t *sigqueue;
 2094         int prop;
 2095         struct sigacts *ps;
 2096         int intrval;
 2097         int ret = 0;
 2098         int wakeup_swapper;
 2099 
 2100         MPASS(td == NULL || p == td->td_proc);
 2101         PROC_LOCK_ASSERT(p, MA_OWNED);
 2102 
 2103         if (!_SIG_VALID(sig))
 2104                 panic("%s(): invalid signal %d", __func__, sig);
 2105 
 2106         KASSERT(ksi == NULL || !KSI_ONQ(ksi), ("%s: ksi on queue", __func__));
 2107 
 2108         /*
 2109          * IEEE Std 1003.1-2001: return success when killing a zombie.
 2110          */
 2111         if (p->p_state == PRS_ZOMBIE) {
 2112                 if (ksi && (ksi->ksi_flags & KSI_INS))
 2113                         ksiginfo_tryfree(ksi);
 2114                 return (ret);
 2115         }
 2116 
 2117         ps = p->p_sigacts;
 2118         KNOTE_LOCKED(p->p_klist, NOTE_SIGNAL | sig);
 2119         prop = sigprop(sig);
 2120 
 2121         if (td == NULL) {
 2122                 td = sigtd(p, sig, prop);
 2123                 sigqueue = &p->p_sigqueue;
 2124         } else
 2125                 sigqueue = &td->td_sigqueue;
 2126 
 2127         SDT_PROBE3(proc, , , signal__send, td, p, sig);
 2128 
 2129         /*
 2130          * If the signal is being ignored,
 2131          * then we forget about it immediately.
 2132          * (Note: we don't set SIGCONT in ps_sigignore,
 2133          * and if it is set to SIG_IGN,
 2134          * action will be SIG_DFL here.)
 2135          */
 2136         mtx_lock(&ps->ps_mtx);
 2137         if (SIGISMEMBER(ps->ps_sigignore, sig)) {
 2138                 SDT_PROBE3(proc, , , signal__discard, td, p, sig);
 2139 
 2140                 mtx_unlock(&ps->ps_mtx);
 2141                 if (ksi && (ksi->ksi_flags & KSI_INS))
 2142                         ksiginfo_tryfree(ksi);
 2143                 return (ret);
 2144         }
 2145         if (SIGISMEMBER(td->td_sigmask, sig))
 2146                 action = SIG_HOLD;
 2147         else if (SIGISMEMBER(ps->ps_sigcatch, sig))
 2148                 action = SIG_CATCH;
 2149         else
 2150                 action = SIG_DFL;
 2151         if (SIGISMEMBER(ps->ps_sigintr, sig))
 2152                 intrval = EINTR;
 2153         else
 2154                 intrval = ERESTART;
 2155         mtx_unlock(&ps->ps_mtx);
 2156 
 2157         if (prop & SA_CONT)
 2158                 sigqueue_delete_stopmask_proc(p);
 2159         else if (prop & SA_STOP) {
 2160                 /*
 2161                  * If sending a tty stop signal to a member of an orphaned
 2162                  * process group, discard the signal here if the action
 2163                  * is default; don't stop the process below if sleeping,
 2164                  * and don't clear any pending SIGCONT.
 2165                  */
 2166                 if ((prop & SA_TTYSTOP) &&
 2167                     (p->p_pgrp->pg_jobc == 0) &&
 2168                     (action == SIG_DFL)) {
 2169                         if (ksi && (ksi->ksi_flags & KSI_INS))
 2170                                 ksiginfo_tryfree(ksi);
 2171                         return (ret);
 2172                 }
 2173                 sigqueue_delete_proc(p, SIGCONT);
 2174                 if (p->p_flag & P_CONTINUED) {
 2175                         p->p_flag &= ~P_CONTINUED;
 2176                         PROC_LOCK(p->p_pptr);
 2177                         sigqueue_take(p->p_ksi);
 2178                         PROC_UNLOCK(p->p_pptr);
 2179                 }
 2180         }
 2181 
 2182         ret = sigqueue_add(sigqueue, sig, ksi);
 2183         if (ret != 0)
 2184                 return (ret);
 2185         signotify(td);
 2186         /*
 2187          * Defer further processing for signals which are held,
 2188          * except that stopped processes must be continued by SIGCONT.
 2189          */
 2190         if (action == SIG_HOLD &&
 2191             !((prop & SA_CONT) && (p->p_flag & P_STOPPED_SIG)))
 2192                 return (ret);
 2193         /*
 2194          * SIGKILL: Remove procfs STOPEVENTs.
 2195          */
 2196         if (sig == SIGKILL) {
 2197                 /* from procfs_ioctl.c: PIOCBIC */
 2198                 p->p_stops = 0;
 2199                 /* from procfs_ioctl.c: PIOCCONT */
 2200                 p->p_step = 0;
 2201                 wakeup(&p->p_step);
 2202         }
 2203         /*
 2204          * Some signals have a process-wide effect and a per-thread
 2205          * component.  Most processing occurs when the process next
 2206          * tries to cross the user boundary; however, there are some
 2207          * times when processing needs to be done immediately, such as
 2208          * waking up threads so that they can cross the user boundary.
 2209          * We try to do the per-process part here.
 2210          */
 2211         if (P_SHOULDSTOP(p)) {
 2212                 KASSERT(!(p->p_flag & P_WEXIT),
 2213                     ("signal to stopped but exiting process"));
 2214                 if (sig == SIGKILL) {
 2215                         /*
 2216                          * If traced process is already stopped,
 2217                          * then no further action is necessary.
 2218                          */
 2219                         if (p->p_flag & P_TRACED)
 2220                                 goto out;
 2221                         /*
 2222                          * SIGKILL sets process running.
 2223                          * It will die elsewhere.
 2224                          * All threads must be restarted.
 2225                          */
 2226                         p->p_flag &= ~P_STOPPED_SIG;
 2227                         goto runfast;
 2228                 }
 2229 
 2230                 if (prop & SA_CONT) {
 2231                         /*
 2232                          * If traced process is already stopped,
 2233                          * then no further action is necessary.
 2234                          */
 2235                         if (p->p_flag & P_TRACED)
 2236                                 goto out;
 2237                         /*
 2238                          * If SIGCONT is default (or ignored), we continue the
 2239                          * process but don't leave the signal in sigqueue as
 2240                          * it has no further action.  If SIGCONT is held, we
 2241                          * continue the process and leave the signal in
 2242                          * sigqueue.  If the process catches SIGCONT, let it
 2243                          * handle the signal itself.  If it isn't waiting on
 2244                          * an event, it goes back to run state.
 2245                          * Otherwise, the process goes back to the sleep state.
 2246                          */
 2247                         p->p_flag &= ~P_STOPPED_SIG;
 2248                         PROC_SLOCK(p);
 2249                         if (p->p_numthreads == p->p_suspcount) {
 2250                                 PROC_SUNLOCK(p);
 2251                                 p->p_flag |= P_CONTINUED;
 2252                                 p->p_xsig = SIGCONT;
 2253                                 PROC_LOCK(p->p_pptr);
 2254                                 childproc_continued(p);
 2255                                 PROC_UNLOCK(p->p_pptr);
 2256                                 PROC_SLOCK(p);
 2257                         }
 2258                         if (action == SIG_DFL) {
 2259                                 thread_unsuspend(p);
 2260                                 PROC_SUNLOCK(p);
 2261                                 sigqueue_delete(sigqueue, sig);
 2262                                 goto out;
 2263                         }
 2264                         if (action == SIG_CATCH) {
 2265                                 /*
 2266                                  * The process wants to catch it so it needs
 2267                                  * to run at least one thread, but which one?
 2268                                  */
 2269                                 PROC_SUNLOCK(p);
 2270                                 goto runfast;
 2271                         }
 2272                         /*
 2273                          * The signal is not ignored or caught.
 2274                          */
 2275                         thread_unsuspend(p);
 2276                         PROC_SUNLOCK(p);
 2277                         goto out;
 2278                 }
 2279 
 2280                 if (prop & SA_STOP) {
 2281                         /*
 2282                          * If traced process is already stopped,
 2283                          * then no further action is necessary.
 2284                          */
 2285                         if (p->p_flag & P_TRACED)
 2286                                 goto out;
 2287                         /*
 2288                          * Already stopped; no need to stop again
 2289                          * (if we did, the shell could get confused).
 2290                          * Just make sure the signal STOP bit is set.
 2291                          */
 2292                         p->p_flag |= P_STOPPED_SIG;
 2293                         sigqueue_delete(sigqueue, sig);
 2294                         goto out;
 2295                 }
 2296 
 2297                 /*
 2298                  * All other kinds of signals:
 2299                  * If a thread is sleeping interruptibly, simulate a
 2300                  * wakeup so that when it is continued it will be made
 2301                  * runnable and can look at the signal.  However, don't make
 2302                  * the PROCESS runnable, leave it stopped.
 2303                  * It may run a bit until it hits a thread_suspend_check().
 2304                  */
 2305                 wakeup_swapper = 0;
 2306                 PROC_SLOCK(p);
 2307                 thread_lock(td);
 2308                 if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR))
 2309                         wakeup_swapper = sleepq_abort(td, intrval);
 2310                 thread_unlock(td);
 2311                 PROC_SUNLOCK(p);
 2312                 if (wakeup_swapper)
 2313                         kick_proc0();
 2314                 goto out;
 2315                 /*
 2316                  * Mutexes are short lived. Threads waiting on them will
 2317                  * hit thread_suspend_check() soon.
 2318                  */
 2319         } else if (p->p_state == PRS_NORMAL) {
 2320                 if (p->p_flag & P_TRACED || action == SIG_CATCH) {
 2321                         tdsigwakeup(td, sig, action, intrval);
 2322                         goto out;
 2323                 }
 2324 
 2325                 MPASS(action == SIG_DFL);
 2326 
 2327                 if (prop & SA_STOP) {
 2328                         if (p->p_flag & (P_PPWAIT|P_WEXIT))
 2329                                 goto out;
 2330                         p->p_flag |= P_STOPPED_SIG;
 2331                         p->p_xsig = sig;
 2332                         PROC_SLOCK(p);
 2333                         wakeup_swapper = sig_suspend_threads(td, p, 1);
 2334                         if (p->p_numthreads == p->p_suspcount) {
 2335                                 /*
 2336                                  * Only a thread sending a signal to
 2337                                  * another process can reach here; when a
 2338                                  * thread signals its own process it does
 2339                                  * not suspend itself here, so p_numthreads
 2340                                  * should never be equal to p_suspcount.
 2341                                  */
 2342                                 thread_stopped(p);
 2343                                 PROC_SUNLOCK(p);
 2344                                 sigqueue_delete_proc(p, p->p_xsig);
 2345                         } else
 2346                                 PROC_SUNLOCK(p);
 2347                         if (wakeup_swapper)
 2348                                 kick_proc0();
 2349                         goto out;
 2350                 }
 2351         } else {
 2352                 /* Not in "NORMAL" state.  Discard the signal. */
 2353                 sigqueue_delete(sigqueue, sig);
 2354                 goto out;
 2355         }
 2356 
 2357         /*
 2358          * The process is not stopped so we need to apply the signal to all the
 2359          * running threads.
 2360          */
 2361 runfast:
 2362         tdsigwakeup(td, sig, action, intrval);
 2363         PROC_SLOCK(p);
 2364         thread_unsuspend(p);
 2365         PROC_SUNLOCK(p);
 2366 out:
 2367         /* If we jump here, proc slock should not be owned. */
 2368         PROC_SLOCK_ASSERT(p, MA_NOTOWNED);
 2369         return (ret);
 2370 }
 2371 
 2372 /*
 2373  * The force of a signal has been directed against a single
 2374  * thread.  We need to see what we can do about knocking it
 2375  * out of any sleep it may be in, etc.
 2376  */
 2377 static void
 2378 tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
 2379 {
 2380         struct proc *p = td->td_proc;
 2381         register int prop;
 2382         int wakeup_swapper;
 2383 
 2384         wakeup_swapper = 0;
 2385         PROC_LOCK_ASSERT(p, MA_OWNED);
 2386         prop = sigprop(sig);
 2387 
 2388         PROC_SLOCK(p);
 2389         thread_lock(td);
 2390         /*
 2391          * Bring the priority of a thread up if we want it to get
 2392          * killed in this lifetime.  Be careful to avoid bumping the
 2393          * priority of the idle thread, since we still allow signals
 2394          * to be sent to kernel processes.
 2395          */
 2396         if (action == SIG_DFL && (prop & SA_KILL) != 0 &&
 2397             td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
 2398                 sched_prio(td, PUSER);
 2399         if (TD_ON_SLEEPQ(td)) {
 2400                 /*
 2401                  * If the thread is sleeping uninterruptibly,
 2402                  * we can't interrupt the sleep... the signal will
 2403                  * be noticed when the process returns through
 2404                  * trap() or syscall().
 2405                  */
 2406                 if ((td->td_flags & TDF_SINTR) == 0)
 2407                         goto out;
 2408                 /*
 2409                  * If SIGCONT is default (or ignored) and process is
 2410                  * asleep, we are finished; the process should not
 2411                  * be awakened.
 2412                  */
 2413                 if ((prop & SA_CONT) && action == SIG_DFL) {
 2414                         thread_unlock(td);
 2415                         PROC_SUNLOCK(p);
 2416                         sigqueue_delete(&p->p_sigqueue, sig);
 2417                         /*
 2418                          * It may be on either list in this state.
 2419                          * Remove from both for now.
 2420                          */
 2421                         sigqueue_delete(&td->td_sigqueue, sig);
 2422                         return;
 2423                 }
 2424 
 2425                 /*
 2426                  * Don't awaken a sleeping thread for SIGSTOP if the
 2427                  * STOP signal is deferred.
 2428                  */
 2429                 if ((prop & SA_STOP) != 0 && (td->td_flags & (TDF_SBDRY |
 2430                     TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY)
 2431                         goto out;
 2432 
 2433                 /*
 2434                  * Give low priority threads a better chance to run.
 2435                  */
 2436                 if (td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
 2437                         sched_prio(td, PUSER);
 2438 
 2439                 wakeup_swapper = sleepq_abort(td, intrval);
 2440         } else {
 2441                 /*
 2442                  * Other states do nothing with the signal immediately,
 2443                  * other than kicking ourselves if we are running.
 2444                  * It will either never be noticed, or noticed very soon.
 2445                  */
 2446 #ifdef SMP
 2447                 if (TD_IS_RUNNING(td) && td != curthread)
 2448                         forward_signal(td);
 2449 #endif
 2450         }
 2451 out:
 2452         PROC_SUNLOCK(p);
 2453         thread_unlock(td);
 2454         if (wakeup_swapper)
 2455                 kick_proc0();
 2456 }
 2457 
 2458 static int
 2459 sig_suspend_threads(struct thread *td, struct proc *p, int sending)
 2460 {
 2461         struct thread *td2;
 2462         int wakeup_swapper;
 2463 
 2464         PROC_LOCK_ASSERT(p, MA_OWNED);
 2465         PROC_SLOCK_ASSERT(p, MA_OWNED);
 2466 
 2467         wakeup_swapper = 0;
 2468         FOREACH_THREAD_IN_PROC(p, td2) {
 2469                 thread_lock(td2);
 2470                 td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
 2471                 if ((TD_IS_SLEEPING(td2) || TD_IS_SWAPPED(td2)) &&
 2472                     (td2->td_flags & TDF_SINTR)) {
 2473                         if (td2->td_flags & TDF_SBDRY) {
 2474                                 /*
 2475                                  * Once a thread is asleep with
 2476                                  * TDF_SBDRY and without TDF_SERESTART
 2477                                  * or TDF_SEINTR set, it should never
 2478                                  * become suspended due to this check.
 2479                                  */
 2480                                 KASSERT(!TD_IS_SUSPENDED(td2),
 2481                                     ("thread with deferred stops suspended"));
 2482                                 if (TD_SBDRY_INTR(td2) && sending) {
 2483                                         wakeup_swapper |= sleepq_abort(td2,
 2484                                             TD_SBDRY_ERRNO(td2));
 2485                                 }
 2486                         } else if (!TD_IS_SUSPENDED(td2)) {
 2487                                 thread_suspend_one(td2);
 2488                         }
 2489                 } else if (!TD_IS_SUSPENDED(td2)) {
 2490                         if (sending || td != td2)
 2491                                 td2->td_flags |= TDF_ASTPENDING;
 2492 #ifdef SMP
 2493                         if (TD_IS_RUNNING(td2) && td2 != td)
 2494                                 forward_signal(td2);
 2495 #endif
 2496                 }
 2497                 thread_unlock(td2);
 2498         }
 2499         return (wakeup_swapper);
 2500 }
 2501 
 2502 int
 2503 ptracestop(struct thread *td, int sig)
 2504 {
 2505         struct proc *p = td->td_proc;
 2506 
 2507         PROC_LOCK_ASSERT(p, MA_OWNED);
 2508         KASSERT(!(p->p_flag & P_WEXIT), ("Stopping exiting process"));
 2509         WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
 2510             &p->p_mtx.lock_object, "Stopping for traced signal");
 2511 
 2512         td->td_dbgflags |= TDB_XSIG;
 2513         td->td_xsig = sig;
 2514         CTR4(KTR_PTRACE, "ptracestop: tid %d (pid %d) flags %#x sig %d",
 2515             td->td_tid, p->p_pid, td->td_dbgflags, sig);
 2516         PROC_SLOCK(p);
 2517         while ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_XSIG)) {
 2518                 if (p->p_flag & P_SINGLE_EXIT &&
 2519                     !(td->td_dbgflags & TDB_EXIT)) {
 2520                         /*
 2521                          * Ignore ptrace stops except for thread exit
 2522                          * events when the process exits.
 2523                          */
 2524                         td->td_dbgflags &= ~TDB_XSIG;
 2525                         PROC_SUNLOCK(p);
 2526                         return (sig);
 2527                 }
 2528                 /*
 2529                  * Just make wait() work; the last stopped thread
 2530                  * wins.
 2531                  */
 2532                 p->p_xsig = sig;
 2533                 p->p_xthread = td;
 2534                 p->p_flag |= (P_STOPPED_SIG|P_STOPPED_TRACE);
 2535                 sig_suspend_threads(td, p, 0);
 2536                 if ((td->td_dbgflags & TDB_STOPATFORK) != 0) {
 2537                         td->td_dbgflags &= ~TDB_STOPATFORK;
 2538                         cv_broadcast(&p->p_dbgwait);
 2539                 }
 2540 stopme:
 2541                 thread_suspend_switch(td, p);
 2542                 if (p->p_xthread == td)
 2543                         p->p_xthread = NULL;
 2544                 if (!(p->p_flag & P_TRACED))
 2545                         break;
 2546                 if (td->td_dbgflags & TDB_SUSPEND) {
 2547                         if (p->p_flag & P_SINGLE_EXIT)
 2548                                 break;
 2549                         goto stopme;
 2550                 }
 2551         }
 2552         PROC_SUNLOCK(p);
 2553         return (td->td_xsig);
 2554 }
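
/*
 * Illustration (not part of the original kern_sig.c): ptracestop() is the
 * stop a traced process reports to its debugger; the debugger decides with
 * the data argument of PT_CONTINUE/PT_DETACH whether the original signal is
 * delivered or discarded (the newsig exchange in issignal() below).  A
 * hedged sketch of a standalone tracer; the pid comes from argv and error
 * handling is mostly omitted.
 */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <err.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

int
main(int argc, char **argv)
{
        pid_t pid;
        int status, sig;

        if (argc != 2)
                errx(1, "usage: %s pid", argv[0]);
        pid = (pid_t)atoi(argv[1]);

        if (ptrace(PT_ATTACH, pid, NULL, 0) == -1)
                err(1, "PT_ATTACH");
        waitpid(pid, &status, 0);       /* tracee stops (ptracestop) */

        kill(pid, SIGUSR1);             /* queue a signal for it */
        ptrace(PT_CONTINUE, pid, (caddr_t)1, 0);
        waitpid(pid, &status, 0);       /* stops again, reporting the signal */
        sig = WIFSTOPPED(status) ? WSTOPSIG(status) : 0;
        printf("tracee stopped with signal %d\n", sig);

        /* Detach and discard the signal; passing sig instead would deliver it. */
        ptrace(PT_DETACH, pid, (caddr_t)1, 0);
        return (0);
}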
 2555 
 2556 static void
 2557 reschedule_signals(struct proc *p, sigset_t block, int flags)
 2558 {
 2559         struct sigacts *ps;
 2560         struct thread *td;
 2561         int sig;
 2562 
 2563         PROC_LOCK_ASSERT(p, MA_OWNED);
 2564         ps = p->p_sigacts;
 2565         mtx_assert(&ps->ps_mtx, (flags & SIGPROCMASK_PS_LOCKED) != 0 ?
 2566             MA_OWNED : MA_NOTOWNED);
 2567         if (SIGISEMPTY(p->p_siglist))
 2568                 return;
 2569         SIGSETAND(block, p->p_siglist);
 2570         while ((sig = sig_ffs(&block)) != 0) {
 2571                 SIGDELSET(block, sig);
 2572                 td = sigtd(p, sig, 0);
 2573                 signotify(td);
 2574                 if (!(flags & SIGPROCMASK_PS_LOCKED))
 2575                         mtx_lock(&ps->ps_mtx);
 2576                 if (p->p_flag & P_TRACED || SIGISMEMBER(ps->ps_sigcatch, sig))
 2577                         tdsigwakeup(td, sig, SIG_CATCH,
 2578                             (SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR :
 2579                              ERESTART));
 2580                 if (!(flags & SIGPROCMASK_PS_LOCKED))
 2581                         mtx_unlock(&ps->ps_mtx);
 2582         }
 2583 }
 2584 
 2585 void
 2586 tdsigcleanup(struct thread *td)
 2587 {
 2588         struct proc *p;
 2589         sigset_t unblocked;
 2590 
 2591         p = td->td_proc;
 2592         PROC_LOCK_ASSERT(p, MA_OWNED);
 2593 
 2594         sigqueue_flush(&td->td_sigqueue);
 2595         if (p->p_numthreads == 1)
 2596                 return;
 2597 
 2598         /*
 2599          * Since we cannot handle signals, notify signal post code
 2600          * about this by filling the sigmask.
 2601          *
 2602          * Also, if needed, wake up thread(s) that do not block the
 2603          * same signals as the exiting thread, since the thread might
 2604          * have been selected for delivery and woken up.
 2605          */
 2606         SIGFILLSET(unblocked);
 2607         SIGSETNAND(unblocked, td->td_sigmask);
 2608         SIGFILLSET(td->td_sigmask);
 2609         reschedule_signals(p, unblocked, 0);
 2610 
 2611 }
 2612 
 2613 static int
 2614 sigdeferstop_curr_flags(int cflags)
 2615 {
 2616 
 2617         MPASS((cflags & (TDF_SEINTR | TDF_SERESTART)) == 0 ||
 2618             (cflags & TDF_SBDRY) != 0);
 2619         return (cflags & (TDF_SBDRY | TDF_SEINTR | TDF_SERESTART));
 2620 }
 2621 
 2622 /*
 2623  * Defer the delivery of SIGSTOP for the current thread, according to
 2624  * the requested mode.  Returns previous flags, which must be restored
 2625  * by sigallowstop().
 2626  *
 2627  * The TDF_SBDRY, TDF_SEINTR, and TDF_SERESTART flags are only set and
 2628  * cleared by the current thread, which allows the lock-less read-only
 2629  * accesses below.
 2630  */
 2631 int
 2632 sigdeferstop_impl(int mode)
 2633 {
 2634         struct thread *td;
 2635         int cflags, nflags;
 2636 
 2637         td = curthread;
 2638         cflags = sigdeferstop_curr_flags(td->td_flags);
 2639         switch (mode) {
 2640         case SIGDEFERSTOP_NOP:
 2641                 nflags = cflags;
 2642                 break;
 2643         case SIGDEFERSTOP_OFF:
 2644                 nflags = 0;
 2645                 break;
 2646         case SIGDEFERSTOP_SILENT:
 2647                 nflags = (cflags | TDF_SBDRY) & ~(TDF_SEINTR | TDF_SERESTART);
 2648                 break;
 2649         case SIGDEFERSTOP_EINTR:
 2650                 nflags = (cflags | TDF_SBDRY | TDF_SEINTR) & ~TDF_SERESTART;
 2651                 break;
 2652         case SIGDEFERSTOP_ERESTART:
 2653                 nflags = (cflags | TDF_SBDRY | TDF_SERESTART) & ~TDF_SEINTR;
 2654                 break;
 2655         default:
 2656                 panic("sigdeferstop: invalid mode %x", mode);
 2657                 break;
 2658         }
 2659         if (cflags == nflags)
 2660                 return (SIGDEFERSTOP_VAL_NCHG);
 2661         thread_lock(td);
 2662         td->td_flags = (td->td_flags & ~cflags) | nflags;
 2663         thread_unlock(td);
 2664         return (cflags);
 2665 }
 2666 
 2667 /*
 2668  * Restores the STOP handling mode, typically permitting the delivery
 2669  * of SIGSTOP for the current thread.  This does not immediately
 2670  * suspend if a stop was posted.  Instead, the thread will suspend
 2671  * either via ast() or a subsequent interruptible sleep.
 2672  */
 2673 void
 2674 sigallowstop_impl(int prev)
 2675 {
 2676         struct thread *td;
 2677         int cflags;
 2678 
 2679         KASSERT(prev != SIGDEFERSTOP_VAL_NCHG, ("failed sigallowstop"));
 2680         KASSERT((prev & ~(TDF_SBDRY | TDF_SEINTR | TDF_SERESTART)) == 0,
 2681             ("sigallowstop: incorrect previous mode %x", prev));
 2682         td = curthread;
 2683         cflags = sigdeferstop_curr_flags(td->td_flags);
 2684         if (cflags != prev) {
 2685                 thread_lock(td);
 2686                 td->td_flags = (td->td_flags & ~cflags) | prev;
 2687                 thread_unlock(td);
 2688         }
 2689 }
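
/*
 * Illustration (not part of the original kern_sig.c): kernel code that must
 * not be suspended mid-operation brackets the region with the
 * sigdeferstop()/sigallowstop() wrappers (inline wrappers around the _impl
 * routines above, assumed to live in <sys/signalvar.h>).  A sketch of the
 * usage pattern; do_blocking_io() is an invented placeholder.
 */
static int
example_io_path(void)
{
        int error, stop_prev;

        /* Turn stop requests into EINTR for sleeps inside this region. */
        stop_prev = sigdeferstop(SIGDEFERSTOP_EINTR);
        error = do_blocking_io();       /* placeholder for a sleeping operation */
        sigallowstop(stop_prev);        /* restore the previous STOP handling */
        return (error);
}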
 2690 
 2691 /*
 2692  * If the current process has received a signal (one that should be caught,
 2693  * should cause termination, or should interrupt the current syscall),
 2694  * return the signal number.  Stop signals with default action are processed
 2695  * immediately, then cleared; they aren't returned.  This is checked after
 2696  * each entry to the system for a syscall or trap (though this can usually
 2697  * be done without calling issignal by checking the pending signal masks
 2698  * in cursig).  The normal call sequence is
 2699  *
 2700  *      while (sig = cursig(curthread))
 2701  *              postsig(sig);
 2702  */
 2703 static int
 2704 issignal(struct thread *td)
 2705 {
 2706         struct proc *p;
 2707         struct sigacts *ps;
 2708         struct sigqueue *queue;
 2709         sigset_t sigpending;
 2710         int sig, prop, newsig;
 2711 
 2712         p = td->td_proc;
 2713         ps = p->p_sigacts;
 2714         mtx_assert(&ps->ps_mtx, MA_OWNED);
 2715         PROC_LOCK_ASSERT(p, MA_OWNED);
 2716         for (;;) {
 2717                 int traced = (p->p_flag & P_TRACED) || (p->p_stops & S_SIG);
 2718 
 2719                 sigpending = td->td_sigqueue.sq_signals;
 2720                 SIGSETOR(sigpending, p->p_sigqueue.sq_signals);
 2721                 SIGSETNAND(sigpending, td->td_sigmask);
 2722 
 2723                 if ((p->p_flag & P_PPWAIT) != 0 || (td->td_flags &
 2724                     (TDF_SBDRY | TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY)
 2725                         SIG_STOPSIGMASK(sigpending);
 2726                 if (SIGISEMPTY(sigpending))     /* no signal to send */
 2727                         return (0);
 2728                 sig = sig_ffs(&sigpending);
 2729 
 2730                 if (p->p_stops & S_SIG) {
 2731                         mtx_unlock(&ps->ps_mtx);
 2732                         stopevent(p, S_SIG, sig);
 2733                         mtx_lock(&ps->ps_mtx);
 2734                 }
 2735 
 2736                 /*
 2737                  * We should see pending but ignored signals
 2738                  * only if P_TRACED was on when they were posted.
 2739                  */
 2740                 if (SIGISMEMBER(ps->ps_sigignore, sig) && (traced == 0)) {
 2741                         sigqueue_delete(&td->td_sigqueue, sig);
 2742                         sigqueue_delete(&p->p_sigqueue, sig);
 2743                         continue;
 2744                 }
 2745                 if (p->p_flag & P_TRACED && (p->p_flag & P_PPTRACE) == 0) {
 2746                         /*
 2747                          * If traced, always stop.
 2748                          * Remove old signal from queue before the stop.
 2749                          * XXX shrug off debugger, it causes siginfo to
 2750                          * be thrown away.
 2751                          */
 2752                         queue = &td->td_sigqueue;
 2753                         td->td_dbgksi.ksi_signo = 0;
 2754                         if (sigqueue_get(queue, sig, &td->td_dbgksi) == 0) {
 2755                                 queue = &p->p_sigqueue;
 2756                                 sigqueue_get(queue, sig, &td->td_dbgksi);
 2757                         }
 2758 
 2759                         mtx_unlock(&ps->ps_mtx);
 2760                         newsig = ptracestop(td, sig);
 2761                         mtx_lock(&ps->ps_mtx);
 2762 
 2763                         if (sig != newsig) {
 2764 
 2765                                 /*
 2766                                  * If parent wants us to take the signal,
 2767                                  * then it will leave it in p->p_xsig;
 2768                                  * otherwise we just look for signals again.
 2769                                  */
 2770                                 if (newsig == 0)
 2771                                         continue;
 2772                                 sig = newsig;
 2773 
 2774                                 /*
 2775                                  * Put the new signal into td_sigqueue. If the
 2776                                  * signal is being masked, look for other
 2777                                  * signals.
 2778                                  */
 2779                                 sigqueue_add(queue, sig, NULL);
 2780                                 if (SIGISMEMBER(td->td_sigmask, sig))
 2781                                         continue;
 2782                                 signotify(td);
 2783                         } else {
 2784                                 if (td->td_dbgksi.ksi_signo != 0) {
 2785                                         td->td_dbgksi.ksi_flags |= KSI_HEAD;
 2786                                         if (sigqueue_add(&td->td_sigqueue, sig,
 2787                                             &td->td_dbgksi) != 0)
 2788                                                 td->td_dbgksi.ksi_signo = 0;
 2789                                 }
 2790                                 if (td->td_dbgksi.ksi_signo == 0)
 2791                                         sigqueue_add(&td->td_sigqueue, sig,
 2792                                             NULL);
 2793                         }
 2794 
 2795                         /*
 2796                          * If the traced bit got turned off, go back up
 2797                          * to the top to rescan signals.  This ensures
 2798                          * that p_sig* and p_sigact are consistent.
 2799                          */
 2800                         if ((p->p_flag & P_TRACED) == 0)
 2801                                 continue;
 2802                 }
 2803 
 2804                 prop = sigprop(sig);
 2805 
 2806                 /*
 2807                  * Decide whether the signal should be returned.
 2808                  * Return the signal's number, or fall through
 2809                  * to clear it from the pending mask.
 2810                  */
 2811                 switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {
 2812 
 2813                 case (intptr_t)SIG_DFL:
 2814                         /*
 2815                          * Don't take default actions on system processes.
 2816                          */
 2817                         if (p->p_pid <= 1) {
 2818 #ifdef DIAGNOSTIC
 2819                                 /*
 2820                                  * Are you sure you want to ignore SIGSEGV
 2821                                  * in init? XXX
 2822                                  */
 2823                                 printf("Process (pid %lu) got signal %d\n",
 2824                                         (u_long)p->p_pid, sig);
 2825 #endif
 2826                                 break;          /* == ignore */
 2827                         }
 2828                         /*
 2829                          * If there is a pending stop signal to process
 2830                          * with default action, stop here,
 2831                          * then clear the signal.  However,
 2832                          * if the process is a member of an orphaned
 2833                          * process group, ignore tty stop signals.
 2834                          */
 2835                         if (prop & SA_STOP) {
 2836                                 if (p->p_flag & (P_TRACED|P_WEXIT) ||
 2837                                     (p->p_pgrp->pg_jobc == 0 &&
 2838                                      prop & SA_TTYSTOP))
 2839                                         break;  /* == ignore */
 2840                                 if (TD_SBDRY_INTR(td)) {
 2841                                         KASSERT((td->td_flags & TDF_SBDRY) != 0,
 2842                                             ("lost TDF_SBDRY"));
 2843                                         return (-1);
 2844                                 }
 2845                                 mtx_unlock(&ps->ps_mtx);
 2846                                 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
 2847                                     &p->p_mtx.lock_object, "Catching SIGSTOP");
 2848                                 p->p_flag |= P_STOPPED_SIG;
 2849                                 p->p_xsig = sig;
 2850                                 PROC_SLOCK(p);
 2851                                 sig_suspend_threads(td, p, 0);
 2852                                 thread_suspend_switch(td, p);
 2853                                 PROC_SUNLOCK(p);
 2854                                 mtx_lock(&ps->ps_mtx);
 2855                                 break;
 2856                         } else if (prop & SA_IGNORE) {
 2857                                 /*
 2858                                  * Except for SIGCONT, shouldn't get here.
 2859                                  * Default action is to ignore; drop it.
 2860                                  */
 2861                                 break;          /* == ignore */
 2862                         } else
 2863                                 return (sig);
 2864                         /*NOTREACHED*/
 2865 
 2866                 case (intptr_t)SIG_IGN:
 2867                         /*
 2868                          * The masking above should prevent us from ever
 2869                          * trying to take action on an ignored signal
 2870                          * other than SIGCONT, unless the process is traced.
 2871                          */
 2872                         if ((prop & SA_CONT) == 0 &&
 2873                             (p->p_flag & P_TRACED) == 0)
 2874                                 printf("issignal\n");
 2875                         break;          /* == ignore */
 2876 
 2877                 default:
 2878                         /*
 2879                          * This signal has an action, let
 2880                          * postsig() process it.
 2881                          */
 2882                         return (sig);
 2883                 }
 2884                 sigqueue_delete(&td->td_sigqueue, sig); /* take the signal! */
 2885                 sigqueue_delete(&p->p_sigqueue, sig);
 2886         }
 2887         /* NOTREACHED */
 2888 }
 2889 
 2890 void
 2891 thread_stopped(struct proc *p)
 2892 {
 2893         int n;
 2894 
 2895         PROC_LOCK_ASSERT(p, MA_OWNED);
 2896         PROC_SLOCK_ASSERT(p, MA_OWNED);
 2897         n = p->p_suspcount;
 2898         if (p == curproc)
 2899                 n++;
 2900         if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) {
 2901                 PROC_SUNLOCK(p);
 2902                 p->p_flag &= ~P_WAITED;
 2903                 PROC_LOCK(p->p_pptr);
 2904                 childproc_stopped(p, (p->p_flag & P_TRACED) ?
 2905                         CLD_TRAPPED : CLD_STOPPED);
 2906                 PROC_UNLOCK(p->p_pptr);
 2907                 PROC_SLOCK(p);
 2908         }
 2909 }
 2910 
 2911 /*
 2912  * Take the action for the specified signal
 2913  * from the current set of pending signals.
 2914  */
 2915 int
 2916 postsig(int sig)
 2917 {
 2918         struct thread *td = curthread;
 2919         struct proc *p = td->td_proc;
 2921         struct sigacts *ps;
 2922         sig_t action;
 2923         ksiginfo_t ksi;
 2924         sigset_t returnmask;
 2925 
 2926         KASSERT(sig != 0, ("postsig"));
 2927 
 2928         PROC_LOCK_ASSERT(p, MA_OWNED);
 2929         ps = p->p_sigacts;
 2930         mtx_assert(&ps->ps_mtx, MA_OWNED);
 2931         ksiginfo_init(&ksi);
 2932         if (sigqueue_get(&td->td_sigqueue, sig, &ksi) == 0 &&
 2933             sigqueue_get(&p->p_sigqueue, sig, &ksi) == 0)
 2934                 return (0);
 2935         ksi.ksi_signo = sig;
 2936         if (ksi.ksi_code == SI_TIMER)
 2937                 itimer_accept(p, ksi.ksi_timerid, &ksi);
 2938         action = ps->ps_sigact[_SIG_IDX(sig)];
 2939 #ifdef KTRACE
 2940         if (KTRPOINT(td, KTR_PSIG))
 2941                 ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ?
 2942                     &td->td_oldsigmask : &td->td_sigmask, ksi.ksi_code);
 2943 #endif
 2944         if (p->p_stops & S_SIG) {
 2945                 mtx_unlock(&ps->ps_mtx);
 2946                 stopevent(p, S_SIG, sig);
 2947                 mtx_lock(&ps->ps_mtx);
 2948         }
 2949 
 2950         if (action == SIG_DFL) {
 2951                 /*
 2952                  * Default action, where the default is to kill
 2953                  * the process.  (Other cases were ignored above.)
 2954                  */
 2955                 mtx_unlock(&ps->ps_mtx);
 2956                 sigexit(td, sig);
 2957                 /* NOTREACHED */
 2958         } else {
 2959                 /*
 2960                  * If we get here, the signal must be caught.
 2961                  */
 2962                 KASSERT(action != SIG_IGN && !SIGISMEMBER(td->td_sigmask, sig),
 2963                     ("postsig action"));
 2964                 /*
 2965                  * Set the new mask value and also defer further
 2966                  * occurrences of this signal.
 2967                  *
 2968                  * Special case: user has done a sigsuspend.  Here the
 2969                  * current mask is not of interest, but rather the
 2970                  * mask from before the sigsuspend is what we want
 2971                  * restored after the signal processing is completed.
 2972                  */
 2973                 if (td->td_pflags & TDP_OLDMASK) {
 2974                         returnmask = td->td_oldsigmask;
 2975                         td->td_pflags &= ~TDP_OLDMASK;
 2976                 } else
 2977                         returnmask = td->td_sigmask;
 2978 
 2979                 if (p->p_sig == sig) {
 2980                         p->p_code = 0;
 2981                         p->p_sig = 0;
 2982                 }
 2983                 (*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask);
 2984                 postsig_done(sig, td, ps);
 2985         }
 2986         return (1);
 2987 }
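
/*
 * Editor's illustration (not part of kern_sig.c): the TDP_OLDMASK handling
 * in postsig() above is the kernel side of the sigsuspend(2) contract; the
 * mask saved before the suspension, not the temporary wait mask, is what is
 * restored once the caught signal has been delivered.  A minimal userland
 * sketch of that contract follows; SIGUSR1 and the handler name are
 * arbitrary choices for the example.
 */
#if 0
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t got_usr1;

static void
usr1_handler(int sig)
{

        (void)sig;
        got_usr1 = 1;
}

int
main(void)
{
        sigset_t block, old, suspmask;

        signal(SIGUSR1, usr1_handler);
        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);
        sigprocmask(SIG_BLOCK, &block, &old);   /* SIGUSR1 now masked */

        suspmask = old;                         /* mask used while suspended */
        sigdelset(&suspmask, SIGUSR1);

        /* The handler runs inside sigsuspend(); the kernel restores the
         * pre-suspend (blocking) mask when the call returns. */
        while (!got_usr1)
                sigsuspend(&suspmask);

        printf("SIGUSR1 handled, original mask restored\n");
        return (0);
}
#endif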
 2988 
 2989 /*
 2990  * Kill the current process for stated reason.
 2991  */
 2992 void
 2993 killproc(struct proc *p, char *why)
 2996 {
 2997 
 2998         PROC_LOCK_ASSERT(p, MA_OWNED);
 2999         CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)", p, p->p_pid,
 3000             p->p_comm);
 3001         log(LOG_ERR, "pid %d (%s), uid %d, was killed: %s\n", p->p_pid,
 3002             p->p_comm, p->p_ucred ? p->p_ucred->cr_uid : -1, why);
 3003         p->p_flag |= P_WKILLED;
 3004         kern_psignal(p, SIGKILL);
 3005 }
 3006 
 3007 /*
 3008  * Force the current process to exit with the specified signal, dumping core
 3009  * if appropriate.  We bypass the normal tests for masked and caught signals,
 3010  * allowing unrecoverable failures to terminate the process without changing
 3011  * signal state.  Mark the accounting record with the signal termination.
 3012  * If dumping core, save the signal number for the debugger.  Calls exit and
 3013  * does not return.
 3014  */
 3015 void
 3016 sigexit(struct thread *td, int sig)
 3019 {
 3020         struct proc *p = td->td_proc;
 3021 
 3022         PROC_LOCK_ASSERT(p, MA_OWNED);
 3023         p->p_acflag |= AXSIG;
 3024         /*
 3025          * We must be single-threading to generate a core dump.  This
 3026          * ensures that the registers in the core file are up-to-date.
 3027          * Also, the ELF dump handler assumes that the thread list doesn't
 3028          * change out from under it.
 3029          *
 3030          * XXX If another thread attempts to single-thread before us
 3031          *     (e.g. via fork()), we won't get a dump at all.
 3032          */
 3033         if ((sigprop(sig) & SA_CORE) && thread_single(p, SINGLE_NO_EXIT) == 0) {
 3034                 p->p_sig = sig;
 3035                 /*
 3036                  * Log signals which would cause core dumps
 3037                  * (Log as LOG_INFO to appease those who don't want
 3038                  * these messages.)
 3039                  * XXX : Todo, as well as euid, write out ruid too
 3040                  * Note that coredump() drops proc lock.
 3041                  */
 3042                 if (coredump(td) == 0)
 3043                         sig |= WCOREFLAG;
 3044                 if (kern_logsigexit)
 3045                         log(LOG_INFO,
 3046                             "pid %d (%s), uid %d: exited on signal %d%s\n",
 3047                             p->p_pid, p->p_comm,
 3048                             td->td_ucred ? td->td_ucred->cr_uid : -1,
 3049                             sig &~ WCOREFLAG,
 3050                             sig & WCOREFLAG ? " (core dumped)" : "");
 3051         } else
 3052                 PROC_UNLOCK(p);
 3053         exit1(td, 0, sig);
 3054         /* NOTREACHED */
 3055 }
 3056 
 3057 /*
 3058  * Send queued SIGCHLD to parent when child process's state
 3059  * is changed.
 3060  */
 3061 static void
 3062 sigparent(struct proc *p, int reason, int status)
 3063 {
 3064         PROC_LOCK_ASSERT(p, MA_OWNED);
 3065         PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
 3066 
 3067         if (p->p_ksi != NULL) {
 3068                 p->p_ksi->ksi_signo  = SIGCHLD;
 3069                 p->p_ksi->ksi_code   = reason;
 3070                 p->p_ksi->ksi_status = status;
 3071                 p->p_ksi->ksi_pid    = p->p_pid;
 3072                 p->p_ksi->ksi_uid    = p->p_ucred->cr_ruid;
 3073                 if (KSI_ONQ(p->p_ksi))
 3074                         return;
 3075         }
 3076         pksignal(p->p_pptr, SIGCHLD, p->p_ksi);
 3077 }
 3078 
 3079 static void
 3080 childproc_jobstate(struct proc *p, int reason, int sig)
 3081 {
 3082         struct sigacts *ps;
 3083 
 3084         PROC_LOCK_ASSERT(p, MA_OWNED);
 3085         PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
 3086 
 3087         /*
 3088          * Wake up the parent sleeping in kern_wait() and also
 3089          * send it SIGCHLD.  SIGCHLD alone does not guarantee
 3090          * that the parent will wake up, because the parent may
 3091          * have masked the signal.
 3092          */
 3093         p->p_pptr->p_flag |= P_STATCHILD;
 3094         wakeup(p->p_pptr);
 3095 
 3096         ps = p->p_pptr->p_sigacts;
 3097         mtx_lock(&ps->ps_mtx);
 3098         if ((ps->ps_flag & PS_NOCLDSTOP) == 0) {
 3099                 mtx_unlock(&ps->ps_mtx);
 3100                 sigparent(p, reason, sig);
 3101         } else
 3102                 mtx_unlock(&ps->ps_mtx);
 3103 }
 3104 
 3105 void
 3106 childproc_stopped(struct proc *p, int reason)
 3107 {
 3108 
 3109         childproc_jobstate(p, reason, p->p_xsig);
 3110 }
 3111 
 3112 void
 3113 childproc_continued(struct proc *p)
 3114 {
 3115         childproc_jobstate(p, CLD_CONTINUED, SIGCONT);
 3116 }
 3117 
 3118 void
 3119 childproc_exited(struct proc *p)
 3120 {
 3121         int reason, status;
 3122 
 3123         if (WCOREDUMP(p->p_xsig)) {
 3124                 reason = CLD_DUMPED;
 3125                 status = WTERMSIG(p->p_xsig);
 3126         } else if (WIFSIGNALED(p->p_xsig)) {
 3127                 reason = CLD_KILLED;
 3128                 status = WTERMSIG(p->p_xsig);
 3129         } else {
 3130                 reason = CLD_EXITED;
 3131                 status = p->p_xexit;
 3132         }
 3133         /*
 3134          * XXX avoid calling wakeup(p->p_pptr), the work is
 3135          * done in exit1().
 3136          */
 3137         sigparent(p, reason, status);
 3138 }
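
/*
 * Editor's illustration (not part of kern_sig.c): the CLD_* reason chosen
 * above ends up in the siginfo delivered to the parent, so a SIGCHLD
 * handler installed with SA_SIGINFO can tell how the child changed state.
 * A minimal userland sketch follows; forking a child that calls abort() is
 * just one way to provoke CLD_KILLED or CLD_DUMPED.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void
chld_handler(int sig, siginfo_t *info, void *ctx)
{
        const char *why;

        (void)sig;
        (void)ctx;
        switch (info->si_code) {
        case CLD_EXITED:        why = "exited";         break;
        case CLD_KILLED:        why = "was killed";     break;
        case CLD_DUMPED:        why = "dumped core";    break;
        case CLD_STOPPED:       why = "stopped";        break;
        case CLD_CONTINUED:     why = "continued";      break;
        default:                why = "changed state";  break;
        }
        /* printf() is not async-signal-safe; acceptable in a demo only. */
        printf("child %ld %s (status %d)\n",
            (long)info->si_pid, why, info->si_status);
}

int
main(void)
{
        struct sigaction sa;

        sa.sa_sigaction = chld_handler;
        sa.sa_flags = SA_SIGINFO;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGCHLD, &sa, NULL);

        if (fork() == 0)
                abort();        /* child terminates on SIGABRT */
        pause();                /* wait for the SIGCHLD report */
        return (0);
}
#endif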
 3139 
 3140 /*
 3141  * We only have 1 character for the core count in the format
 3142  * string, so the range will be 0-9
 3143  */
 3144 #define MAX_NUM_CORES 10
 3145 static int num_cores = 5;
 3146 
 3147 static int
 3148 sysctl_debug_num_cores_check (SYSCTL_HANDLER_ARGS)
 3149 {
 3150         int error;
 3151         int new_val;
 3152 
 3153         new_val = num_cores;
 3154         error = sysctl_handle_int(oidp, &new_val, 0, req);
 3155         if (error != 0 || req->newptr == NULL)
 3156                 return (error);
 3157         if (new_val > MAX_NUM_CORES)
 3158                 new_val = MAX_NUM_CORES;
 3159         if (new_val < 0)
 3160                 new_val = 0;
 3161         num_cores = new_val;
 3162         return (0);
 3163 }
 3164 SYSCTL_PROC(_debug, OID_AUTO, ncores, CTLTYPE_INT|CTLFLAG_RW,
 3165             0, sizeof(int), sysctl_debug_num_cores_check, "I", "");
 3166 
 3167 #define GZ_SUFFIX       ".gz"
 3168 
 3169 #ifdef GZIO
 3170 static int compress_user_cores = 1;
 3171 SYSCTL_INT(_kern, OID_AUTO, compress_user_cores, CTLFLAG_RWTUN,
 3172     &compress_user_cores, 0, "Compression of user corefiles");
 3173 
 3174 int compress_user_cores_gzlevel = 6;
 3175 SYSCTL_INT(_kern, OID_AUTO, compress_user_cores_gzlevel, CTLFLAG_RWTUN,
 3176     &compress_user_cores_gzlevel, 0, "Corefile gzip compression level");
 3177 #else
 3178 static int compress_user_cores = 0;
 3179 #endif
 3180 
 3181 /*
 3182  * Protect the access to corefilename[] by allproc_lock.
 3183  */
 3184 #define corefilename_lock       allproc_lock
 3185 
 3186 static char corefilename[MAXPATHLEN] = {"%N.core"};
 3187 TUNABLE_STR("kern.corefile", corefilename, sizeof(corefilename));
 3188 
 3189 static int
 3190 sysctl_kern_corefile(SYSCTL_HANDLER_ARGS)
 3191 {
 3192         int error;
 3193 
 3194         sx_xlock(&corefilename_lock);
 3195         error = sysctl_handle_string(oidp, corefilename, sizeof(corefilename),
 3196             req);
 3197         sx_xunlock(&corefilename_lock);
 3198 
 3199         return (error);
 3200 }
 3201 SYSCTL_PROC(_kern, OID_AUTO, corefile, CTLTYPE_STRING | CTLFLAG_RW |
 3202     CTLFLAG_MPSAFE, 0, 0, sysctl_kern_corefile, "A",
 3203     "Process corefile name format string");
 3204 
 3205 /*
 3206  * corefile_open(comm, uid, pid, td, compress, vpp, namep)
 3207  * Expand the name in corefilename (using comm, uid, and pid) and open or
 3208  * create the core file.  corefilename is a printf-like string; specifiers:
 3209  *      %H      hostname
 3210  *      %I      autoincrementing index (one digit; see debug.ncores)
 3211  *      %N      name of process ("comm")
 3212  *      %P      process id (pid)
 3213  *      %U      user id (uid)
 3214  * "%N.core" is the default; core dumps can be disabled with "/dev/null",
 3215  * or all core files stored in, e.g., "/cores/%U/%N-%P".
 3216  * This is controlled by the sysctl variable kern.corefile (see above).
 3217 static int
 3218 corefile_open(const char *comm, uid_t uid, pid_t pid, struct thread *td,
 3219     int compress, struct vnode **vpp, char **namep)
 3220 {
 3221         struct nameidata nd;
 3222         struct sbuf sb;
 3223         const char *format;
 3224         char *hostname, *name;
 3225         int indexpos, i, error, cmode, flags, oflags;
 3226 
 3227         hostname = NULL;
 3228         format = corefilename;
 3229         name = malloc(MAXPATHLEN, M_TEMP, M_WAITOK | M_ZERO);
 3230         indexpos = -1;
 3231         (void)sbuf_new(&sb, name, MAXPATHLEN, SBUF_FIXEDLEN);
 3232         sx_slock(&corefilename_lock);
 3233         for (i = 0; format[i] != '\0'; i++) {
 3234                 switch (format[i]) {
 3235                 case '%':       /* Format character */
 3236                         i++;
 3237                         switch (format[i]) {
 3238                         case '%':
 3239                                 sbuf_putc(&sb, '%');
 3240                                 break;
 3241                         case 'H':       /* hostname */
 3242                                 if (hostname == NULL) {
 3243                                         hostname = malloc(MAXHOSTNAMELEN,
 3244                                             M_TEMP, M_WAITOK);
 3245                                 }
 3246                                 getcredhostname(td->td_ucred, hostname,
 3247                                     MAXHOSTNAMELEN);
 3248                                 sbuf_printf(&sb, "%s", hostname);
 3249                                 break;
 3250                         case 'I':       /* autoincrementing index */
 3251                                 sbuf_printf(&sb, "0"); /* placeholder digit */
 3252                                 indexpos = sbuf_len(&sb) - 1;
 3253                                 break;
 3254                         case 'N':       /* process name */
 3255                                 sbuf_printf(&sb, "%s", comm);
 3256                                 break;
 3257                         case 'P':       /* process id */
 3258                                 sbuf_printf(&sb, "%u", pid);
 3259                                 break;
 3260                         case 'U':       /* user id */
 3261                                 sbuf_printf(&sb, "%u", uid);
 3262                                 break;
 3263                         default:
 3264                                 log(LOG_ERR,
 3265                                     "Unknown format character %c in "
 3266                                     "corename `%s'\n", format[i], format);
 3267                                 break;
 3268                         }
 3269                         break;
 3270                 default:
 3271                         sbuf_putc(&sb, format[i]);
 3272                         break;
 3273                 }
 3274         }
 3275         sx_sunlock(&corefilename_lock);
 3276         free(hostname, M_TEMP);
 3277         if (compress)
 3278                 sbuf_printf(&sb, GZ_SUFFIX);
 3279         if (sbuf_error(&sb) != 0) {
 3280                 log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too "
 3281                     "long\n", (long)pid, comm, (u_long)uid);
 3282                 sbuf_delete(&sb);
 3283                 free(name, M_TEMP);
 3284                 return (ENOMEM);
 3285         }
 3286         sbuf_finish(&sb);
 3287         sbuf_delete(&sb);
 3288 
 3289         cmode = S_IRUSR | S_IWUSR;
 3290         oflags = VN_OPEN_NOAUDIT | VN_OPEN_NAMECACHE |
 3291             (capmode_coredump ? VN_OPEN_NOCAPCHECK : 0);
 3292 
 3293         /*
 3294          * If the core format has a %I in it, then we need to check
 3295          * for existing corefiles before returning a name.
 3296          * To do this we iterate over 0..num_cores to find a
 3297          * non-existing core file name to use.
 3298          */
 3299         if (indexpos != -1) {
 3300                 for (i = 0; i < num_cores; i++) {
 3301                         flags = O_CREAT | O_EXCL | FWRITE | O_NOFOLLOW;
 3302                         name[indexpos] = '0' + i;
 3303                         NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td);
 3304                         error = vn_open_cred(&nd, &flags, cmode, oflags,
 3305                             td->td_ucred, NULL);
 3306                         if (error) {
 3307                                 if (error == EEXIST)
 3308                                         continue;
 3309                                 log(LOG_ERR,
 3310                                     "pid %d (%s), uid (%u):  Path `%s' failed "
 3311                                     "on initial open test, error = %d\n",
 3312                                     pid, comm, uid, name, error);
 3313                         }
 3314                         goto out;
 3315                 }
 3316         }
 3317 
 3318         flags = O_CREAT | FWRITE | O_NOFOLLOW;
 3319         NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td);
 3320         error = vn_open_cred(&nd, &flags, cmode, oflags, td->td_ucred, NULL);
 3321 out:
 3322         if (error) {
 3323 #ifdef AUDIT
 3324                 audit_proc_coredump(td, name, error);
 3325 #endif
 3326                 free(name, M_TEMP);
 3327                 return (error);
 3328         }
 3329         NDFREE(&nd, NDF_ONLY_PNBUF);
 3330         *vpp = nd.ni_vp;
 3331         *namep = name;
 3332         return (0);
 3333 }
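
/*
 * Editor's illustration (not part of kern_sig.c): the template expanded by
 * corefile_open() is set through the kern.corefile sysctl.  A minimal
 * userland sketch of changing it follows; the path is an arbitrary example
 * and the call needs the appropriate privilege.  With this template, pid
 * 1234 of "myd" run by uid 1001 would dump to /var/coredumps/1001/myd.1234.core.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        static char tmpl[] = "/var/coredumps/%U/%N.%P.core";

        if (sysctlbyname("kern.corefile", NULL, NULL, tmpl,
            sizeof(tmpl)) == -1) {
                perror("sysctlbyname(kern.corefile)");
                return (1);
        }
        printf("kern.corefile set to %s\n", tmpl);
        return (0);
}
#endif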
 3334 
 3335 static int
 3336 coredump_sanitise_path(const char *path)
 3337 {
 3338         size_t i;
 3339 
 3340         /*
 3341          * Only send a subset of ASCII to devd(8) because it
 3342          * might pass these strings to sh -c.
 3343          */
 3344         for (i = 0; path[i]; i++)
 3345                 if (!(isalpha(path[i]) || isdigit(path[i])) &&
 3346                     path[i] != '/' && path[i] != '.' &&
 3347                     path[i] != '-')
 3348                         return (0);
 3349 
 3350         return (1);
 3351 }
 3352 
 3353 /*
 3354  * Dump a process' core.  The main routine does some
 3355  * policy checking, and creates the name of the coredump;
 3356  * then it passes on a vnode and a size limit to the process-specific
 3357  * coredump routine if there is one; if there _is not_ one, it returns
 3358  * ENOSYS; otherwise it returns the error from the process-specific routine.
 3359  */
 3360 
 3361 static int
 3362 coredump(struct thread *td)
 3363 {
 3364         struct proc *p = td->td_proc;
 3365         struct ucred *cred = td->td_ucred;
 3366         struct vnode *vp;
 3367         struct flock lf;
 3368         struct vattr vattr;
 3369         int error, error1, locked;
 3370         char *name;                     /* name of corefile */
 3371         void *rl_cookie;
 3372         off_t limit;
 3373         char *data = NULL;
 3374         char *fullpath, *freepath = NULL;
 3375         size_t len;
 3376         static const char comm_name[] = "comm=";
 3377         static const char core_name[] = "core=";
 3378 
 3379         PROC_LOCK_ASSERT(p, MA_OWNED);
 3380         MPASS((p->p_flag & P_HADTHREADS) == 0 || p->p_singlethread == td);
 3381         _STOPEVENT(p, S_CORE, 0);
 3382 
 3383         if (!do_coredump || (!sugid_coredump && (p->p_flag & P_SUGID) != 0) ||
 3384             (p->p_flag2 & P2_NOTRACE) != 0) {
 3385                 PROC_UNLOCK(p);
 3386                 return (EFAULT);
 3387         }
 3388 
 3389         /*
 3390          * Note that the bulk of limit checking is done after
 3391          * the corefile is created.  The exception is if the limit
 3392          * for corefiles is 0, in which case we don't bother
 3393          * creating the corefile at all.  This layout means that
 3394          * a corefile is truncated instead of not being created,
 3395          * if it is larger than the limit.
 3396          */
 3397         limit = (off_t)lim_cur(td, RLIMIT_CORE);
 3398         if (limit == 0 || racct_get_available(p, RACCT_CORE) == 0) {
 3399                 PROC_UNLOCK(p);
 3400                 return (EFBIG);
 3401         }
 3402         PROC_UNLOCK(p);
 3403 
 3404         error = corefile_open(p->p_comm, cred->cr_uid, p->p_pid, td,
 3405             compress_user_cores, &vp, &name);
 3406         if (error != 0)
 3407                 return (error);
 3408 
 3409         /*
 3410          * Don't dump to non-regular files or files with links.
 3411          * Do not dump into system files.
 3412          */
 3413         if (vp->v_type != VREG || VOP_GETATTR(vp, &vattr, cred) != 0 ||
 3414             vattr.va_nlink != 1 || (vp->v_vflag & VV_SYSTEM) != 0) {
 3415                 VOP_UNLOCK(vp, 0);
 3416                 error = EFAULT;
 3417                 goto out;
 3418         }
 3419 
 3420         VOP_UNLOCK(vp, 0);
 3421 
 3422         /* Postpone other writers, including core dumps of other processes. */
 3423         rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
 3424 
 3425         lf.l_whence = SEEK_SET;
 3426         lf.l_start = 0;
 3427         lf.l_len = 0;
 3428         lf.l_type = F_WRLCK;
 3429         locked = (VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK) == 0);
 3430 
 3431         VATTR_NULL(&vattr);
 3432         vattr.va_size = 0;
 3433         if (set_core_nodump_flag)
 3434                 vattr.va_flags = UF_NODUMP;
 3435         vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 3436         VOP_SETATTR(vp, &vattr, cred);
 3437         VOP_UNLOCK(vp, 0);
 3438         PROC_LOCK(p);
 3439         p->p_acflag |= ACORE;
 3440         PROC_UNLOCK(p);
 3441 
 3442         if (p->p_sysent->sv_coredump != NULL) {
 3443                 error = p->p_sysent->sv_coredump(td, vp, limit,
 3444                     compress_user_cores ? IMGACT_CORE_COMPRESS : 0);
 3445         } else {
 3446                 error = ENOSYS;
 3447         }
 3448 
 3449         if (locked) {
 3450                 lf.l_type = F_UNLCK;
 3451                 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
 3452         }
 3453         vn_rangelock_unlock(vp, rl_cookie);
 3454 
 3455         /*
 3456          * Notify the userland helper that a process triggered a core dump.
 3457          * This allows the helper to run an automated debugging session.
 3458          */
 3459         if (error != 0 || coredump_devctl == 0)
 3460                 goto out;
 3461         len = MAXPATHLEN * 2 + sizeof(comm_name) - 1 +
 3462             sizeof(' ') + sizeof(core_name) - 1;
 3463         data = malloc(len, M_TEMP, M_WAITOK);
 3464         if (vn_fullpath_global(td, p->p_textvp, &fullpath, &freepath) != 0)
 3465                 goto out;
 3466         if (!coredump_sanitise_path(fullpath))
 3467                 goto out;
 3468         snprintf(data, len, "%s%s ", comm_name, fullpath);
 3469         free(freepath, M_TEMP);
 3470         freepath = NULL;
 3471         if (vn_fullpath_global(td, vp, &fullpath, &freepath) != 0)
 3472                 goto out;
 3473         if (!coredump_sanitise_path(fullpath))
 3474                 goto out;
 3475         strlcat(data, core_name, len);
 3476         strlcat(data, fullpath, len);
 3477         devctl_notify("kernel", "signal", "coredump", data);
 3478 out:
 3479         error1 = vn_close(vp, FWRITE, cred, td);
 3480         if (error == 0)
 3481                 error = error1;
 3482 #ifdef AUDIT
 3483         audit_proc_coredump(td, name, error);
 3484 #endif
 3485         free(freepath, M_TEMP);
 3486         free(data, M_TEMP);
 3487         free(name, M_TEMP);
 3488         return (error);
 3489 }
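
/*
 * Editor's illustration (not part of kern_sig.c): the devctl_notify() call
 * above can be consumed by devd(8).  A devd.conf(5) stanza along these
 * lines is a sketch of how that might look; the priority and the action
 * command are arbitrary examples, and $comm/$core come from the
 * "comm=... core=..." data string built above.
 *
 *      notify 10 {
 *              match "system"          "kernel";
 *              match "subsystem"       "signal";
 *              match "type"            "coredump";
 *              action "logger core dumped: $comm -> $core";
 *      };
 */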
 3490 
 3491 /*
 3492  * Nonexistent system call: signal the process (it may want to handle it).
 3493  * Flag an error in case it won't see the signal at once (blocked/ignored).
 3494  */
 3495 #ifndef _SYS_SYSPROTO_H_
 3496 struct nosys_args {
 3497         int     dummy;
 3498 };
 3499 #endif
 3500 /* ARGSUSED */
 3501 int
 3502 nosys(struct thread *td, struct nosys_args *args)
 3505 {
 3506         struct proc *p = td->td_proc;
 3507 
 3508         PROC_LOCK(p);
 3509         tdsignal(td, SIGSYS);
 3510         PROC_UNLOCK(p);
 3511         return (ENOSYS);
 3512 }
 3513 
 3514 /*
 3515  * Send a SIGIO or SIGURG signal to a process or process group using stored
 3516  * credentials rather than those of the current process.
 3517  */
 3518 void
 3519 pgsigio(struct sigio **sigiop, int sig, int checkctty)
 3522 {
 3523         ksiginfo_t ksi;
 3524         struct sigio *sigio;
 3525 
 3526         ksiginfo_init(&ksi);
 3527         ksi.ksi_signo = sig;
 3528         ksi.ksi_code = SI_KERNEL;
 3529 
 3530         SIGIO_LOCK();
 3531         sigio = *sigiop;
 3532         if (sigio == NULL) {
 3533                 SIGIO_UNLOCK();
 3534                 return;
 3535         }
 3536         if (sigio->sio_pgid > 0) {
 3537                 PROC_LOCK(sigio->sio_proc);
 3538                 if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
 3539                         kern_psignal(sigio->sio_proc, sig);
 3540                 PROC_UNLOCK(sigio->sio_proc);
 3541         } else if (sigio->sio_pgid < 0) {
 3542                 struct proc *p;
 3543 
 3544                 PGRP_LOCK(sigio->sio_pgrp);
 3545                 LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
 3546                         PROC_LOCK(p);
 3547                         if (p->p_state == PRS_NORMAL &&
 3548                             CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
 3549                             (checkctty == 0 || (p->p_flag & P_CONTROLT)))
 3550                                 kern_psignal(p, sig);
 3551                         PROC_UNLOCK(p);
 3552                 }
 3553                 PGRP_UNLOCK(sigio->sio_pgrp);
 3554         }
 3555         SIGIO_UNLOCK();
 3556 }
 3557 
 3558 static int
 3559 filt_sigattach(struct knote *kn)
 3560 {
 3561         struct proc *p = curproc;
 3562 
 3563         kn->kn_ptr.p_proc = p;
 3564         kn->kn_flags |= EV_CLEAR;               /* automatically set */
 3565 
 3566         knlist_add(p->p_klist, kn, 0);
 3567 
 3568         return (0);
 3569 }
 3570 
 3571 static void
 3572 filt_sigdetach(struct knote *kn)
 3573 {
 3574         struct proc *p = kn->kn_ptr.p_proc;
 3575 
 3576         knlist_remove(p->p_klist, kn, 0);
 3577 }
 3578 
 3579 /*
 3580  * signal knotes are shared with proc knotes, so we apply a mask to
 3581  * the hint in order to differentiate them from process hints.  This
 3582  * could be avoided by using a signal-specific knote list, but probably
 3583  * isn't worth the trouble.
 3584  */
 3585 static int
 3586 filt_signal(struct knote *kn, long hint)
 3587 {
 3588 
 3589         if (hint & NOTE_SIGNAL) {
 3590                 hint &= ~NOTE_SIGNAL;
 3591 
 3592                 if (kn->kn_id == hint)
 3593                         kn->kn_data++;
 3594         }
 3595         return (kn->kn_data != 0);
 3596 }
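
/*
 * Editor's illustration (not part of kern_sig.c): filt_signal() above backs
 * the userland EVFILT_SIGNAL filter, bumping kn_data once per delivery, so
 * kevent(2) reports how many instances of the registered signal arrived
 * since the last retrieval, even when the disposition is SIG_IGN.  A
 * minimal userland sketch, using SIGUSR1 as an arbitrary example:
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        struct kevent ev;
        int kq;

        signal(SIGUSR1, SIG_IGN);       /* observe via the kqueue only */
        kq = kqueue();
        EV_SET(&ev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
        if (kq == -1 || kevent(kq, &ev, 1, NULL, 0, NULL) == -1) {
                perror("kqueue/kevent");
                return (1);
        }
        for (;;) {
                /* ev.data holds the delivery count since the last return. */
                if (kevent(kq, NULL, 0, &ev, 1, NULL) == 1)
                        printf("SIGUSR1 delivered %jd time(s)\n",
                            (intmax_t)ev.data);
        }
}
#endif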
 3597 
 3598 struct sigacts *
 3599 sigacts_alloc(void)
 3600 {
 3601         struct sigacts *ps;
 3602 
 3603         ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO);
 3604         refcount_init(&ps->ps_refcnt, 1);
 3605         mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF);
 3606         return (ps);
 3607 }
 3608 
 3609 void
 3610 sigacts_free(struct sigacts *ps)
 3611 {
 3612 
 3613         if (refcount_release(&ps->ps_refcnt) == 0)
 3614                 return;
 3615         mtx_destroy(&ps->ps_mtx);
 3616         free(ps, M_SUBPROC);
 3617 }
 3618 
 3619 struct sigacts *
 3620 sigacts_hold(struct sigacts *ps)
 3621 {
 3622 
 3623         refcount_acquire(&ps->ps_refcnt);
 3624         return (ps);
 3625 }
 3626 
 3627 void
 3628 sigacts_copy(struct sigacts *dest, struct sigacts *src)
 3629 {
 3630 
 3631         KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest"));
 3632         mtx_lock(&src->ps_mtx);
 3633         bcopy(src, dest, offsetof(struct sigacts, ps_refcnt));
 3634         mtx_unlock(&src->ps_mtx);
 3635 }
 3636 
 3637 int
 3638 sigacts_shared(struct sigacts *ps)
 3639 {
 3640 
 3641         return (ps->ps_refcnt > 1);
 3642 }
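
/*
 * Editor's illustration (not part of kern_sig.c): a hypothetical consumer
 * of the sigacts helpers above, roughly the choice fork makes between
 * sharing signal state and giving the child a private copy.  The function
 * name and the "share" flag are placeholders for the example.
 */
#if 0
static void
example_inherit_sigacts(struct proc *child, struct proc *parent, int share)
{

        if (share) {
                /* Both processes reference one refcounted sigacts. */
                child->p_sigacts = sigacts_hold(parent->p_sigacts);
        } else {
                /* Private copy: fresh refcount of 1, handlers duplicated. */
                child->p_sigacts = sigacts_alloc();
                sigacts_copy(child->p_sigacts, parent->p_sigacts);
        }
}
#endif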

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.