
FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_sig.c


    1 /*      $NetBSD: kern_sig.c,v 1.240.2.3 2009/04/10 20:20:45 snj Exp $   */
    2 
    3 /*
    4  * Copyright (c) 1982, 1986, 1989, 1991, 1993
    5  *      The Regents of the University of California.  All rights reserved.
    6  * (c) UNIX System Laboratories, Inc.
    7  * All or some portions of this file are derived from material licensed
    8  * to the University of California by American Telephone and Telegraph
    9  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
   10  * the permission of UNIX System Laboratories, Inc.
   11  *
   12  * Redistribution and use in source and binary forms, with or without
   13  * modification, are permitted provided that the following conditions
   14  * are met:
   15  * 1. Redistributions of source code must retain the above copyright
   16  *    notice, this list of conditions and the following disclaimer.
   17  * 2. Redistributions in binary form must reproduce the above copyright
   18  *    notice, this list of conditions and the following disclaimer in the
   19  *    documentation and/or other materials provided with the distribution.
   20  * 3. Neither the name of the University nor the names of its contributors
   21  *    may be used to endorse or promote products derived from this software
   22  *    without specific prior written permission.
   23  *
   24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   34  * SUCH DAMAGE.
   35  *
   36  *      @(#)kern_sig.c  8.14 (Berkeley) 5/14/95
   37  */
   38 
   39 #include <sys/cdefs.h>
   40 __KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.240.2.3 2009/04/10 20:20:45 snj Exp $");
   41 
   42 #include "opt_coredump.h"
   43 #include "opt_ktrace.h"
   44 #include "opt_ptrace.h"
   45 #include "opt_multiprocessor.h"
   46 #include "opt_compat_sunos.h"
   47 #include "opt_compat_netbsd.h"
   48 #include "opt_compat_netbsd32.h"
   49 #include "opt_pax.h"
   50 
   51 #define SIGPROP         /* include signal properties table */
   52 #include <sys/param.h>
   53 #include <sys/signalvar.h>
   54 #include <sys/resourcevar.h>
   55 #include <sys/namei.h>
   56 #include <sys/vnode.h>
   57 #include <sys/proc.h>
   58 #include <sys/systm.h>
   59 #include <sys/timeb.h>
   60 #include <sys/times.h>
   61 #include <sys/buf.h>
   62 #include <sys/acct.h>
   63 #include <sys/file.h>
   64 #include <sys/kernel.h>
   65 #include <sys/wait.h>
   66 #include <sys/ktrace.h>
   67 #include <sys/syslog.h>
   68 #include <sys/stat.h>
   69 #include <sys/core.h>
   70 #include <sys/filedesc.h>
   71 #include <sys/malloc.h>
   72 #include <sys/pool.h>
   73 #include <sys/ucontext.h>
   74 #include <sys/sa.h>
   75 #include <sys/savar.h>
   76 #include <sys/exec.h>
   77 #include <sys/sysctl.h>
   78 #include <sys/kauth.h>
   79 
   80 #include <sys/mount.h>
   81 #include <sys/syscallargs.h>
   82 
   83 #include <machine/cpu.h>
   84 
   85 #include <sys/user.h>           /* for coredump */
   86 
   87 #ifdef PAX_SEGVGUARD
   88 #include <sys/pax.h>
   89 #endif /* PAX_SEGVGUARD */
   90 
   91 #include <uvm/uvm.h>
   92 #include <uvm/uvm_extern.h>
   93 
   94 #ifdef COREDUMP
   95 static int      build_corename(struct proc *, char *, const char *, size_t);
   96 #endif
   97 static void     ksiginfo_exithook(struct proc *, void *);
   98 static void     ksiginfo_queue(struct proc *, const ksiginfo_t *, ksiginfo_t **);
   99 static ksiginfo_t *ksiginfo_dequeue(struct proc *, int);
  100 static void     kpsignal2(struct proc *, const ksiginfo_t *);
  101 
  102 sigset_t        contsigmask, stopsigmask, sigcantmask;
  103 
  104 struct pool     sigacts_pool;   /* memory pool for sigacts structures */
  105 
  106 /*
  107  * struct sigacts memory pool allocator.
  108  */
  109 
  110 static void *
  111 sigacts_poolpage_alloc(struct pool *pp, int flags)
  112 {
  113 
  114         return (void *)uvm_km_alloc(kernel_map,
  115             (PAGE_SIZE)*2, (PAGE_SIZE)*2,
  116             ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)
  117             | UVM_KMF_WIRED);
  118 }
  119 
  120 static void
  121 sigacts_poolpage_free(struct pool *pp, void *v)
  122 {
  123         uvm_km_free(kernel_map, (vaddr_t)v, (PAGE_SIZE)*2, UVM_KMF_WIRED);
  124 }
  125 
  126 static struct pool_allocator sigactspool_allocator = {
  127         .pa_alloc = sigacts_poolpage_alloc,
  128         .pa_free = sigacts_poolpage_free,
  129 };
  130 
  131 static POOL_INIT(siginfo_pool, sizeof(siginfo_t), 0, 0, 0, "siginfo",
  132     &pool_allocator_nointr);
  133 static POOL_INIT(ksiginfo_pool, sizeof(ksiginfo_t), 0, 0, 0, "ksiginfo", NULL);
  134 
  135 static ksiginfo_t *
  136 ksiginfo_alloc(int prflags)
  137 {
  138         int s;
  139         ksiginfo_t *ksi;
  140 
  141         s = splvm();
  142         ksi = pool_get(&ksiginfo_pool, prflags);
  143         splx(s);
  144         return ksi;
  145 }
  146 
  147 static void
  148 ksiginfo_free(ksiginfo_t *ksi)
  149 {
  150         int s;
  151 
  152         s = splvm();
  153         pool_put(&ksiginfo_pool, ksi);
  154         splx(s);
  155 }
  156 
  157 /*
   158  * Remove and return the first ksiginfo element that matches the requested
   159  * signal, or return NULL if none is found.
   160  */
  161 static ksiginfo_t *
  162 ksiginfo_dequeue(struct proc *p, int signo)
  163 {
  164         ksiginfo_t *ksi;
  165         int s;
  166 
  167         s = splvm();
  168         simple_lock(&p->p_sigctx.ps_silock);
  169         CIRCLEQ_FOREACH(ksi, &p->p_sigctx.ps_siginfo, ksi_list) {
  170                 if (ksi->ksi_signo == signo) {
  171                         CIRCLEQ_REMOVE(&p->p_sigctx.ps_siginfo, ksi, ksi_list);
  172                         goto out;
  173                 }
  174         }
  175         ksi = NULL;
  176 out:
  177         simple_unlock(&p->p_sigctx.ps_silock);
  178         splx(s);
  179         return ksi;
  180 }
  181 
   182 /*
   183  * Append a new ksiginfo element to the list of pending ksiginfo entries.
   184  * Non-RT signals that already exist in the queue have their entry
   185  * replaced; new entries are added for RT signals and for non-RT
   186  * signals that have no existing entry.
   187  */
  188 static void
  189 ksiginfo_queue(struct proc *p, const ksiginfo_t *ksi, ksiginfo_t **newkp)
  190 {
  191         ksiginfo_t *kp;
  192         int s;
  193 
  194         /*
  195          * If there's no info, don't save it.
  196          */
  197         if (KSI_EMPTY_P(ksi))
  198                 return;
  199 
  200         s = splvm();
  201         simple_lock(&p->p_sigctx.ps_silock);
  202 #ifdef notyet   /* XXX: QUEUING */
  203         if (ksi->ksi_signo < SIGRTMIN)
  204 #endif
  205         {
  206                 CIRCLEQ_FOREACH(kp, &p->p_sigctx.ps_siginfo, ksi_list) {
  207                         if (kp->ksi_signo == ksi->ksi_signo) {
  208                                 KSI_COPY(ksi, kp);
  209                                 goto out;
  210                         }
  211                 }
  212         }
  213         if (newkp && *newkp) {
  214                 kp = *newkp;
  215                 *newkp = NULL;
  216         } else {
  217                 SCHED_ASSERT_UNLOCKED();
  218                 kp = ksiginfo_alloc(PR_NOWAIT);
  219                 if (kp == NULL) {
  220 #ifdef DIAGNOSTIC
  221                         printf("Out of memory allocating siginfo for pid %d\n",
  222                             p->p_pid);
  223 #endif
  224                         goto out;
  225                 }
  226         }
  227         *kp = *ksi;
  228         CIRCLEQ_INSERT_TAIL(&p->p_sigctx.ps_siginfo, kp, ksi_list);
  229 out:
  230         simple_unlock(&p->p_sigctx.ps_silock);
  231         splx(s);
  232 }
  233 
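
The coalescing policy implemented by ksiginfo_queue() above, at most one pending entry per non-RT signal, is observable from user space. The following is not part of this source file; it is a minimal userland sketch, assuming a standard POSIX environment, that blocks SIGUSR1, sends it to itself twice, and then sees a single delivery once the signal is unblocked.

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t deliveries;

static void
handler(int sig)
{
        (void)sig;
        deliveries++;                   /* async-signal-safe: just count */
}

int
main(void)
{
        struct sigaction sa;
        sigset_t block, old;

        sa.sa_handler = handler;
        sa.sa_flags = 0;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, NULL);

        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);
        sigprocmask(SIG_BLOCK, &block, &old);

        kill(getpid(), SIGUSR1);        /* first instance becomes pending */
        kill(getpid(), SIGUSR1);        /* second instance is coalesced */

        sigprocmask(SIG_SETMASK, &old, NULL);   /* unblock: deliver now */
        printf("handler ran %d time(s)\n", (int)deliveries);
        return 0;
}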
   234 /*
   235  * Free all pending ksiginfo entries on process exit or exec.
   236  */
  237 static void
  238 ksiginfo_exithook(struct proc *p, void *v)
  239 {
  240         int s;
  241 
  242         s = splvm();
  243         simple_lock(&p->p_sigctx.ps_silock);
  244         while (!CIRCLEQ_EMPTY(&p->p_sigctx.ps_siginfo)) {
  245                 ksiginfo_t *ksi = CIRCLEQ_FIRST(&p->p_sigctx.ps_siginfo);
  246                 CIRCLEQ_REMOVE(&p->p_sigctx.ps_siginfo, ksi, ksi_list);
  247                 ksiginfo_free(ksi);
  248         }
  249         simple_unlock(&p->p_sigctx.ps_silock);
  250         splx(s);
  251 }
  252 
  253 /*
  254  * Initialize signal-related data structures.
  255  */
  256 void
  257 signal_init(void)
  258 {
  259 
  260         sigactspool_allocator.pa_pagesz = (PAGE_SIZE)*2;
  261 
  262         pool_init(&sigacts_pool, sizeof(struct sigacts), 0, 0, 0, "sigapl",
  263             sizeof(struct sigacts) > PAGE_SIZE ?
  264             &sigactspool_allocator : &pool_allocator_nointr);
  265 
  266         exithook_establish(ksiginfo_exithook, NULL);
  267         exechook_establish(ksiginfo_exithook, NULL);
  268 }
  269 
   270 /*
   271  * Create an initial sigacts structure for np, using the same signal
   272  * state as the parent pp.  If 'share' is set, share the parent's
   273  * sigacts; otherwise copy it.
   274  */
  275 void
  276 sigactsinit(struct proc *np, struct proc *pp, int share)
  277 {
  278         struct sigacts *ps;
  279 
  280         if (share) {
  281                 np->p_sigacts = pp->p_sigacts;
  282                 pp->p_sigacts->sa_refcnt++;
  283         } else {
  284                 ps = pool_get(&sigacts_pool, PR_WAITOK);
  285                 if (pp)
  286                         memcpy(ps, pp->p_sigacts, sizeof(struct sigacts));
  287                 else
  288                         memset(ps, '\0', sizeof(struct sigacts));
  289                 ps->sa_refcnt = 1;
  290                 np->p_sigacts = ps;
  291         }
  292 }
  293 
   294 /*
   295  * Make this process stop sharing its sigacts, maintaining all
   296  * signal state.
   297  */
  298 void
  299 sigactsunshare(struct proc *p)
  300 {
  301         struct sigacts *oldps;
  302 
  303         if (p->p_sigacts->sa_refcnt == 1)
  304                 return;
  305 
  306         oldps = p->p_sigacts;
  307         sigactsinit(p, NULL, 0);
  308 
  309         if (--oldps->sa_refcnt == 0)
  310                 pool_put(&sigacts_pool, oldps);
  311 }
  312 
   313 /*
   314  * Release a reference to a sigacts structure.
   315  */
  316 void
  317 sigactsfree(struct sigacts *ps)
  318 {
  319 
  320         if (--ps->sa_refcnt > 0)
  321                 return;
  322 
  323         pool_put(&sigacts_pool, ps);
  324 }
  325 
  326 int
  327 sigaction1(struct proc *p, int signum, const struct sigaction *nsa,
  328         struct sigaction *osa, const void *tramp, int vers)
  329 {
  330         struct sigacts  *ps;
  331         int             prop;
  332 
  333         ps = p->p_sigacts;
  334         if (signum <= 0 || signum >= NSIG)
  335                 return (EINVAL);
  336 
   337         /*
   338          * Trampoline ABI version 0 is reserved for the legacy
   339          * kernel-provided on-stack trampoline.  Conversely, if we are
   340          * using a non-0 ABI version, we must have a trampoline.  Only
   341          * validate vers if a new sigaction was supplied.  Emulations
   342          * use legacy kernel trampolines with version 0, so check for
   343          * that case as well.
   344          */
  345         if ((vers != 0 && tramp == NULL) ||
  346 #ifdef SIGTRAMP_VALID
  347             (nsa != NULL &&
  348             ((vers == 0) ?
  349                 (p->p_emul->e_sigcode == NULL) :
  350                 !SIGTRAMP_VALID(vers))) ||
  351 #endif
  352             (vers == 0 && tramp != NULL))
  353                 return (EINVAL);
  354 
  355         if (osa)
  356                 *osa = SIGACTION_PS(ps, signum);
  357 
  358         if (nsa) {
  359                 if (nsa->sa_flags & ~SA_ALLBITS)
  360                         return (EINVAL);
  361 
  362                 prop = sigprop[signum];
  363                 if (prop & SA_CANTMASK)
  364                         return (EINVAL);
  365 
  366                 (void) splsched();      /* XXXSMP */
  367                 SIGACTION_PS(ps, signum) = *nsa;
  368                 ps->sa_sigdesc[signum].sd_tramp = tramp;
  369                 ps->sa_sigdesc[signum].sd_vers = vers;
  370                 sigminusset(&sigcantmask, &SIGACTION_PS(ps, signum).sa_mask);
  371                 if ((prop & SA_NORESET) != 0)
  372                         SIGACTION_PS(ps, signum).sa_flags &= ~SA_RESETHAND;
  373                 if (signum == SIGCHLD) {
  374                         if (nsa->sa_flags & SA_NOCLDSTOP)
  375                                 p->p_flag |= P_NOCLDSTOP;
  376                         else
  377                                 p->p_flag &= ~P_NOCLDSTOP;
  378                         if (nsa->sa_flags & SA_NOCLDWAIT) {
   379                                 /*
   380                                  * Paranoia: since SA_NOCLDWAIT is implemented
   381                                  * by reparenting the dying child to PID 1
   382                                  * (trusting it to reap the zombie), PID 1
   383                                  * itself is forbidden to set SA_NOCLDWAIT.
   384                                  */
  385                                 if (p->p_pid == 1)
  386                                         p->p_flag &= ~P_NOCLDWAIT;
  387                                 else
  388                                         p->p_flag |= P_NOCLDWAIT;
  389                         } else
  390                                 p->p_flag &= ~P_NOCLDWAIT;
  391 
  392                         if (nsa->sa_handler == SIG_IGN) {
  393                                 /*
  394                                  * Paranoia: same as above.
  395                                  */
  396                                 if (p->p_pid == 1)
  397                                         p->p_flag &= ~P_CLDSIGIGN;
  398                                 else
  399                                         p->p_flag |= P_CLDSIGIGN;
  400                         } else
  401                                 p->p_flag &= ~P_CLDSIGIGN;
  402                                 
  403                 }
  404                 if ((nsa->sa_flags & SA_NODEFER) == 0)
  405                         sigaddset(&SIGACTION_PS(ps, signum).sa_mask, signum);
  406                 else
  407                         sigdelset(&SIGACTION_PS(ps, signum).sa_mask, signum);
  408                 /*
  409                  * Set bit in p_sigctx.ps_sigignore for signals that are set to
  410                  * SIG_IGN, and for signals set to SIG_DFL where the default is
  411                  * to ignore. However, don't put SIGCONT in
  412                  * p_sigctx.ps_sigignore, as we have to restart the process.
  413                  */
  414                 if (nsa->sa_handler == SIG_IGN ||
  415                     (nsa->sa_handler == SIG_DFL && (prop & SA_IGNORE) != 0)) {
  416                                                 /* never to be seen again */
  417                         sigdelset(&p->p_sigctx.ps_siglist, signum);
  418                         if (signum != SIGCONT) {
  419                                                 /* easier in psignal */
  420                                 sigaddset(&p->p_sigctx.ps_sigignore, signum);
  421                         }
  422                         sigdelset(&p->p_sigctx.ps_sigcatch, signum);
  423                 } else {
  424                         sigdelset(&p->p_sigctx.ps_sigignore, signum);
  425                         if (nsa->sa_handler == SIG_DFL)
  426                                 sigdelset(&p->p_sigctx.ps_sigcatch, signum);
  427                         else
  428                                 sigaddset(&p->p_sigctx.ps_sigcatch, signum);
  429                 }
  430                 (void) spl0();
  431         }
  432 
  433         return (0);
  434 }
  435 
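
The SIGCHLD special-casing in sigaction1() above (P_NOCLDSTOP, P_NOCLDWAIT, P_CLDSIGIGN) backs the POSIX SA_NOCLDSTOP and SA_NOCLDWAIT flags. A minimal userland sketch, offered only as illustration and not part of this file, showing how a process can avoid leaving zombies without ever calling wait():

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        struct sigaction sa;

        /*
         * SA_NOCLDWAIT: the system reaps exited children on the parent's
         * behalf (in this kernel, by reparenting them to PID 1), so no
         * zombies accumulate.  SA_NOCLDSTOP: no SIGCHLD on stop/continue.
         */
        sa.sa_handler = SIG_DFL;
        sa.sa_flags = SA_NOCLDWAIT | SA_NOCLDSTOP;
        sigemptyset(&sa.sa_mask);
        if (sigaction(SIGCHLD, &sa, NULL) == -1) {
                perror("sigaction");
                return 1;
        }

        if (fork() == 0)
                _exit(0);               /* child exits; no zombie remains */

        sleep(1);                       /* parent never calls wait() */
        return 0;
}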
  436 #ifdef COMPAT_16
  437 /* ARGSUSED */
  438 int
  439 compat_16_sys___sigaction14(struct lwp *l, void *v, register_t *retval)
  440 {
  441         struct compat_16_sys___sigaction14_args /* {
  442                 syscallarg(int)                         signum;
  443                 syscallarg(const struct sigaction *)    nsa;
  444                 syscallarg(struct sigaction *)          osa;
  445         } */ *uap = v;
  446         struct proc             *p;
  447         struct sigaction        nsa, osa;
  448         int                     error;
  449 
  450         if (SCARG(uap, nsa)) {
  451                 error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
  452                 if (error)
  453                         return (error);
  454         }
  455         p = l->l_proc;
  456         error = sigaction1(p, SCARG(uap, signum),
  457             SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
  458             NULL, 0);
  459         if (error)
  460                 return (error);
  461         if (SCARG(uap, osa)) {
  462                 error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
  463                 if (error)
  464                         return (error);
  465         }
  466         return (0);
  467 }
  468 #endif
  469 
  470 /* ARGSUSED */
  471 int
  472 sys___sigaction_sigtramp(struct lwp *l, void *v, register_t *retval)
  473 {
  474         struct sys___sigaction_sigtramp_args /* {
  475                 syscallarg(int)                         signum;
  476                 syscallarg(const struct sigaction *)    nsa;
  477                 syscallarg(struct sigaction *)          osa;
  478                 syscallarg(void *)                      tramp;
  479                 syscallarg(int)                         vers;
  480         } */ *uap = v;
  481         struct proc *p = l->l_proc;
  482         struct sigaction nsa, osa;
  483         int error;
  484 
  485         if (SCARG(uap, nsa)) {
  486                 error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
  487                 if (error)
  488                         return (error);
  489         }
  490         error = sigaction1(p, SCARG(uap, signum),
  491             SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
  492             SCARG(uap, tramp), SCARG(uap, vers));
  493         if (error)
  494                 return (error);
  495         if (SCARG(uap, osa)) {
  496                 error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
  497                 if (error)
  498                         return (error);
  499         }
  500         return (0);
  501 }
  502 
  503 /*
  504  * Initialize signal state for process 0;
  505  * set to ignore signals that are ignored by default and disable the signal
  506  * stack.
  507  */
  508 void
  509 siginit(struct proc *p)
  510 {
  511         struct sigacts  *ps;
  512         int             signum, prop;
  513 
  514         ps = p->p_sigacts;
  515         sigemptyset(&contsigmask);
  516         sigemptyset(&stopsigmask);
  517         sigemptyset(&sigcantmask);
  518         for (signum = 1; signum < NSIG; signum++) {
  519                 prop = sigprop[signum];
  520                 if (prop & SA_CONT)
  521                         sigaddset(&contsigmask, signum);
  522                 if (prop & SA_STOP)
  523                         sigaddset(&stopsigmask, signum);
  524                 if (prop & SA_CANTMASK)
  525                         sigaddset(&sigcantmask, signum);
  526                 if (prop & SA_IGNORE && signum != SIGCONT)
  527                         sigaddset(&p->p_sigctx.ps_sigignore, signum);
  528                 sigemptyset(&SIGACTION_PS(ps, signum).sa_mask);
  529                 SIGACTION_PS(ps, signum).sa_flags = SA_RESTART;
  530         }
  531         sigemptyset(&p->p_sigctx.ps_sigcatch);
  532         p->p_sigctx.ps_sigwaited = NULL;
  533         p->p_flag &= ~P_NOCLDSTOP;
  534 
  535         /*
  536          * Reset stack state to the user stack.
  537          */
  538         p->p_sigctx.ps_sigstk.ss_flags = SS_DISABLE;
  539         p->p_sigctx.ps_sigstk.ss_size = 0;
  540         p->p_sigctx.ps_sigstk.ss_sp = 0;
  541 
  542         /* One reference. */
  543         ps->sa_refcnt = 1;
  544 }
  545 
  546 /*
  547  * Reset signals for an exec of the specified process.
  548  */
  549 void
  550 execsigs(struct proc *p)
  551 {
  552         struct sigacts  *ps;
  553         int             signum, prop;
  554 
  555         sigactsunshare(p);
  556 
  557         ps = p->p_sigacts;
  558 
  559         /*
  560          * Reset caught signals.  Held signals remain held
  561          * through p_sigctx.ps_sigmask (unless they were caught,
  562          * and are now ignored by default).
  563          */
  564         for (signum = 1; signum < NSIG; signum++) {
  565                 if (sigismember(&p->p_sigctx.ps_sigcatch, signum)) {
  566                         prop = sigprop[signum];
  567                         if (prop & SA_IGNORE) {
  568                                 if ((prop & SA_CONT) == 0)
  569                                         sigaddset(&p->p_sigctx.ps_sigignore,
  570                                             signum);
  571                                 sigdelset(&p->p_sigctx.ps_siglist, signum);
  572                         }
  573                         SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
  574                 }
  575                 sigemptyset(&SIGACTION_PS(ps, signum).sa_mask);
  576                 SIGACTION_PS(ps, signum).sa_flags = SA_RESTART;
  577         }
  578         sigemptyset(&p->p_sigctx.ps_sigcatch);
  579         p->p_sigctx.ps_sigwaited = NULL;
  580 
   581         /*
   582          * Reset the "no zombies if child dies" flag, as Solaris does.
   583          */
  584         p->p_flag &= ~(P_NOCLDWAIT | P_CLDSIGIGN);
  585         if (SIGACTION_PS(ps, SIGCHLD).sa_handler == SIG_IGN)
  586                 SIGACTION_PS(ps, SIGCHLD).sa_handler = SIG_DFL;
  587 
  588         /*
  589          * Reset stack state to the user stack.
  590          */
  591         p->p_sigctx.ps_sigstk.ss_flags = SS_DISABLE;
  592         p->p_sigctx.ps_sigstk.ss_size = 0;
  593         p->p_sigctx.ps_sigstk.ss_sp = 0;
  594 }
  595 
  596 int
  597 sigprocmask1(struct proc *p, int how, const sigset_t *nss, sigset_t *oss)
  598 {
  599 
  600         if (oss)
  601                 *oss = p->p_sigctx.ps_sigmask;
  602 
  603         if (nss) {
  604                 (void)splsched();       /* XXXSMP */
  605                 switch (how) {
  606                 case SIG_BLOCK:
  607                         sigplusset(nss, &p->p_sigctx.ps_sigmask);
  608                         break;
  609                 case SIG_UNBLOCK:
  610                         sigminusset(nss, &p->p_sigctx.ps_sigmask);
  611                         CHECKSIGS(p);
  612                         break;
  613                 case SIG_SETMASK:
  614                         p->p_sigctx.ps_sigmask = *nss;
  615                         CHECKSIGS(p);
  616                         break;
  617                 default:
  618                         (void)spl0();   /* XXXSMP */
  619                         return (EINVAL);
  620                 }
  621                 sigminusset(&sigcantmask, &p->p_sigctx.ps_sigmask);
  622                 (void)spl0();           /* XXXSMP */
  623         }
  624 
  625         return (0);
  626 }
  627 
   628 /*
   629  * Manipulate the signal mask.  The new mask is copied in from user
   630  * space and the previous mask is copied back out to the caller;
   631  * sigprocmask1() does the actual manipulation of the process
   632  * signal mask.
   633  */
  634 int
  635 sys___sigprocmask14(struct lwp *l, void *v, register_t *retval)
  636 {
  637         struct sys___sigprocmask14_args /* {
  638                 syscallarg(int)                 how;
  639                 syscallarg(const sigset_t *)    set;
  640                 syscallarg(sigset_t *)          oset;
  641         } */ *uap = v;
  642         struct proc     *p;
  643         sigset_t        nss, oss;
  644         int             error;
  645 
  646         if (SCARG(uap, set)) {
  647                 error = copyin(SCARG(uap, set), &nss, sizeof(nss));
  648                 if (error)
  649                         return (error);
  650         }
  651         p = l->l_proc;
  652         error = sigprocmask1(p, SCARG(uap, how),
  653             SCARG(uap, set) ? &nss : 0, SCARG(uap, oset) ? &oss : 0);
  654         if (error)
  655                 return (error);
  656         if (SCARG(uap, oset)) {
  657                 error = copyout(&oss, SCARG(uap, oset), sizeof(oss));
  658                 if (error)
  659                         return (error);
  660         }
  661         return (0);
  662 }
  663 
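
sigprocmask1() above distinguishes SIG_BLOCK (union with the current mask), SIG_UNBLOCK (removal), and SIG_SETMASK (replacement), and always strips sigcantmask so SIGKILL and SIGSTOP cannot be blocked. A short userland sketch, illustrative only and not part of this file, that shields a critical section from SIGINT:

#include <signal.h>
#include <stdio.h>

int
main(void)
{
        sigset_t block, old;

        sigemptyset(&block);
        sigaddset(&block, SIGINT);

        /* SIG_BLOCK: add SIGINT to the mask, remembering the old mask. */
        if (sigprocmask(SIG_BLOCK, &block, &old) == -1) {
                perror("sigprocmask");
                return 1;
        }

        /* ... critical section: an arriving SIGINT merely stays pending ... */

        /* SIG_SETMASK: restore exactly the previous mask. */
        sigprocmask(SIG_SETMASK, &old, NULL);
        return 0;
}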
  664 void
  665 sigpending1(struct proc *p, sigset_t *ss)
  666 {
  667 
  668         *ss = p->p_sigctx.ps_siglist;
  669         sigminusset(&p->p_sigctx.ps_sigmask, ss);
  670 }
  671 
  672 /* ARGSUSED */
  673 int
  674 sys___sigpending14(struct lwp *l, void *v, register_t *retval)
  675 {
  676         struct sys___sigpending14_args /* {
  677                 syscallarg(sigset_t *)  set;
  678         } */ *uap = v;
  679         struct proc     *p;
  680         sigset_t        ss;
  681 
  682         p = l->l_proc;
  683         sigpending1(p, &ss);
  684         return (copyout(&ss, SCARG(uap, set), sizeof(ss)));
  685 }
  686 
  687 int
  688 sigsuspend1(struct proc *p, const sigset_t *ss)
  689 {
  690         struct sigacts *ps;
  691 
  692         ps = p->p_sigacts;
  693         if (ss) {
  694                 /*
  695                  * When returning from sigpause, we want
  696                  * the old mask to be restored after the
  697                  * signal handler has finished.  Thus, we
  698                  * save it here and mark the sigctx structure
  699                  * to indicate this.
  700                  */
  701                 p->p_sigctx.ps_oldmask = p->p_sigctx.ps_sigmask;
  702                 p->p_sigctx.ps_flags |= SAS_OLDMASK;
  703                 (void) splsched();      /* XXXSMP */
  704                 p->p_sigctx.ps_sigmask = *ss;
  705                 CHECKSIGS(p);
  706                 sigminusset(&sigcantmask, &p->p_sigctx.ps_sigmask);
  707                 (void) spl0();          /* XXXSMP */
  708         }
  709 
  710         while (tsleep((caddr_t) ps, PPAUSE|PCATCH, "pause", 0) == 0)
  711                 /* void */;
  712 
  713         /* always return EINTR rather than ERESTART... */
  714         return (EINTR);
  715 }
  716 
   717 /*
   718  * Suspend the process until a signal arrives, installing the
   719  * supplied mask in the meantime.  The new mask is copied in from
   720  * user space; the previous mask is restored once the signal
   721  * handler has run.
   722  */
  722 /* ARGSUSED */
  723 int
  724 sys___sigsuspend14(struct lwp *l, void *v, register_t *retval)
  725 {
  726         struct sys___sigsuspend14_args /* {
  727                 syscallarg(const sigset_t *)    set;
  728         } */ *uap = v;
  729         struct proc     *p;
  730         sigset_t        ss;
  731         int             error;
  732 
  733         if (SCARG(uap, set)) {
  734                 error = copyin(SCARG(uap, set), &ss, sizeof(ss));
  735                 if (error)
  736                         return (error);
  737         }
  738 
  739         p = l->l_proc;
  740         return (sigsuspend1(p, SCARG(uap, set) ? &ss : 0));
  741 }
  742 
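
sigsuspend1() above saves the current mask (SAS_OLDMASK), installs the caller's mask, and sleeps until a signal is delivered; the old mask is restored after the handler runs. The classic userland use, sketched here purely for illustration (not part of this file), is the race-free wait: block the signal, test the condition, then atomically unblock and sleep.

#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t got_usr1;

static void
on_usr1(int sig)
{
        (void)sig;
        got_usr1 = 1;
}

int
main(void)
{
        struct sigaction sa;
        sigset_t block, old;

        sa.sa_handler = on_usr1;
        sa.sa_flags = 0;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, NULL);

        /* Block SIGUSR1 so it cannot slip in between the test and the wait. */
        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);
        sigprocmask(SIG_BLOCK, &block, &old);

        while (!got_usr1)
                sigsuspend(&old);       /* atomically unblock and sleep */

        sigprocmask(SIG_SETMASK, &old, NULL);
        printf("got SIGUSR1\n");
        return 0;
}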
  743 int
  744 sigaltstack1(struct proc *p, const struct sigaltstack *nss,
  745         struct sigaltstack *oss)
  746 {
  747 
  748         if (oss)
  749                 *oss = p->p_sigctx.ps_sigstk;
  750 
  751         if (nss) {
  752                 if (nss->ss_flags & ~SS_ALLBITS)
  753                         return (EINVAL);
  754 
  755                 if (nss->ss_flags & SS_DISABLE) {
  756                         if (p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK)
  757                                 return (EINVAL);
  758                 } else {
  759                         if (nss->ss_size < MINSIGSTKSZ)
  760                                 return (ENOMEM);
  761                 }
  762                 p->p_sigctx.ps_sigstk = *nss;
  763         }
  764 
  765         return (0);
  766 }
  767 
  768 /* ARGSUSED */
  769 int
  770 sys___sigaltstack14(struct lwp *l, void *v, register_t *retval)
  771 {
  772         struct sys___sigaltstack14_args /* {
  773                 syscallarg(const struct sigaltstack *)  nss;
  774                 syscallarg(struct sigaltstack *)        oss;
  775         } */ *uap = v;
  776         struct proc             *p;
  777         struct sigaltstack      nss, oss;
  778         int                     error;
  779 
  780         if (SCARG(uap, nss)) {
  781                 error = copyin(SCARG(uap, nss), &nss, sizeof(nss));
  782                 if (error)
  783                         return (error);
  784         }
  785         p = l->l_proc;
  786         error = sigaltstack1(p,
  787             SCARG(uap, nss) ? &nss : 0, SCARG(uap, oss) ? &oss : 0);
  788         if (error)
  789                 return (error);
  790         if (SCARG(uap, oss)) {
  791                 error = copyout(&oss, SCARG(uap, oss), sizeof(oss));
  792                 if (error)
  793                         return (error);
  794         }
  795         return (0);
  796 }
  797 
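
sigaltstack1() records the alternate stack that the SS_ONSTACK/SS_DISABLE checks above guard, and enforces MINSIGSTKSZ. An illustrative userland sketch (not part of this file) that installs an alternate stack so a handler can still run when the normal stack is exhausted:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void
segv_handler(int sig)
{
        (void)sig;
        /* write(2) would be the async-signal-safe choice in real code. */
        _Exit(1);
}

int
main(void)
{
        stack_t ss;
        struct sigaction sa;

        ss.ss_sp = malloc(SIGSTKSZ);
        ss.ss_size = SIGSTKSZ;
        ss.ss_flags = 0;
        if (ss.ss_sp == NULL || sigaltstack(&ss, NULL) == -1) {
                perror("sigaltstack");
                return 1;
        }

        sa.sa_handler = segv_handler;
        sa.sa_flags = SA_ONSTACK;       /* run the handler on the alternate stack */
        sigemptyset(&sa.sa_mask);
        sigaction(SIGSEGV, &sa, NULL);

        /* ... a runaway recursion would now still reach segv_handler() ... */
        return 0;
}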
  798 /* ARGSUSED */
  799 int
  800 sys_kill(struct lwp *l, void *v, register_t *retval)
  801 {
  802         struct sys_kill_args /* {
  803                 syscallarg(int) pid;
  804                 syscallarg(int) signum;
  805         } */ *uap = v;
  806         struct proc     *p;
  807         ksiginfo_t      ksi;
  808         int signum = SCARG(uap, signum);
  809         int error;
  810 
  811         if ((u_int)signum >= NSIG)
  812                 return (EINVAL);
  813         KSI_INIT(&ksi);
  814         ksi.ksi_signo = signum;
  815         ksi.ksi_code = SI_USER;
  816         ksi.ksi_pid = l->l_proc->p_pid;
  817         ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
  818         if (SCARG(uap, pid) > 0) {
  819                 /* kill single process */
  820                 if ((p = pfind(SCARG(uap, pid))) == NULL)
  821                         return (ESRCH);
  822                 error = kauth_authorize_process(l->l_cred,
  823                     KAUTH_PROCESS_CANSIGNAL, p, (void *)(uintptr_t)signum,
  824                     NULL, NULL);
  825                 if (error)
  826                         return error;
  827                 if (signum)
  828                         kpsignal2(p, &ksi);
  829                 return (0);
  830         }
  831         switch (SCARG(uap, pid)) {
  832         case -1:                /* broadcast signal */
  833                 return (killpg1(l, &ksi, 0, 1));
  834         case 0:                 /* signal own process group */
  835                 return (killpg1(l, &ksi, 0, 0));
  836         default:                /* negative explicit process group */
  837                 return (killpg1(l, &ksi, -SCARG(uap, pid), 0));
  838         }
  839         /* NOTREACHED */
  840 }
  841 
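
sys_kill() above dispatches on the sign of the pid argument: a positive pid names one process, 0 means the caller's own process group, -1 is a broadcast, and any other negative value names the process group -pid. The same conventions apply to the userland kill(2) call; an illustrative sketch (not part of this file) using the null signal 0, which performs only the permission check:

#include <signal.h>
#include <sys/types.h>
#include <unistd.h>

int
main(void)
{
        pid_t self = getpid();
        pid_t pgrp = getpgrp();

        kill(self, 0);          /* pid > 0: one specific process */
        kill(0, 0);             /* pid == 0: caller's own process group */
        kill(-pgrp, 0);         /* pid < -1: the process group with ID pgrp */
        /* kill(-1, 0) would broadcast to every signalable process. */
        return 0;
}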
   842 /*
   843  * Common code for killing a process group and for broadcast kill.
   844  * cp is the calling process.
   845  */
  846 int
  847 killpg1(struct lwp *l, ksiginfo_t *ksi, int pgid, int all)
  848 {
  849         struct proc     *p, *cp;
  850         kauth_cred_t    pc;
  851         struct pgrp     *pgrp;
  852         int             nfound;
  853         int             signum = ksi->ksi_signo;
  854 
  855         cp = l->l_proc;
  856         pc = l->l_cred;
  857         nfound = 0;
  858         if (all) {
  859                 /*
  860                  * broadcast
  861                  */
  862                 proclist_lock_read();
  863                 PROCLIST_FOREACH(p, &allproc) {
  864                         if (p->p_pid <= 1 || p->p_flag & P_SYSTEM || p == cp ||
  865                             kauth_authorize_process(pc, KAUTH_PROCESS_CANSIGNAL,
  866                             p, (void *)(uintptr_t)signum, NULL, NULL) != 0)
  867                                 continue;
  868                         nfound++;
  869                         if (signum)
  870                                 kpsignal2(p, ksi);
  871                 }
  872                 proclist_unlock_read();
  873         } else {
  874                 if (pgid == 0)
  875                         /*
  876                          * zero pgid means send to my process group.
  877                          */
  878                         pgrp = cp->p_pgrp;
  879                 else {
  880                         pgrp = pgfind(pgid);
  881                         if (pgrp == NULL)
  882                                 return (ESRCH);
  883                 }
  884                 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
  885                         if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
  886                             kauth_authorize_process(pc, KAUTH_PROCESS_CANSIGNAL,
  887                             p, (void *)(uintptr_t)signum, NULL, NULL) != 0)
  888                                 continue;
  889                         nfound++;
  890                         if (signum && P_ZOMBIE(p) == 0)
  891                                 kpsignal2(p, ksi);
  892                 }
  893         }
  894         return (nfound ? 0 : ESRCH);
  895 }
  896 
  897 /*
  898  * Send a signal to a process group.
  899  */
  900 void
  901 gsignal(int pgid, int signum)
  902 {
  903         ksiginfo_t ksi;
  904         KSI_INIT_EMPTY(&ksi);
  905         ksi.ksi_signo = signum;
  906         kgsignal(pgid, &ksi, NULL);
  907 }
  908 
  909 void
  910 kgsignal(int pgid, ksiginfo_t *ksi, void *data)
  911 {
  912         struct pgrp *pgrp;
  913 
  914         if (pgid && (pgrp = pgfind(pgid)))
  915                 kpgsignal(pgrp, ksi, data, 0);
  916 }
  917 
   918 /*
   919  * Send a signal to a process group.  If checkctty is 1, limit the
   920  * signal to members that have a controlling terminal.
   921  */
  922 void
  923 pgsignal(struct pgrp *pgrp, int sig, int checkctty)
  924 {
  925         ksiginfo_t ksi;
  926         KSI_INIT_EMPTY(&ksi);
  927         ksi.ksi_signo = sig;
  928         kpgsignal(pgrp, &ksi, NULL, checkctty);
  929 }
  930 
  931 void
  932 kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty)
  933 {
  934         struct proc *p;
  935 
  936         if (pgrp)
  937                 LIST_FOREACH(p, &pgrp->pg_members, p_pglist)
  938                         if (checkctty == 0 || p->p_flag & P_CONTROLT)
  939                                 kpsignal(p, ksi, data);
  940 }
  941 
   942 /*
   943  * Send a signal caused by a trap to the current process.
   944  * If it will be caught immediately, deliver it with the correct code.
   945  * Otherwise, post it normally.
   946  */
  947 void
  948 trapsignal(struct lwp *l, const ksiginfo_t *ksi)
  949 {
  950         struct proc     *p;
  951         struct sigacts  *ps;
  952         int signum = ksi->ksi_signo;
  953 
  954         KASSERT(KSI_TRAP_P(ksi));
  955 
  956         p = l->l_proc;
  957         ps = p->p_sigacts;
  958         if ((p->p_flag & P_TRACED) == 0 &&
  959             sigismember(&p->p_sigctx.ps_sigcatch, signum) &&
  960             !sigismember(&p->p_sigctx.ps_sigmask, signum)) {
  961                 p->p_stats->p_ru.ru_nsignals++;
  962 #ifdef KTRACE
  963                 if (KTRPOINT(p, KTR_PSIG))
  964                         ktrpsig(l, signum, SIGACTION_PS(ps, signum).sa_handler,
  965                             &p->p_sigctx.ps_sigmask, ksi);
  966 #endif
  967                 kpsendsig(l, ksi, &p->p_sigctx.ps_sigmask);
  968                 (void) splsched();      /* XXXSMP */
  969                 sigplusset(&SIGACTION_PS(ps, signum).sa_mask,
  970                     &p->p_sigctx.ps_sigmask);
  971                 if (SIGACTION_PS(ps, signum).sa_flags & SA_RESETHAND) {
  972                         sigdelset(&p->p_sigctx.ps_sigcatch, signum);
  973                         if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
  974                                 sigaddset(&p->p_sigctx.ps_sigignore, signum);
  975                         SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
  976                 }
  977                 (void) spl0();          /* XXXSMP */
  978         } else {
  979                 p->p_sigctx.ps_lwp = l->l_lid;
  980                 /* XXX for core dump/debugger */
  981                 p->p_sigctx.ps_signo = ksi->ksi_signo;
  982                 p->p_sigctx.ps_code = ksi->ksi_trap;
  983                 kpsignal2(p, ksi);
  984         }
  985 }
  986 
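
trapsignal() above hands the trap's ksiginfo (signal number, trap code, fault address) straight to the handler when the signal is caught and unblocked. User programs see that information through SA_SIGINFO; a purely illustrative sketch, not part of this file:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void
fault_handler(int sig, siginfo_t *si, void *ctx)
{
        (void)ctx;
        /* printf is not async-signal-safe; tolerable in a demo that exits. */
        printf("signal %d, si_code %d, fault address %p\n",
            sig, si->si_code, si->si_addr);
        _Exit(1);
}

int
main(void)
{
        struct sigaction sa;

        sa.sa_sigaction = fault_handler;
        sa.sa_flags = SA_SIGINFO;       /* request the full siginfo_t */
        sigemptyset(&sa.sa_mask);
        sigaction(SIGSEGV, &sa, NULL);

        *(volatile int *)0 = 0;         /* provoke a trap-generated SIGSEGV */
        return 0;
}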
  987 /*
  988  * Fill in signal information and signal the parent for a child status change.
  989  */
  990 void
  991 child_psignal(struct proc *p)
  992 {
  993         ksiginfo_t ksi;
  994 
  995         KSI_INIT(&ksi);
  996         ksi.ksi_signo = SIGCHLD;
  997         ksi.ksi_code = p->p_xstat == SIGCONT ? CLD_CONTINUED : CLD_STOPPED;
  998         ksi.ksi_pid = p->p_pid;
  999         ksi.ksi_uid = kauth_cred_geteuid(p->p_cred);
 1000         ksi.ksi_status = p->p_xstat;
 1001         ksi.ksi_utime = p->p_stats->p_ru.ru_utime.tv_sec;
 1002         ksi.ksi_stime = p->p_stats->p_ru.ru_stime.tv_sec;
 1003         kpsignal2(p->p_pptr, &ksi);
 1004 }
 1005 
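
child_psignal() above fills in CLD_STOPPED or CLD_CONTINUED along with the child's pid, uid, status and CPU times before signalling the parent. A parent can inspect those fields via SA_SIGINFO; the following userland sketch is illustrative only and not part of this file.

#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static void
chld_handler(int sig, siginfo_t *si, void *ctx)
{
        (void)sig; (void)ctx;
        /* printf is not async-signal-safe; acceptable for a short demo. */
        printf("SIGCHLD: pid %ld si_code %d status %d\n",
            (long)si->si_pid, si->si_code, si->si_status);
}

int
main(void)
{
        struct sigaction sa;
        pid_t pid;

        sa.sa_sigaction = chld_handler;
        sa.sa_flags = SA_SIGINFO;       /* no SA_NOCLDSTOP: report stops too */
        sigemptyset(&sa.sa_mask);
        sigaction(SIGCHLD, &sa, NULL);

        if ((pid = fork()) == 0) {
                pause();                /* child just waits to be stopped */
                _exit(0);
        }

        sleep(1);
        kill(pid, SIGSTOP);             /* parent sees si_code == CLD_STOPPED */
        sleep(1);
        kill(pid, SIGCONT);             /* parent sees si_code == CLD_CONTINUED */
        sleep(1);
        kill(pid, SIGKILL);
        waitpid(pid, NULL, 0);          /* reap the child */
        return 0;
}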
 1006 /*
 1007  * Send the signal to the process.  If the signal has an action, the action
 1008  * is usually performed by the target process rather than the caller; we add
 1009  * the signal to the set of pending signals for the process.
 1010  *
 1011  * Exceptions:
 1012  *   o When a stop signal is sent to a sleeping process that takes the
 1013  *     default action, the process is stopped without awakening it.
 1014  *   o SIGCONT restarts stopped processes (or puts them back to sleep)
  1015  *     regardless of the signal action (e.g., blocked or ignored).
 1016  *
 1017  * Other ignored signals are discarded immediately.
 1018  */
 1019 void
 1020 psignal(struct proc *p, int signum)
 1021 {
 1022         ksiginfo_t ksi;
 1023 
 1024         KSI_INIT_EMPTY(&ksi);
 1025         ksi.ksi_signo = signum;
 1026         kpsignal2(p, &ksi);
 1027 }
 1028 
 1029 void
 1030 kpsignal(struct proc *p, ksiginfo_t *ksi, void *data)
 1031 {
 1032 
 1033         if ((p->p_flag & P_WEXIT) == 0 && data) {
 1034                 size_t fd;
 1035                 struct filedesc *fdp = p->p_fd;
 1036 
 1037                 ksi->ksi_fd = -1;
 1038                 for (fd = 0; fd < fdp->fd_nfiles; fd++) {
 1039                         struct file *fp = fdp->fd_ofiles[fd];
 1040                         /* XXX: lock? */
 1041                         if (fp && fp->f_data == data) {
 1042                                 ksi->ksi_fd = fd;
 1043                                 break;
 1044                         }
 1045                 }
 1046         }
 1047         kpsignal2(p, ksi);
 1048 }
 1049 
 1050 static void
 1051 kpsignal2(struct proc *p, const ksiginfo_t *ksi)
 1052 {
 1053         struct lwp *l, *suspended = NULL;
 1054         struct sadata_vp *vp;
 1055         ksiginfo_t *newkp;
 1056         int     s = 0, prop, allsusp;
 1057         sig_t   action;
 1058         int     signum = ksi->ksi_signo;
 1059 
 1060 #ifdef DIAGNOSTIC
 1061         if (signum <= 0 || signum >= NSIG)
 1062                 panic("psignal signal number %d", signum);
 1063 
 1064         SCHED_ASSERT_UNLOCKED();
 1065 #endif
 1066 
 1067         /*
 1068          * If the process is being created by fork, is a zombie or is
 1069          * exiting, then just drop the signal here and bail out.
 1070          */
 1071         if (p->p_stat != SACTIVE && p->p_stat != SSTOP)
 1072                 return;
 1073 
 1074         /*
 1075          * Notify any interested parties in the signal.
 1076          */
 1077         KNOTE(&p->p_klist, NOTE_SIGNAL | signum);
 1078 
 1079         prop = sigprop[signum];
 1080 
 1081         /*
 1082          * If proc is traced, always give parent a chance.
 1083          */
 1084         if (p->p_flag & P_TRACED) {
 1085                 action = SIG_DFL;
 1086 
 1087                 /*
 1088                  * If the process is being traced and the signal is being
 1089                  * caught, make sure to save any ksiginfo.
 1090                  */
 1091                 if (sigismember(&p->p_sigctx.ps_sigcatch, signum)) {
 1092                         SCHED_ASSERT_UNLOCKED();
 1093                         ksiginfo_queue(p, ksi, NULL);
 1094                 }
 1095         } else {
  1096                 /*
  1097                  * If the signal was the result of a trap and is currently
  1098                  * masked, reset it to the default action so that the
  1099                  * process core dumps immediately instead of repeatedly
  1100                  * taking the signal.
  1101                  */
 1102                 if (KSI_TRAP_P(ksi)
 1103                     && sigismember(&p->p_sigctx.ps_sigmask, signum)
 1104                     && !sigismember(&p->p_sigctx.ps_sigcatch, signum)) {
 1105                         sigdelset(&p->p_sigctx.ps_sigignore, signum);
 1106                         sigdelset(&p->p_sigctx.ps_sigcatch, signum);
 1107                         sigdelset(&p->p_sigctx.ps_sigmask, signum);
 1108                         SIGACTION(p, signum).sa_handler = SIG_DFL;
 1109                 }
 1110 
 1111                 /*
 1112                  * If the signal is being ignored,
 1113                  * then we forget about it immediately.
 1114                  * (Note: we don't set SIGCONT in p_sigctx.ps_sigignore,
 1115                  * and if it is set to SIG_IGN,
 1116                  * action will be SIG_DFL here.)
 1117                  */
 1118                 if (sigismember(&p->p_sigctx.ps_sigignore, signum))
 1119                         return;
 1120                 if (sigismember(&p->p_sigctx.ps_sigmask, signum))
 1121                         action = SIG_HOLD;
 1122                 else if (sigismember(&p->p_sigctx.ps_sigcatch, signum))
 1123                         action = SIG_CATCH;
 1124                 else {
 1125                         action = SIG_DFL;
 1126 
 1127                         if (prop & SA_KILL && p->p_nice > NZERO)
 1128                                 p->p_nice = NZERO;
 1129 
 1130                         /*
 1131                          * If sending a tty stop signal to a member of an
 1132                          * orphaned process group, discard the signal here if
 1133                          * the action is default; don't stop the process below
 1134                          * if sleeping, and don't clear any pending SIGCONT.
 1135                          */
 1136                         if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0)
 1137                                 return;
 1138                 }
 1139         }
 1140 
 1141         if (prop & SA_CONT)
 1142                 sigminusset(&stopsigmask, &p->p_sigctx.ps_siglist);
 1143 
 1144         if (prop & SA_STOP)
 1145                 sigminusset(&contsigmask, &p->p_sigctx.ps_siglist);
 1146 
  1147         /*
  1148          * If the signal doesn't have SA_CANTMASK (no override for SIGKILL,
  1149          * please!), check whether anything is waiting on it.  If so, save
  1150          * the info into the provided ps_sigwaited and wake up the waiter.
  1151          * The signal won't be processed further here.
  1152          */
 1153         if ((prop & SA_CANTMASK) == 0
 1154             && p->p_sigctx.ps_sigwaited
 1155             && sigismember(p->p_sigctx.ps_sigwait, signum)
 1156             && p->p_stat != SSTOP) {
 1157                 p->p_sigctx.ps_sigwaited->ksi_info = ksi->ksi_info;
 1158                 p->p_sigctx.ps_sigwaited = NULL;
 1159                 wakeup_one(&p->p_sigctx.ps_sigwait);
 1160                 return;
 1161         }
 1162 
 1163         sigaddset(&p->p_sigctx.ps_siglist, signum);
 1164 
 1165         /* CHECKSIGS() is "inlined" here. */
 1166         p->p_sigctx.ps_sigcheck = 1;
 1167 
 1168         /*
 1169          * Defer further processing for signals which are held,
 1170          * except that stopped processes must be continued by SIGCONT.
 1171          */
 1172         if (action == SIG_HOLD &&
 1173             ((prop & SA_CONT) == 0 || p->p_stat != SSTOP)) {
 1174                 SCHED_ASSERT_UNLOCKED();
 1175                 ksiginfo_queue(p, ksi, NULL);
 1176                 return;
 1177         }
 1178 
  1179         /*
  1180          * Allocate a ksiginfo_t in case we need to insert it with the
  1181          * scheduler lock held, but only if this ksiginfo_t isn't empty.
  1182          */
 1183         if (!KSI_EMPTY_P(ksi)) {
 1184                 newkp = ksiginfo_alloc(PR_NOWAIT);
 1185                 if (newkp == NULL) {
 1186 #ifdef DIAGNOSTIC
  1187                         printf("kpsignal2: couldn't allocate ksiginfo\n");
 1188 #endif
 1189                         return;
 1190                 }
 1191         } else
 1192                 newkp = NULL;
 1193 
 1194         SCHED_LOCK(s);
 1195 
 1196         if (p->p_flag & P_SA) {
 1197                 allsusp = 0;
 1198                 l = NULL;
 1199                 if (p->p_stat == SACTIVE) {
 1200                         SLIST_FOREACH(vp, &p->p_sa->sa_vps, savp_next) {
 1201                                 l = vp->savp_lwp;
 1202                                 KDASSERT(l != NULL);
 1203                                 if (l->l_flag & L_SA_IDLE) {
 1204                                         /* wakeup idle LWP */
 1205                                         goto found;
 1206                                         /*NOTREACHED*/
 1207                                 } else if (l->l_flag & L_SA_YIELD) {
 1208                                         /* idle LWP is already waking up */
 1209                                         goto out;
 1210                                         /*NOTREACHED*/
 1211                                 }
 1212                         }
 1213                         SLIST_FOREACH(vp, &p->p_sa->sa_vps, savp_next) {
 1214                                 l = vp->savp_lwp;
 1215                                 if (l->l_stat == LSRUN ||
 1216                                     l->l_stat == LSONPROC) {
 1217                                         signotify(p);
 1218                                         goto out;
 1219                                         /*NOTREACHED*/
 1220                                 }
 1221                                 if (l->l_stat == LSSLEEP &&
 1222                                     l->l_flag & L_SINTR) {
 1223                                         /* ok to signal vp lwp */
 1224                                         break;
 1225                                 } else
 1226                                         l = NULL;
 1227                         }
 1228                 } else if (p->p_stat == SSTOP) {
 1229                         SLIST_FOREACH(vp, &p->p_sa->sa_vps, savp_next) {
 1230                                 l = vp->savp_lwp;
 1231                                 if (l->l_stat == LSSLEEP && (l->l_flag & L_SINTR) != 0)
 1232                                         break;
 1233                                 l = NULL;
 1234                         }
 1235                 }
 1236         } else if (p->p_nrlwps > 0 && (p->p_stat != SSTOP)) {
 1237                 /*
 1238                  * At least one LWP is running or on a run queue.
 1239                  * The signal will be noticed when one of them returns
 1240                  * to userspace.
 1241                  */
 1242                 signotify(p);
 1243                 /*
 1244                  * The signal will be noticed very soon.
 1245                  */
 1246                 goto out;
 1247                 /*NOTREACHED*/
 1248         } else {
  1249                 /*
  1250                  * Find out whether any of the sleeps are interruptible,
  1251                  * and whether all the remaining live LWPs are suspended.
  1252                  */
 1253                 allsusp = 1;
 1254                 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
 1255                         if (l->l_stat == LSSLEEP &&
 1256                             l->l_flag & L_SINTR)
 1257                                 break;
 1258                         if (l->l_stat == LSSUSPENDED)
 1259                                 suspended = l;
 1260                         else if ((l->l_stat != LSZOMB) &&
 1261                             (l->l_stat != LSDEAD))
 1262                                 allsusp = 0;
 1263                 }
 1264         }
 1265 
 1266  found:
 1267         switch (p->p_stat) {
 1268         case SACTIVE:
 1269 
 1270                 if (l != NULL && (p->p_flag & P_TRACED))
 1271                         goto run;
 1272 
 1273                 /*
 1274                  * If SIGCONT is default (or ignored) and process is
 1275                  * asleep, we are finished; the process should not
 1276                  * be awakened.
 1277                  */
 1278                 if ((prop & SA_CONT) && action == SIG_DFL) {
 1279                         sigdelset(&p->p_sigctx.ps_siglist, signum);
 1280                         goto done;
 1281                 }
 1282 
 1283                 /*
 1284                  * When a sleeping process receives a stop
 1285                  * signal, process immediately if possible.
 1286                  */
 1287                 if ((prop & SA_STOP) && action == SIG_DFL) {
  1288                         /*
  1289                          * If the child is holding the parent blocked,
  1290                          * stopping it could cause deadlock.
  1291                          */
 1292                         if (p->p_flag & P_PPWAIT) {
 1293                                 goto out;
 1294                         }
 1295                         sigdelset(&p->p_sigctx.ps_siglist, signum);
 1296                         p->p_xstat = signum;
 1297                         proc_stop(p, 1);        /* XXXSMP: recurse? */
 1298                         SCHED_UNLOCK(s);
 1299                         if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) {
 1300                                 child_psignal(p);
 1301                         }
 1302                         goto done_unlocked;
 1303                 }
 1304 
 1305                 if (l == NULL) {
 1306                         /*
 1307                          * Special case: SIGKILL of a process
 1308                          * which is entirely composed of
 1309                          * suspended LWPs should succeed. We
 1310                          * make this happen by unsuspending one of
 1311                          * them.
 1312                          */
 1313                         if (allsusp && (signum == SIGKILL)) {
 1314                                 lwp_continue(suspended);
 1315                         }
 1316                         goto done;
 1317                 }
 1318                 /*
 1319                  * All other (caught or default) signals
 1320                  * cause the process to run.
 1321                  */
 1322                 goto runfast;
 1323                 /*NOTREACHED*/
 1324         case SSTOP:
 1325                 /* Process is stopped */
 1326                 /*
 1327                  * If traced process is already stopped,
 1328                  * then no further action is necessary.
 1329                  */
 1330                 if (p->p_flag & P_TRACED)
 1331                         goto done;
 1332 
  1333                 /*
  1334                  * A kill signal always sets the process running,
  1335                  * if possible.
  1336                  */
 1337                 if (signum == SIGKILL) {
 1338                         l = proc_unstop(p);
 1339                         if (l)
 1340                                 goto runfast;
 1341                         goto done;
 1342                 }
 1343 
 1344                 if (prop & SA_CONT) {
 1345                         /*
 1346                          * If SIGCONT is default (or ignored),
 1347                          * we continue the process but don't
 1348                          * leave the signal in ps_siglist, as
 1349                          * it has no further action.  If
 1350                          * SIGCONT is held, we continue the
 1351                          * process and leave the signal in
 1352                          * ps_siglist.  If the process catches
 1353                          * SIGCONT, let it handle the signal
 1354                          * itself.  If it isn't waiting on an
 1355                          * event, then it goes back to run
 1356                          * state.  Otherwise, process goes
 1357                          * back to sleep state.
 1358                          */
 1359                         if (action == SIG_DFL)
 1360                                 sigdelset(&p->p_sigctx.ps_siglist,
 1361                                     signum);
 1362                         l = proc_unstop(p);
 1363                         if (l && (action == SIG_CATCH))
 1364                                 goto runfast;
 1365                         goto out;
 1366                 }
 1367 
 1368                 if (prop & SA_STOP) {
 1369                         /*
 1370                          * Already stopped, don't need to stop again.
 1371                          * (If we did the shell could get confused.)
 1372                          */
 1373                         sigdelset(&p->p_sigctx.ps_siglist, signum);
 1374                         goto done;
 1375                 }
 1376 
 1377                 /*
  1378                  * If an LWP is sleeping interruptibly, then
 1379                  * wake it up; it will run until the kernel
 1380                  * boundary, where it will stop in issignal(),
 1381                  * since p->p_stat is still SSTOP. When the
 1382                  * process is continued, it will be made
 1383                  * runnable and can look at the signal.
 1384                  */
 1385                 if (l)
 1386                         goto run;
 1387                 goto out;
 1388         case SIDL:
 1389                 /* Process is being created by fork */
 1390                 /* XXX: We are not ready to receive signals yet */
 1391                 goto done;
 1392         default:
 1393                 /* Else what? */
 1394                 panic("psignal: Invalid process state %d.", p->p_stat);
 1395         }
 1396         /*NOTREACHED*/
 1397 
 1398  runfast:
 1399         if (action == SIG_CATCH) {
 1400                 ksiginfo_queue(p, ksi, &newkp);
 1401                 action = SIG_HOLD;
 1402         }
 1403         /*
 1404          * Raise priority to at least PUSER.
 1405          */
 1406         if (l->l_priority > PUSER)
 1407                 l->l_priority = PUSER;
 1408  run:
 1409         if (action == SIG_CATCH) {
 1410                 ksiginfo_queue(p, ksi, &newkp);
 1411                 action = SIG_HOLD;
 1412         }
 1413 
 1414         setrunnable(l);         /* XXXSMP: recurse? */
 1415  out:
 1416         if (action == SIG_CATCH)
 1417                 ksiginfo_queue(p, ksi, &newkp);
 1418  done:
 1419         SCHED_UNLOCK(s);
 1420 
 1421  done_unlocked:
 1422         if (newkp)
 1423                 ksiginfo_free(newkp);
 1424 }
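
/*
 * Illustrative userland sketch (not part of this source): what the
 * stopped-process cases handled above look like from user space.  A child
 * stopped with SIGSTOP sits in SSTOP until it gets SIGCONT (the SA_CONT
 * branch above) or SIGKILL (which always sets the process running again so
 * that it can exit).
 */
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <unistd.h>

int
main(void)
{
	pid_t pid = fork();

	if (pid == 0) {			/* child: wait for signals forever */
		for (;;)
			pause();
	}
	kill(pid, SIGSTOP);		/* child enters SSTOP via proc_stop() */
	sleep(1);
	kill(pid, SIGCONT);		/* SA_CONT case: proc_unstop() resumes it */
	kill(pid, SIGKILL);		/* SIGKILL case: made runnable only to exit */
	waitpid(pid, NULL, 0);
	return 0;
}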
 1425 
 1426 siginfo_t *
 1427 siginfo_alloc(int flags)
 1428 {
 1429 
 1430         return pool_get(&siginfo_pool, flags);
 1431 }
 1432 
 1433 void
 1434 siginfo_free(void *arg)
 1435 {
 1436 
 1437         pool_put(&siginfo_pool, arg);
 1438 }
 1439 
 1440 void
 1441 kpsendsig(struct lwp *l, const ksiginfo_t *ksi, const sigset_t *mask)
 1442 {
 1443         struct proc *p = l->l_proc;
 1444         struct lwp *le, *li;
 1445         siginfo_t *si;
 1446         int f;
 1447 
 1448         if (p->p_flag & P_SA) {
 1449 
 1450                 /* XXXUPSXXX What if not on sa_vp ? */
 1451 
 1452                 f = l->l_flag & L_SA;
 1453                 l->l_flag &= ~L_SA;
 1454                 si = siginfo_alloc(PR_WAITOK);
 1455                 si->_info = ksi->ksi_info;
 1456                 le = li = NULL;
 1457                 if (KSI_TRAP_P(ksi))
 1458                         le = l;
 1459                 else
 1460                         li = l;
 1461                 if (sa_upcall(l, SA_UPCALL_SIGNAL | SA_UPCALL_DEFER, le, li,
 1462                     sizeof(*si), si, siginfo_free) != 0) {
 1463                         siginfo_free(si);
 1464 #if 0
 1465                         if (KSI_TRAP_P(ksi))
 1466                                 /* XXX What do we do here?? */;
 1467 #endif
 1468                 }
 1469                 l->l_flag |= f;
 1470                 return;
 1471         }
 1472 
 1473         (*p->p_emul->e_sendsig)(ksi, mask);
 1474 }
 1475 
 1476 static inline int firstsig(const sigset_t *);
 1477 
 1478 static inline int
 1479 firstsig(const sigset_t *ss)
 1480 {
 1481         int sig;
 1482 
 1483         sig = ffs(ss->__bits[0]);
 1484         if (sig != 0)
 1485                 return (sig);
 1486 #if NSIG > 33
 1487         sig = ffs(ss->__bits[1]);
 1488         if (sig != 0)
 1489                 return (sig + 32);
 1490 #endif
 1491 #if NSIG > 65
 1492         sig = ffs(ss->__bits[2]);
 1493         if (sig != 0)
 1494                 return (sig + 64);
 1495 #endif
 1496 #if NSIG > 97
 1497         sig = ffs(ss->__bits[3]);
 1498         if (sig != 0)
 1499                 return (sig + 96);
 1500 #endif
 1501         return (0);
 1502 }
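
/*
 * Hedged userland sketch (not part of this source) of the ordering property
 * firstsig() implements: each word is scanned with ffs(), so the
 * lowest-numbered pending signal is always reported first.  The sketch
 * assumes the BSD NSIG constant from <signal.h>.
 */
#include <signal.h>
#include <stdio.h>

/* Userland analogue of firstsig(): return the lowest-numbered member. */
static int
first_member(const sigset_t *ss)
{
	int sig;

	for (sig = 1; sig < NSIG; sig++)
		if (sigismember(ss, sig))
			return sig;
	return 0;
}

int
main(void)
{
	sigset_t ss;

	sigemptyset(&ss);
	sigaddset(&ss, SIGTERM);	/* 15 */
	sigaddset(&ss, SIGINT);		/* 2 */
	printf("first pending: %d\n", first_member(&ss));	/* prints 2 */
	return 0;
}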
 1503 
 1504 /*
 1505  * If the current process has received a signal (should be caught or cause
 1506  * termination, should interrupt current syscall), return the signal number.
 1507  * Stop signals with default action are processed immediately, then cleared;
 1508  * they aren't returned.  This is checked after each entry to the system for
 1509  * a syscall or trap (though this can usually be done without calling issignal
 1510  * by checking the pending signal masks in the CURSIG macro.) The normal call
 1511  * sequence is
 1512  *
 1513  *      while (signum = CURSIG(curlwp))
 1514  *              postsig(signum);
 1515  */
 1516 int
 1517 issignal(struct lwp *l)
 1518 {
 1519         struct proc     *p = l->l_proc;
 1520         int             s, signum, prop;
 1521         sigset_t        ss;
 1522 
 1523         /* Bail out if we do not own the virtual processor */
 1524         if (l->l_flag & L_SA && l->l_savp->savp_lwp != l)
 1525                 return 0;
 1526 
 1527         KERNEL_PROC_LOCK(l);
 1528 
 1529         if (p->p_stat == SSTOP) {
 1530                 /*
 1531                  * The process is stopped/stopping. Stop ourselves now that
 1532                  * we're on the kernel/userspace boundary.
 1533                  */
 1534                 SCHED_LOCK(s);
 1535                 l->l_stat = LSSTOP;
 1536                 p->p_nrlwps--;
 1537                 if (p->p_flag & P_TRACED)
 1538                         goto sigtraceswitch;
 1539                 else
 1540                         goto sigswitch;
 1541         }
 1542         for (;;) {
 1543                 sigpending1(p, &ss);
 1544                 if (p->p_flag & P_PPWAIT)
 1545                         sigminusset(&stopsigmask, &ss);
 1546                 signum = firstsig(&ss);
 1547                 if (signum == 0) {                      /* no signal to send */
 1548                         p->p_sigctx.ps_sigcheck = 0;
 1549                         KERNEL_PROC_UNLOCK(l);
 1550                         return (0);
 1551                 }
 1552                                                         /* take the signal! */
 1553                 sigdelset(&p->p_sigctx.ps_siglist, signum);
 1554 
 1555                 /*
 1556                  * We should see pending but ignored signals
 1557                  * only if P_TRACED was on when they were posted.
 1558                  */
 1559                 if (sigismember(&p->p_sigctx.ps_sigignore, signum) &&
 1560                     (p->p_flag & P_TRACED) == 0)
 1561                         continue;
 1562 
 1563                 if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
 1564                         /*
 1565                          * If traced, always stop, and stay
 1566                          * stopped until released by the debugger.
 1567                          */
 1568                         p->p_xstat = signum;
 1569 
 1570                         /* Emulation-specific handling of signal trace */
 1571                         if ((p->p_emul->e_tracesig != NULL) &&
 1572                             ((*p->p_emul->e_tracesig)(p, signum) != 0))
 1573                                 goto childresumed;
 1574 
 1575                         if ((p->p_flag & P_FSTRACE) == 0)
 1576                                 child_psignal(p);
 1577                         SCHED_LOCK(s);
 1578                         proc_stop(p, 1);
 1579                 sigtraceswitch:
 1580                         mi_switch(l, NULL);
 1581                         SCHED_ASSERT_UNLOCKED();
 1582                         splx(s);
 1583 
 1584                 childresumed:
 1585                         /*
 1586                          * If we are no longer being traced, or the parent
 1587                          * didn't give us a signal, look for more signals.
 1588                          */
 1589                         if ((p->p_flag & P_TRACED) == 0 || p->p_xstat == 0)
 1590                                 continue;
 1591 
 1592                         /*
 1593                          * If the new signal is being masked, look for other
 1594                          * signals.
 1595                          */
 1596                         signum = p->p_xstat;
 1597                         p->p_xstat = 0;
 1598                         /*
 1599                          * `p->p_sigctx.ps_siglist |= mask' is done
 1600                          * in setrunnable().
 1601                          */
 1602                         if (sigismember(&p->p_sigctx.ps_sigmask, signum))
 1603                                 continue;
 1604                                                         /* take the signal! */
 1605                         sigdelset(&p->p_sigctx.ps_siglist, signum);
 1606                 }
 1607 
 1608                 prop = sigprop[signum];
 1609 
 1610                 /*
 1611                  * Decide whether the signal should be returned.
 1612                  * Return the signal's number, or fall through
 1613                  * to clear it from the pending mask.
 1614                  */
 1615                 switch ((long)SIGACTION(p, signum).sa_handler) {
 1616 
 1617                 case (long)SIG_DFL:
 1618                         /*
 1619                          * Don't take default actions on system processes.
 1620                          */
 1621                         if (p->p_pid <= 1) {
 1622 #ifdef DIAGNOSTIC
 1623                                 /*
 1624                                  * Are you sure you want to ignore SIGSEGV
 1625                                  * in init? XXX
 1626                                  */
 1627                                 printf("Process (pid %d) got signal %d\n",
 1628                                     p->p_pid, signum);
 1629 #endif
 1630                                 break;          /* == ignore */
 1631                         }
 1632                         /*
 1633                          * If there is a pending stop signal to process
 1634                          * with default action, stop here,
 1635                          * then clear the signal.  However,
 1636                          * if process is member of an orphaned
 1637                          * process group, ignore tty stop signals.
 1638                          */
 1639                         if (prop & SA_STOP) {
 1640                                 if (p->p_flag & P_TRACED ||
 1641                                     (p->p_pgrp->pg_jobc == 0 &&
 1642                                     prop & SA_TTYSTOP))
 1643                                         break;  /* == ignore */
 1644                                 p->p_xstat = signum;
 1645                                 if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0)
 1646                                         child_psignal(p);
 1647                                 SCHED_LOCK(s);
 1648                                 proc_stop(p, 1);
 1649                         sigswitch:
 1650                                 mi_switch(l, NULL);
 1651                                 SCHED_ASSERT_UNLOCKED();
 1652                                 splx(s);
 1653                                 break;
 1654                         } else if (prop & SA_IGNORE) {
 1655                                 /*
 1656                                  * Except for SIGCONT, shouldn't get here.
 1657                                  * Default action is to ignore; drop it.
 1658                                  */
 1659                                 break;          /* == ignore */
 1660                         } else
 1661                                 goto keep;
 1662                         /*NOTREACHED*/
 1663 
 1664                 case (long)SIG_IGN:
 1665                         /*
 1666                          * Masking above should prevent us ever trying
 1667                          * to take action on an ignored signal other
 1668                          * than SIGCONT, unless process is traced.
 1669                          */
 1670 #ifdef DEBUG_ISSIGNAL
 1671                         if ((prop & SA_CONT) == 0 &&
 1672                             (p->p_flag & P_TRACED) == 0)
 1673                                 printf("issignal\n");
 1674 #endif
 1675                         break;          /* == ignore */
 1676 
 1677                 default:
 1678                         /*
 1679                          * This signal has an action, let
 1680                          * postsig() process it.
 1681                          */
 1682                         goto keep;
 1683                 }
 1684         }
 1685         /* NOTREACHED */
 1686 
 1687  keep:
 1688                                                 /* leave the signal for later */
 1689         sigaddset(&p->p_sigctx.ps_siglist, signum);
 1690         CHECKSIGS(p);
 1691         KERNEL_PROC_UNLOCK(l);
 1692         return (signum);
 1693 }
 1694 
 1695 /*
 1696  * Put the argument process into the stopped state and notify the parent
 1697  * via wakeup.  Signals are handled elsewhere.  The process must not be
 1698  * on the run queue.
 1699  */
 1700 void
 1701 proc_stop(struct proc *p, int dowakeup)
 1702 {
 1703         struct lwp *l;
 1704         struct proc *parent;
 1705         struct sadata_vp *vp;
 1706 
 1707         SCHED_ASSERT_LOCKED();
 1708 
 1709         /* XXX lock process LWP state */
 1710         p->p_flag &= ~P_WAITED;
 1711         p->p_stat = SSTOP;
 1712         parent = p->p_pptr;
 1713         parent->p_nstopchild++;
 1714 
 1715         if (p->p_flag & P_SA) {
 1716                 /*
 1717                  * Only (try to) put the LWP on the VP in stopped
 1718                  * state.
 1719                  * All other LWPs will suspend in sa_setwoken()
 1720                  * because the VP-LWP in stopped state cannot be
 1721                  * repossessed.
 1722                  */
 1723                 SLIST_FOREACH(vp, &p->p_sa->sa_vps, savp_next) {
 1724                         l = vp->savp_lwp;
 1725                         if (l->l_stat == LSONPROC && l->l_cpu == curcpu()) {
 1726                                 l->l_stat = LSSTOP;
 1727                                 p->p_nrlwps--;
 1728                         } else if (l->l_stat == LSRUN) {
 1729                                 /* Remove LWP from the run queue */
 1730                                 remrunqueue(l);
 1731                                 l->l_stat = LSSTOP;
 1732                                 p->p_nrlwps--;
 1733                         } else if (l->l_stat == LSSLEEP &&
 1734                             l->l_flag & L_SA_IDLE) {
 1735                                 l->l_flag &= ~L_SA_IDLE;
 1736                                 l->l_stat = LSSTOP;
 1737                         }
 1738                 }
 1739                 goto out;
 1740         }
 1741 
 1742         /*
 1743          * Put as many LWP's as possible in stopped state.
 1744          * Sleeping ones will notice the stopped state as they try to
 1745          * return to userspace.
 1746          */
 1747 
 1748         LIST_FOREACH(l, &p->p_lwps, l_sibling) {
 1749                 if (l->l_stat == LSONPROC) {
 1750                         /* XXX SMP this assumes that a LWP that is LSONPROC
 1751                          * is curlwp and hence is about to be mi_switched
 1752                          * away; the only callers of proc_stop() are:
 1753                          * - psignal
 1754                          * - issignal()
 1755                          * For the former, proc_stop() is only called when
 1756                          * no processes are running, so we don't worry.
 1757                          * For the latter, proc_stop() is called right
 1758                          * before mi_switch().
 1759                          */
 1760                         l->l_stat = LSSTOP;
 1761                         p->p_nrlwps--;
 1762                 } else if (l->l_stat == LSRUN) {
 1763                         /* Remove LWP from the run queue */
 1764                         remrunqueue(l);
 1765                         l->l_stat = LSSTOP;
 1766                         p->p_nrlwps--;
 1767                 } else if ((l->l_stat == LSSLEEP) ||
 1768                     (l->l_stat == LSSUSPENDED) ||
 1769                     (l->l_stat == LSZOMB) ||
 1770                     (l->l_stat == LSDEAD)) {
 1771                         /*
 1772                          * Don't do anything; let sleeping LWPs
 1773                          * discover the stopped state of the process
 1774                          * on their way out of the kernel; otherwise,
 1775                          * things like NFS threads that sleep with
 1776                          * locks will block the rest of the system
 1777                          * from getting any work done.
 1778                          *
 1779                          * Suspended/dead/zombie LWPs aren't going
 1780                          * anywhere, so we don't need to touch them.
 1781                          */
 1782                 }
 1783 #ifdef DIAGNOSTIC
 1784                 else {
 1785                         panic("proc_stop: process %d lwp %d "
 1786                               "in unstoppable state %d.\n",
 1787                             p->p_pid, l->l_lid, l->l_stat);
 1788                 }
 1789 #endif
 1790         }
 1791 
 1792  out:
 1793         /* XXX unlock process LWP state */
 1794 
 1795         if (dowakeup)
 1796                 sched_wakeup((caddr_t)p->p_pptr);
 1797 }
 1798 
 1799 /*
 1800  * Given a process in state SSTOP, set the state back to SACTIVE and
 1801  * move LSSTOP'd LWPs to LSSLEEP or make them runnable.
 1802  *
 1803  * If no LWPs ended up runnable (and therefore able to take a signal),
 1804  * return a LWP that is sleeping interruptibly. The caller can wake
 1805  * that LWP up to take a signal.
 1806  */
 1807 struct lwp *
 1808 proc_unstop(struct proc *p)
 1809 {
 1810         struct lwp *l, *lr = NULL;
 1811         struct sadata_vp *vp;
 1812         int cantake = 0;
 1813 
 1814         SCHED_ASSERT_LOCKED();
 1815 
 1816         /*
 1817          * Our caller wants to be informed if there are only sleeping
 1818          * and interruptible LWPs left after we have run so that it
 1819          * can invoke setrunnable() if required - return one of the
 1820          * interruptable LWPs if this is the case.
 1821          */
 1822 
 1823         if (!(p->p_flag & P_WAITED))
 1824                 p->p_pptr->p_nstopchild--;
 1825         p->p_stat = SACTIVE;
 1826         LIST_FOREACH(l, &p->p_lwps, l_sibling) {
 1827                 if (l->l_stat == LSRUN) {
 1828                         lr = NULL;
 1829                         cantake = 1;
 1830                 }
 1831                 if (l->l_stat != LSSTOP)
 1832                         continue;
 1833 
 1834                 if (l->l_wchan != NULL) {
 1835                         l->l_stat = LSSLEEP;
 1836                         if ((cantake == 0) && (l->l_flag & L_SINTR)) {
 1837                                 lr = l;
 1838                                 cantake = 1;
 1839                         }
 1840                 } else {
 1841                         setrunnable(l);
 1842                         lr = NULL;
 1843                         cantake = 1;
 1844                 }
 1845         }
 1846         if (p->p_flag & P_SA) {
 1847                 /* Only consider returning the LWP on the VP. */
 1848                 SLIST_FOREACH(vp, &p->p_sa->sa_vps, savp_next) {
 1849                         lr = vp->savp_lwp;
 1850                         if (lr->l_stat == LSSLEEP) {
 1851                                 if (lr->l_flag & L_SA_YIELD) {
 1852                                         setrunnable(lr);
 1853                                         break;
 1854                                 } else if (lr->l_flag & L_SINTR)
 1855                                         return lr;
 1856                         }
 1857                 }
 1858                 return NULL;
 1859         }
 1860         return lr;
 1861 }
 1862 
 1863 /*
 1864  * Take the action for the specified signal
 1865  * from the current set of pending signals.
 1866  */
 1867 void
 1868 postsig(int signum)
 1869 {
 1870         struct lwp *l;
 1871         struct proc     *p;
 1872         struct sigacts  *ps;
 1873         sig_t           action;
 1874         sigset_t        *returnmask;
 1875 
 1876         l = curlwp;
 1877         p = l->l_proc;
 1878         ps = p->p_sigacts;
 1879 #ifdef DIAGNOSTIC
 1880         if (signum == 0)
 1881                 panic("postsig");
 1882 #endif
 1883 
 1884         KERNEL_PROC_LOCK(l);
 1885 
 1886 #ifdef MULTIPROCESSOR
 1887         /*
 1888          * On MP, issignal() can return the same signal to multiple
 1889          * LWPs.  The LWPs will block above waiting for the kernel
 1890          * lock and the first LWP which gets through will then remove
 1891          * the signal from ps_siglist.  All other LWPs exit here.
 1892          */
 1893         if (!sigismember(&p->p_sigctx.ps_siglist, signum)) {
 1894                 KERNEL_PROC_UNLOCK(l);
 1895                 return;
 1896         }
 1897 #endif
 1898         sigdelset(&p->p_sigctx.ps_siglist, signum);
 1899         action = SIGACTION_PS(ps, signum).sa_handler;
 1900         if (action == SIG_DFL) {
 1901 #ifdef KTRACE
 1902                 if (KTRPOINT(p, KTR_PSIG))
 1903                         ktrpsig(l, signum, action,
 1904                             p->p_sigctx.ps_flags & SAS_OLDMASK ?
 1905                             &p->p_sigctx.ps_oldmask : &p->p_sigctx.ps_sigmask,
 1906                             NULL);
 1907 #endif
 1908                 /*
 1909                  * Default action, where the default is to kill
 1910                  * the process.  (Other cases were ignored above.)
 1911                  */
 1912                 sigexit(l, signum);
 1913                 /* NOTREACHED */
 1914         } else {
 1915                 ksiginfo_t *ksi;
 1916                 /*
 1917                  * If we get here, the signal must be caught.
 1918                  */
 1919 #ifdef DIAGNOSTIC
 1920                 if (action == SIG_IGN ||
 1921                     sigismember(&p->p_sigctx.ps_sigmask, signum))
 1922                         panic("postsig action");
 1923 #endif
 1924                 /*
 1925                  * Set the new mask value and also defer further
 1926                  * occurrences of this signal.
 1927                  *
 1928                  * Special case: user has done a sigpause.  Here the
 1929                  * current mask is not of interest, but rather the
 1930                  * mask from before the sigpause is what we want
 1931                  * restored after the signal processing is completed.
 1932                  */
 1933                 if (p->p_sigctx.ps_flags & SAS_OLDMASK) {
 1934                         returnmask = &p->p_sigctx.ps_oldmask;
 1935                         p->p_sigctx.ps_flags &= ~SAS_OLDMASK;
 1936                 } else
 1937                         returnmask = &p->p_sigctx.ps_sigmask;
 1938                 p->p_stats->p_ru.ru_nsignals++;
 1939                 ksi = ksiginfo_dequeue(p, signum);
 1940 #ifdef KTRACE
 1941                 if (KTRPOINT(p, KTR_PSIG))
 1942                         ktrpsig(l, signum, action,
 1943                             p->p_sigctx.ps_flags & SAS_OLDMASK ?
 1944                             &p->p_sigctx.ps_oldmask : &p->p_sigctx.ps_sigmask,
 1945                             ksi);
 1946 #endif
 1947                 if (ksi == NULL) {
 1948                         ksiginfo_t ksi1;
 1949                         /*
 1950                          * we did not save any siginfo for this, either
 1951                          * because the signal was not caught, or because the
 1952                          * user did not request SA_SIGINFO
 1953                          */
 1954                         KSI_INIT_EMPTY(&ksi1);
 1955                         ksi1.ksi_signo = signum;
 1956                         kpsendsig(l, &ksi1, returnmask);
 1957                 } else {
 1958                         kpsendsig(l, ksi, returnmask);
 1959                         ksiginfo_free(ksi);
 1960                 }
 1961                 p->p_sigctx.ps_lwp = 0;
 1962                 p->p_sigctx.ps_code = 0;
 1963                 p->p_sigctx.ps_signo = 0;
 1964                 (void) splsched();      /* XXXSMP */
 1965                 sigplusset(&SIGACTION_PS(ps, signum).sa_mask,
 1966                     &p->p_sigctx.ps_sigmask);
 1967                 if (SIGACTION_PS(ps, signum).sa_flags & SA_RESETHAND) {
 1968                         sigdelset(&p->p_sigctx.ps_sigcatch, signum);
 1969                         if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
 1970                                 sigaddset(&p->p_sigctx.ps_sigignore, signum);
 1971                         SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
 1972                 }
 1973                 (void) spl0();          /* XXXSMP */
 1974         }
 1975 
 1976         KERNEL_PROC_UNLOCK(l);
 1977 }
 1978 
 1979 /*
 1980  * Kill the current process for stated reason.
 1981  */
 1982 void
 1983 killproc(struct proc *p, const char *why)
 1984 {
 1985         log(LOG_ERR, "pid %d was killed: %s\n", p->p_pid, why);
 1986         uprintf("sorry, pid %d was killed: %s\n", p->p_pid, why);
 1987         psignal(p, SIGKILL);
 1988 }
 1989 
 1990 /*
 1991  * Force the current process to exit with the specified signal, dumping core
 1992  * if appropriate.  We bypass the normal tests for masked and caught signals,
 1993  * allowing unrecoverable failures to terminate the process without changing
 1994  * signal state.  Mark the accounting record with the signal termination.
 1995  * If dumping core, save the signal number for the debugger.  Calls exit and
 1996  * does not return.
 1997  */
 1998 
 1999 #if defined(DEBUG)
 2000 int     kern_logsigexit = 1;    /* not static to make public for sysctl */
 2001 #else
 2002 int     kern_logsigexit = 0;    /* not static to make public for sysctl */
 2003 #endif
 2004 
 2005 static  const char logcoredump[] =
 2006         "pid %d (%s), uid %d: exited on signal %d (core dumped)\n";
 2007 static  const char lognocoredump[] =
 2008         "pid %d (%s), uid %d: exited on signal %d (core not dumped, err = %d)\n";
 2009 
 2010 /* Wrapper function for use in p_userret */
 2011 static void
 2012 lwp_coredump_hook(struct lwp *l, void *arg)
 2013 {
 2014         int s;
 2015 
 2016         /*
 2017          * Suspend ourselves, so that the kernel stack and therefore
 2018          * the userland registers saved in the trapframe are around
 2019          * for coredump() to write them out.
 2020          */
 2021         KERNEL_PROC_LOCK(l);
 2022         l->l_flag &= ~L_DETACHED;
 2023         SCHED_LOCK(s);
 2024         l->l_stat = LSSUSPENDED;
 2025         l->l_proc->p_nrlwps--;
 2026         /* XXX NJWLWP check if this makes sense here: */
 2027         l->l_proc->p_stats->p_ru.ru_nvcsw++;
 2028         mi_switch(l, NULL);
 2029         SCHED_ASSERT_UNLOCKED();
 2030         splx(s);
 2031 
 2032         lwp_exit(l);
 2033 }
 2034 
 2035 void
 2036 sigexit(struct lwp *l, int signum)
 2037 {
 2038         struct proc     *p;
 2039 #if 0
 2040         struct lwp      *l2;
 2041 #endif
 2042         int             exitsig;
 2043 #ifdef COREDUMP
 2044         int             error;
 2045 #endif
 2046 
 2047         p = l->l_proc;
 2048 
 2049         /*
 2050          * Don't permit coredump() or exit1() multiple times
 2051          * in the same process.
 2052          */
 2053         if (p->p_flag & P_WEXIT) {
 2054                 KERNEL_PROC_UNLOCK(l);
 2055                 (*p->p_userret)(l, p->p_userret_arg);
 2056         }
 2057         p->p_flag |= P_WEXIT;
 2058         /* We don't want to switch away from exiting. */
 2059         /* XXX multiprocessor: stop LWPs on other processors. */
 2060 #if 0
 2061         if (p->p_flag & P_SA) {
 2062                 LIST_FOREACH(l2, &p->p_lwps, l_sibling)
 2063                     l2->l_flag &= ~L_SA;
 2064                 p->p_flag &= ~P_SA;
 2065         }
 2066 #endif
 2067 
 2068         /* Make other LWPs stick around long enough to be dumped */
 2069         p->p_userret = lwp_coredump_hook;
 2070         p->p_userret_arg = NULL;
 2071 
 2072         exitsig = signum;
 2073         p->p_acflag |= AXSIG;
 2074         if (sigprop[signum] & SA_CORE) {
 2075                 p->p_sigctx.ps_signo = signum;
 2076 #ifdef COREDUMP
 2077                 if ((error = coredump(l, NULL)) == 0)
 2078                         exitsig |= WCOREFLAG;
 2079 #endif
 2080 
 2081                 if (kern_logsigexit) {
 2082                         /* XXX What if we ever have really large UIDs? */
 2083                         int uid = l->l_cred ?
 2084                             (int)kauth_cred_geteuid(l->l_cred) : -1;
 2085 
 2086 #ifdef COREDUMP
 2087                         if (error)
 2088                                 log(LOG_INFO, lognocoredump, p->p_pid,
 2089                                     p->p_comm, uid, signum, error);
 2090                         else
 2091 #endif
 2092                                 log(LOG_INFO, logcoredump, p->p_pid,
 2093                                     p->p_comm, uid, signum);
 2094                 }
 2095 
 2096 #ifdef PAX_SEGVGUARD
 2097                 pax_segvguard(l, p->p_textvp, p->p_comm, TRUE);
 2098 #endif /* PAX_SEGVGUARD */
 2099         }
 2100 
 2101         exit1(l, W_EXITCODE(0, exitsig));
 2102         /* NOTREACHED */
 2103 }
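
/*
 * Illustrative userland sketch (not part of this source): the
 * W_EXITCODE(0, exitsig) status assembled above, with WCOREFLAG or'ed in
 * after a successful coredump(), is what the parent observes through the
 * wait(2) status macros.
 */
#include <sys/types.h>
#include <sys/wait.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0)
		abort();	/* SIGABRT has SA_CORE set, so sigexit() tries to dump */
	waitpid(pid, &status, 0);
	if (WIFSIGNALED(status))
		printf("killed by signal %d%s\n", WTERMSIG(status),
		    WCOREDUMP(status) ? " (core dumped)" : "");
	return 0;
}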
 2104 
 2105 #ifdef COREDUMP
 2106 struct coredump_iostate {
 2107         struct lwp *io_lwp;
 2108         struct vnode *io_vp;
 2109         kauth_cred_t io_cred;
 2110         off_t io_offset;
 2111 };
 2112 
 2113 int
 2114 coredump_write(void *cookie, enum uio_seg segflg, const void *data, size_t len)
 2115 {
 2116         struct coredump_iostate *io = cookie;
 2117         int error;
 2118 
 2119         error = vn_rdwr(UIO_WRITE, io->io_vp, __UNCONST(data), len,
 2120             io->io_offset, segflg,
 2121             IO_NODELOCKED|IO_UNIT, io->io_cred, NULL,
 2122             segflg == UIO_USERSPACE ? io->io_lwp : NULL);
 2123         if (error) {
 2124                 printf("pid %d (%s): %s write of %zu@%p at %lld failed: %d\n",
 2125                     io->io_lwp->l_proc->p_pid, io->io_lwp->l_proc->p_comm,
 2126                     segflg == UIO_USERSPACE ? "user" : "system",
 2127                     len, data, (long long) io->io_offset, error);
 2128                 return (error);
 2129         }
 2130 
 2131         io->io_offset += len;
 2132         return (0);
 2133 }
 2134 
 2135 /*
 2136  * Dump core, into a file whose name is built from the core name pattern
 2137  * by build_corename(), unless the process was setuid/setgid.
 2138  */
 2139 int
 2140 coredump(struct lwp *l, const char *pattern)
 2141 {
 2142         struct vnode            *vp;
 2143         struct proc             *p;
 2144         struct vmspace          *vm;
 2145         kauth_cred_t            cred;
 2146         struct nameidata        nd;
 2147         struct vattr            vattr;
 2148         struct mount            *mp;
 2149         struct coredump_iostate io;
 2150         int                     error, error1;
 2151         char                    *name = NULL;
 2152 
 2153         p = l->l_proc;
 2154         vm = p->p_vmspace;
 2155         cred = l->l_cred;
 2156 
 2157         /*
 2158          * Make sure the process is not set-id, to prevent data leaks,
 2159          * unless set-id coredumps were specifically allowed.
 2160          */
 2161         if ((p->p_flag & P_SUGID) && !security_setidcore_dump)
 2162                 return EPERM;
 2163 
 2164         /*
 2165          * Refuse to core if the data + stack + user size is larger than
 2166          * the core dump limit.  XXX THIS IS WRONG, because of mapped
 2167          * data.
 2168          */
 2169         if (USPACE + ctob(vm->vm_dsize + vm->vm_ssize) >=
 2170             p->p_rlimit[RLIMIT_CORE].rlim_cur)
 2171                 return EFBIG;           /* better error code? */
 2172 
 2173 restart:
 2174         /*
 2175          * The core dump will go in the current working directory.  Make
 2176          * sure that the directory is still there and that the mount flags
 2177          * allow us to write core dumps there.
 2178          */
 2179         vp = p->p_cwdi->cwdi_cdir;
 2180         if (vp->v_mount == NULL ||
 2181             (vp->v_mount->mnt_flag & MNT_NOCOREDUMP) != 0) {
 2182                 error = EPERM;
 2183                 goto done;
 2184         }
 2185 
 2186         if ((p->p_flag & P_SUGID) && security_setidcore_dump)
 2187                 pattern = security_setidcore_path;
 2188 
 2189         if (pattern == NULL)
 2190                 pattern = p->p_limit->pl_corename;
 2191         if (name == NULL) {
 2192                 name = PNBUF_GET();
 2193         }
 2194         if ((error = build_corename(p, name, pattern, MAXPATHLEN)) != 0)
 2195                 goto done;
 2196         NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, l);
 2197         if ((error = vn_open(&nd, O_CREAT | O_NOFOLLOW | FWRITE,
 2198             S_IRUSR | S_IWUSR)) != 0)
 2199                 goto done;
 2200         vp = nd.ni_vp;
 2201 
 2202         if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
 2203                 VOP_UNLOCK(vp, 0);
 2204                 if ((error = vn_close(vp, FWRITE, cred, l)) != 0)
 2205                         goto done;
 2206                 if ((error = vn_start_write(NULL, &mp,
 2207                     V_WAIT | V_SLEEPONLY | V_PCATCH)) != 0)
 2208                         goto done;
 2209                 goto restart;
 2210         }
 2211 
 2212         /* Don't dump to non-regular files or files with links. */
 2213         if (vp->v_type != VREG ||
 2214             VOP_GETATTR(vp, &vattr, cred, l) || vattr.va_nlink != 1) {
 2215                 error = EINVAL;
 2216                 goto out;
 2217         }
 2218         VATTR_NULL(&vattr);
 2219         vattr.va_size = 0;
 2220 
 2221         if ((p->p_flag & P_SUGID) && security_setidcore_dump) {
 2222                 vattr.va_uid = security_setidcore_owner;
 2223                 vattr.va_gid = security_setidcore_group;
 2224                 vattr.va_mode = security_setidcore_mode;
 2225         }
 2226 
 2227         VOP_LEASE(vp, l, cred, LEASE_WRITE);
 2228         VOP_SETATTR(vp, &vattr, cred, l);
 2229         p->p_acflag |= ACORE;
 2230 
 2231         io.io_lwp = l;
 2232         io.io_vp = vp;
 2233         io.io_cred = cred;
 2234         io.io_offset = 0;
 2235 
 2236         /* Now dump the actual core file. */
 2237         error = (*p->p_execsw->es_coredump)(l, &io);
 2238  out:
 2239         VOP_UNLOCK(vp, 0);
 2240         vn_finished_write(mp, 0);
 2241         error1 = vn_close(vp, FWRITE, cred, l);
 2242         if (error == 0)
 2243                 error = error1;
 2244 done:
 2245         if (name != NULL)
 2246                 PNBUF_PUT(name);
 2247         return error;
 2248 }
 2249 #endif /* COREDUMP */
 2250 
 2251 /*
 2252  * Nonexistent system call-- signal process (may want to handle it).
 2253  * Flag error in case process won't see signal immediately (blocked or ignored).
 2254  */
 2255 #ifndef PTRACE
 2256 __weak_alias(sys_ptrace, sys_nosys);
 2257 #endif
 2258 
 2259 /* ARGSUSED */
 2260 int
 2261 sys_nosys(struct lwp *l, void *v, register_t *retval)
 2262 {
 2263         struct proc     *p;
 2264 
 2265         p = l->l_proc;
 2266         psignal(p, SIGSYS);
 2267         return (ENOSYS);
 2268 }
 2269 
 2270 #ifdef COREDUMP
 2271 static int
 2272 build_corename(struct proc *p, char *dst, const char *src, size_t len)
 2273 {
 2274         const char      *s;
 2275         char            *d, *end;
 2276         int             i;
 2277 
 2278         for (s = src, d = dst, end = d + len; *s != '\0'; s++) {
 2279                 if (*s == '%') {
 2280                         switch (*(s + 1)) {
 2281                         case 'n':
 2282                                 i = snprintf(d, end - d, "%s", p->p_comm);
 2283                                 break;
 2284                         case 'p':
 2285                                 i = snprintf(d, end - d, "%d", p->p_pid);
 2286                                 break;
 2287                         case 'u':
 2288                                 i = snprintf(d, end - d, "%.*s",
 2289                                     (int)sizeof p->p_pgrp->pg_session->s_login,
 2290                                     p->p_pgrp->pg_session->s_login);
 2291                                 break;
 2292                         case 't':
 2293                                 i = snprintf(d, end - d, "%ld",
 2294                                     p->p_stats->p_start.tv_sec);
 2295                                 break;
 2296                         default:
 2297                                 goto copy;
 2298                         }
 2299                         d += i;
 2300                         s++;
 2301                 } else {
 2302  copy:                  *d = *s;
 2303                         d++;
 2304                 }
 2305                 if (d >= end)
 2306                         return (ENAMETOOLONG);
 2307         }
 2308         *d = '\0';
 2309         return 0;
 2310 }
 2311 #endif /* COREDUMP */
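
/*
 * Hedged illustration (not part of this source) of how build_corename()
 * expands a core name pattern.  The pattern normally comes from the
 * per-process core name limit (commonly set system-wide through the NetBSD
 * kern.defcorename sysctl); the expansions follow the switch above:
 *
 *	pattern				result for pid 123 of "sh", login "alice"
 *	"%n.core"			"sh.core"
 *	"%n.%p.core"			"sh.123.core"
 *	"/var/crash/%u/%n.core"		"/var/crash/alice/sh.core"
 *	"%t.core"			"<process start time in seconds>.core"
 */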
 2312 
 2313 void
 2314 getucontext(struct lwp *l, ucontext_t *ucp)
 2315 {
 2316         struct proc     *p;
 2317 
 2318         p = l->l_proc;
 2319 
 2320         ucp->uc_flags = 0;
 2321         ucp->uc_link = l->l_ctxlink;
 2322 
 2323         (void)sigprocmask1(p, 0, NULL, &ucp->uc_sigmask);
 2324         ucp->uc_flags |= _UC_SIGMASK;
 2325 
 2326         /*
 2327          * The (unsupplied) definition of the `current execution stack'
 2328          * in the System V Interface Definition appears to allow returning
 2329          * the main context stack.
 2330          */
 2331         if ((p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK) == 0) {
 2332                 ucp->uc_stack.ss_sp = (void *)USRSTACK;
 2333                 ucp->uc_stack.ss_size = ctob(p->p_vmspace->vm_ssize);
 2334                 ucp->uc_stack.ss_flags = 0;     /* XXX, def. is Very Fishy */
 2335         } else {
 2336                 /* Simply copy alternate signal execution stack. */
 2337                 ucp->uc_stack = p->p_sigctx.ps_sigstk;
 2338         }
 2339         ucp->uc_flags |= _UC_STACK;
 2340 
 2341         cpu_getmcontext(l, &ucp->uc_mcontext, &ucp->uc_flags);
 2342 }
 2343 
 2344 /* ARGSUSED */
 2345 int
 2346 sys_getcontext(struct lwp *l, void *v, register_t *retval)
 2347 {
 2348         struct sys_getcontext_args /* {
 2349                 syscallarg(struct __ucontext *) ucp;
 2350         } */ *uap = v;
 2351         ucontext_t uc;
 2352 
 2353         getucontext(l, &uc);
 2354 
 2355         return (copyout(&uc, SCARG(uap, ucp), sizeof (*SCARG(uap, ucp))));
 2356 }
 2357 
 2358 int
 2359 setucontext(struct lwp *l, const ucontext_t *ucp)
 2360 {
 2361         struct proc     *p;
 2362         int             error;
 2363 
 2364         p = l->l_proc;
 2365         if ((error = cpu_setmcontext(l, &ucp->uc_mcontext, ucp->uc_flags)) != 0)
 2366                 return (error);
 2367         l->l_ctxlink = ucp->uc_link;
 2368 
 2369         if ((ucp->uc_flags & _UC_SIGMASK) != 0)
 2370                 sigprocmask1(p, SIG_SETMASK, &ucp->uc_sigmask, NULL);
 2371 
 2372         /*
 2373          * If there was stack information, update whether or not we are
 2374          * still running on an alternate signal stack.
 2375          */
 2376         if ((ucp->uc_flags & _UC_STACK) != 0) {
 2377                 if (ucp->uc_stack.ss_flags & SS_ONSTACK)
 2378                         p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;
 2379                 else
 2380                         p->p_sigctx.ps_sigstk.ss_flags &= ~SS_ONSTACK;
 2381         }
 2382 
 2383         return 0;
 2384 }
 2385 
 2386 /* ARGSUSED */
 2387 int
 2388 sys_setcontext(struct lwp *l, void *v, register_t *retval)
 2389 {
 2390         struct sys_setcontext_args /* {
 2391                 syscallarg(const ucontext_t *) ucp;
 2392         } */ *uap = v;
 2393         ucontext_t uc;
 2394         int error;
 2395 
 2396         error = copyin(SCARG(uap, ucp), &uc, sizeof (uc));
 2397         if (error)
 2398                 return (error);
 2399         if (!(uc.uc_flags & _UC_CPU))
 2400                 return (EINVAL);
 2401         error = setucontext(l, &uc);
 2402         if (error)
 2403                 return (error);
 2404 
 2405         return (EJUSTRETURN);
 2406 }
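
/*
 * Illustrative userland sketch (not part of this source): the libc
 * getcontext(3)/setcontext(3) pair, which on NetBSD is backed by the
 * sys_getcontext()/sys_setcontext() syscalls above.  The volatile flag is
 * what keeps the program from looping forever once the context is resumed.
 */
#include <ucontext.h>
#include <stdio.h>

int
main(void)
{
	ucontext_t uc;
	volatile int resumed = 0;

	getcontext(&uc);		/* snapshot: signal mask, stack, machine context */
	if (!resumed) {
		resumed = 1;
		printf("first pass\n");
		setcontext(&uc);	/* jump back to the getcontext() point */
	}
	printf("after resume\n");
	return 0;
}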
 2407 
 2408 /*
 2409  * sigtimedwait(2) system call, used also for implementation
 2410  * of sigwaitinfo() and sigwait().
 2411  *
 2412  * This only handles a single LWP in signal wait. libpthread provides
 2413  * its own sigtimedwait() wrapper to do the right thing for individual threads.
 2414  */
 2415 int
 2416 sys___sigtimedwait(struct lwp *l, void *v, register_t *retval)
 2417 {
 2418         return __sigtimedwait1(l, v, retval, copyout, copyin, copyout);
 2419 }
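
/*
 * Illustrative userland sketch (not part of this source): typical use of
 * sigtimedwait(2).  The signal must be blocked first so it stays pending
 * rather than being delivered to a handler; on timeout the call fails with
 * EAGAIN, matching the timeout handling in __sigtimedwait1() below.
 */
#include <signal.h>
#include <stdio.h>
#include <time.h>

int
main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { 5, 0 };	/* wait at most five seconds */

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	if (sigtimedwait(&set, &info, &ts) == -1)
		perror("sigtimedwait");	/* EAGAIN on timeout */
	else
		printf("got signal %d from pid %ld\n",
		    info.si_signo, (long)info.si_pid);
	return 0;
}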
 2420 
 2421 int
 2422 __sigtimedwait1(struct lwp *l, void *v, register_t *retval,
 2423     copyout_t put_info, copyin_t fetch_timeout, copyout_t put_timeout)
 2424 {
 2425         struct sys___sigtimedwait_args /* {
 2426                 syscallarg(const sigset_t *) set;
 2427                 syscallarg(siginfo_t *) info;
 2428                 syscallarg(struct timespec *) timeout;
 2429         } */ *uap = v;
 2430         sigset_t *waitset, twaitset;
 2431         struct proc *p = l->l_proc;
 2432         int error, signum;
 2433         int timo = 0;
 2434         struct timespec ts, tsstart;
 2435         ksiginfo_t *ksi;
 2436 
 2437         memset(&tsstart, 0, sizeof tsstart);     /* XXX gcc */
 2438 
 2439         MALLOC(waitset, sigset_t *, sizeof(sigset_t), M_TEMP, M_WAITOK);
 2440 
 2441         if ((error = copyin(SCARG(uap, set), waitset, sizeof(sigset_t))))
 2442                 goto free_waitset;
 2443 
 2444         /*
 2445          * Silently ignore SA_CANTMASK signals. psignal() would
 2446          * ignore SA_CANTMASK signals in waitset anyway; we strip them
 2447          * here only for the siglist check below.
 2448          */
 2449         sigminusset(&sigcantmask, waitset);
 2450 
 2451         /*
 2452          * First scan siglist and check if there is signal from
 2453          * our waitset already pending.
 2454          */
 2455         twaitset = *waitset;
 2456         __sigandset(&p->p_sigctx.ps_siglist, &twaitset);
 2457         if ((signum = firstsig(&twaitset))) {
 2458                 /* found pending signal */
 2459                 sigdelset(&p->p_sigctx.ps_siglist, signum);
 2460                 ksi = ksiginfo_dequeue(p, signum);
 2461                 if (!ksi) {
 2462                         /* No queued siginfo, manufacture one */
 2463                         ksi = ksiginfo_alloc(PR_WAITOK);
 2464                         KSI_INIT(ksi);
 2465                         ksi->ksi_info._signo = signum;
 2466                         ksi->ksi_info._code = SI_USER;
 2467                 }
 2468 
 2469                 goto sig;
 2470         }
 2471 
 2472         /*
 2473          * Calculate timeout, if it was specified.
 2474          */
 2475         if (SCARG(uap, timeout)) {
 2476                 uint64_t ms;
 2477 
 2478                 if ((error = (*fetch_timeout)(SCARG(uap, timeout),
 2479                     &ts, sizeof(ts))))
 2480                         goto free_waitset;
 2481 
 2482                 ms = (ts.tv_sec * 1000) + (ts.tv_nsec / 1000000);
 2483                 timo = mstohz(ms);
 2484                 if (timo == 0 && ts.tv_sec == 0 && ts.tv_nsec > 0)
 2485                         timo = 1;
 2486                 if (timo <= 0) {
 2487                         error = EAGAIN;
 2488                         goto free_waitset;
 2489                 }
 2490 
 2491                 /*
 2492                  * Remember the current uptime; it will be used in the
 2493                  * ECANCELED/ERESTART case.
 2494                  */
 2495                 getnanouptime(&tsstart);
 2496         }
 2497 
 2498         /*
 2499          * Set up the ps_sigwait list. Pass a pointer to malloced memory
 2500          * here; it's not possible to pass a pointer to a structure on the
 2501          * current process's stack, since the process might be swapped out
 2502          * by the time the signal is delivered.
 2503          */
 2504         ksi = ksiginfo_alloc(PR_WAITOK);
 2505         p->p_sigctx.ps_sigwaited = ksi;
 2506         p->p_sigctx.ps_sigwait = waitset;
 2507 
 2508         /*
 2509          * Wait for signal to arrive. We can either be woken up or
 2510          * time out.
 2511          */
 2512         error = tsleep(&p->p_sigctx.ps_sigwait, PPAUSE|PCATCH, "sigwait", timo);
 2513 
 2514         /*
 2515          * Need to find out if we woke as a result of lwp_wakeup()
 2516          * or a signal outside our wait set.
 2517          */
 2518         if (error == EINTR && p->p_sigctx.ps_sigwaited
 2519             && !firstsig(&p->p_sigctx.ps_siglist)) {
 2520                 /* wakeup via _lwp_wakeup() */
 2521                 error = ECANCELED;
 2522         } else if (!error && p->p_sigctx.ps_sigwaited) {
 2523                 /* spurious wakeup - arrange for syscall restart */
 2524                 error = ERESTART;
 2525                 goto free_ksiginfo;
 2526         }
 2527 
 2528         /*
 2529          * On error, clear sigwait indication. psignal() clears it
 2530          * in !error case.
 2531          */
 2532         if (error) {
 2533                 p->p_sigctx.ps_sigwaited = NULL;
 2534 
 2535                 /*
 2536                  * If the sleep was interrupted (either by a signal or a
 2537                  * wakeup), update the timeout and copy the new value back
 2538                  * out to userland. It will be used when the syscall is
 2539                  * restarted or called again.
 2540                  */
 2541                 if (timo && (error == ERESTART || error == ECANCELED)) {
 2542                         struct timespec tsnow;
 2543                         int err;
 2544 
 2545 /* XXX double check the following change */
 2546                         getnanouptime(&tsnow);
 2547 
 2548                         /* compute how much time has passed since start */
 2549                         timespecsub(&tsnow, &tsstart, &tsnow);
 2550                         /* subtract the passed time from the timeout */
 2551                         timespecsub(&ts, &tsnow, &ts);
 2552 
 2553                         if (ts.tv_sec < 0) {
 2554                                 error = EAGAIN;
 2555                                 goto free_ksiginfo;
 2556                         }
 2557 /* XXX double check the previous change */
 2558 
 2559                         /* copy updated timeout to userland */
 2560                         if ((err = (*put_timeout)(&ts, SCARG(uap, timeout),
 2561                             sizeof(ts)))) {
 2562                                 error = err;
 2563                                 goto free_ksiginfo;
 2564                         }
 2565                 }
 2566 
 2567                 goto free_ksiginfo;
 2568         }
 2569 
 2570         /*
 2571          * If a signal from the wait set arrived, copy it to userland.
 2572          * Copy only the used part of siginfo, the padding part is
 2573          * left unchanged (userland is not supposed to touch it anyway).
 2574          */
 2575  sig:
 2576         error = (*put_info)(&ksi->ksi_info, SCARG(uap, info),
 2577             sizeof(ksi->ksi_info));
 2578         /* FALLTHROUGH */
 2579 
 2580  free_ksiginfo:
 2581         ksiginfo_free(ksi);
 2582         p->p_sigctx.ps_sigwait = NULL;
 2583  free_waitset:
 2584         FREE(waitset, M_TEMP);
 2585 
 2586         return (error);
 2587 }
 2588 
 2589 /*
 2590  * Returns true if the signal is ignored or masked for the passed process.
 2591  */
 2592 int
 2593 sigismasked(struct proc *p, int sig)
 2594 {
 2595 
 2596         return (sigismember(&p->p_sigctx.ps_sigignore, sig) ||
 2597             sigismember(&p->p_sigctx.ps_sigmask, sig));
 2598 }
 2599 
 2600 static int
 2601 filt_sigattach(struct knote *kn)
 2602 {
 2603         struct proc *p = curproc;
 2604 
 2605         kn->kn_ptr.p_proc = p;
 2606         kn->kn_flags |= EV_CLEAR;               /* automatically set */
 2607 
 2608         SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
 2609 
 2610         return (0);
 2611 }
 2612 
 2613 static void
 2614 filt_sigdetach(struct knote *kn)
 2615 {
 2616         struct proc *p = kn->kn_ptr.p_proc;
 2617 
 2618         SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
 2619 }
 2620 
 2621 /*
 2622  * signal knotes are shared with proc knotes, so we apply a mask to
 2623  * the hint in order to differentiate them from process hints.  This
 2624  * could be avoided by using a signal-specific knote list, but probably
 2625  * isn't worth the trouble.
 2626  */
 2627 static int
 2628 filt_signal(struct knote *kn, long hint)
 2629 {
 2630 
 2631         if (hint & NOTE_SIGNAL) {
 2632                 hint &= ~NOTE_SIGNAL;
 2633 
 2634                 if (kn->kn_id == hint)
 2635                         kn->kn_data++;
 2636         }
 2637         return (kn->kn_data != 0);
 2638 }
 2639 
 2640 const struct filterops sig_filtops = {
 2641         0, filt_sigattach, filt_sigdetach, filt_signal
 2642 };
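
/*
 * Illustrative userland sketch (not part of this source): registering an
 * EVFILT_SIGNAL knote serviced by the filterops above.  As kqueue(2)
 * documents, the filter coexists with normal signal disposition, and
 * kev.data counts how often the signal was posted since the knote was last
 * retrieved (EV_CLEAR is set automatically in filt_sigattach()).
 */
#include <sys/event.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct kevent kev;
	int kq = kqueue();

	signal(SIGUSR1, SIG_IGN);	/* no handler needed for the knote to fire */
	EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);

	if (kevent(kq, NULL, 0, &kev, 1, NULL) == 1)	/* blocks until SIGUSR1 arrives */
		printf("SIGUSR1 posted %ld time(s)\n", (long)kev.data);
	close(kq);
	return 0;
}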
