FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_sig.c

    1 /*      $OpenBSD: kern_sig.c,v 1.304 2023/01/31 15:18:56 deraadt Exp $  */
    2 /*      $NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $   */
    3 
    4 /*
    5  * Copyright (c) 1997 Theo de Raadt. All rights reserved. 
    6  * Copyright (c) 1982, 1986, 1989, 1991, 1993
    7  *      The Regents of the University of California.  All rights reserved.
    8  * (c) UNIX System Laboratories, Inc.
    9  * All or some portions of this file are derived from material licensed
   10  * to the University of California by American Telephone and Telegraph
   11  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
   12  * the permission of UNIX System Laboratories, Inc.
   13  *
   14  * Redistribution and use in source and binary forms, with or without
   15  * modification, are permitted provided that the following conditions
   16  * are met:
   17  * 1. Redistributions of source code must retain the above copyright
   18  *    notice, this list of conditions and the following disclaimer.
   19  * 2. Redistributions in binary form must reproduce the above copyright
   20  *    notice, this list of conditions and the following disclaimer in the
   21  *    documentation and/or other materials provided with the distribution.
   22  * 3. Neither the name of the University nor the names of its contributors
   23  *    may be used to endorse or promote products derived from this software
   24  *    without specific prior written permission.
   25  *
   26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   36  * SUCH DAMAGE.
   37  *
   38  *      @(#)kern_sig.c  8.7 (Berkeley) 4/18/94
   39  */
   40 
   41 #include <sys/param.h>
   42 #include <sys/signalvar.h>
   43 #include <sys/queue.h>
   44 #include <sys/namei.h>
   45 #include <sys/vnode.h>
   46 #include <sys/event.h>
   47 #include <sys/proc.h>
   48 #include <sys/systm.h>
   49 #include <sys/acct.h>
   50 #include <sys/fcntl.h>
   51 #include <sys/filedesc.h>
   52 #include <sys/wait.h>
   53 #include <sys/ktrace.h>
   54 #include <sys/stat.h>
   55 #include <sys/malloc.h>
   56 #include <sys/pool.h>
   57 #include <sys/sched.h>
   58 #include <sys/user.h>
   59 #include <sys/syslog.h>
   60 #include <sys/ttycom.h>
   61 #include <sys/pledge.h>
   62 #include <sys/witness.h>
   63 #include <sys/exec_elf.h>
   64 
   65 #include <sys/mount.h>
   66 #include <sys/syscallargs.h>
   67 
   68 #include <uvm/uvm_extern.h>
   69 #include <machine/tcb.h>
   70 
   71 int nosuidcoredump = 1;
   72 
   73 int     filt_sigattach(struct knote *kn);
   74 void    filt_sigdetach(struct knote *kn);
   75 int     filt_signal(struct knote *kn, long hint);
   76 
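      /*
       * kqueue filter ops for EVFILT_SIGNAL knotes; events are posted
       * via KNOTE() in ptsignal().
       */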
   77 const struct filterops sig_filtops = {
   78         .f_flags        = 0,
   79         .f_attach       = filt_sigattach,
   80         .f_detach       = filt_sigdetach,
   81         .f_event        = filt_signal,
   82 };
   83 
   84 /*
   85  * The array below categorizes the signals and their default actions.
   86  */
   87 const int sigprop[NSIG] = {
   88         0,                      /* unused */
   89         SA_KILL,                /* SIGHUP */
   90         SA_KILL,                /* SIGINT */
   91         SA_KILL|SA_CORE,        /* SIGQUIT */
   92         SA_KILL|SA_CORE,        /* SIGILL */
   93         SA_KILL|SA_CORE,        /* SIGTRAP */
   94         SA_KILL|SA_CORE,        /* SIGABRT */
   95         SA_KILL|SA_CORE,        /* SIGEMT */
   96         SA_KILL|SA_CORE,        /* SIGFPE */
   97         SA_KILL,                /* SIGKILL */
   98         SA_KILL|SA_CORE,        /* SIGBUS */
   99         SA_KILL|SA_CORE,        /* SIGSEGV */
  100         SA_KILL|SA_CORE,        /* SIGSYS */
  101         SA_KILL,                /* SIGPIPE */
  102         SA_KILL,                /* SIGALRM */
  103         SA_KILL,                /* SIGTERM */
  104         SA_IGNORE,              /* SIGURG */
  105         SA_STOP,                /* SIGSTOP */
  106         SA_STOP|SA_TTYSTOP,     /* SIGTSTP */
  107         SA_IGNORE|SA_CONT,      /* SIGCONT */
  108         SA_IGNORE,              /* SIGCHLD */
  109         SA_STOP|SA_TTYSTOP,     /* SIGTTIN */
  110         SA_STOP|SA_TTYSTOP,     /* SIGTTOU */
  111         SA_IGNORE,              /* SIGIO */
  112         SA_KILL,                /* SIGXCPU */
  113         SA_KILL,                /* SIGXFSZ */
  114         SA_KILL,                /* SIGVTALRM */
  115         SA_KILL,                /* SIGPROF */
  116         SA_IGNORE,              /* SIGWINCH  */
  117         SA_IGNORE,              /* SIGINFO */
  118         SA_KILL,                /* SIGUSR1 */
  119         SA_KILL,                /* SIGUSR2 */
  120         SA_IGNORE,              /* SIGTHR */
  121 };
  122 
  123 #define CONTSIGMASK     (sigmask(SIGCONT))
  124 #define STOPSIGMASK     (sigmask(SIGSTOP) | sigmask(SIGTSTP) | \
  125                             sigmask(SIGTTIN) | sigmask(SIGTTOU))
  126 
  127 void setsigvec(struct proc *, int, struct sigaction *);
  128 
  129 void proc_stop(struct proc *p, int);
  130 void proc_stop_sweep(void *);
  131 void *proc_stop_si;
  132 
  133 void setsigctx(struct proc *, int, struct sigctx *);
  134 void postsig_done(struct proc *, int, sigset_t, int);
  135 void postsig(struct proc *, int, struct sigctx *);
  136 int cansignal(struct proc *, struct process *, int);
  137 
  138 struct pool sigacts_pool;       /* memory pool for sigacts structures */
  139 
  140 void sigio_del(struct sigiolst *);
  141 void sigio_unlink(struct sigio_ref *, struct sigiolst *);
  142 struct mutex sigio_lock = MUTEX_INITIALIZER(IPL_HIGH);
  143 
  144 /*
  145  * Can thread p send the signal signum to process qr?
  146  */
  147 int
  148 cansignal(struct proc *p, struct process *qr, int signum)
  149 {
  150         struct process *pr = p->p_p;
  151         struct ucred *uc = p->p_ucred;
  152         struct ucred *quc = qr->ps_ucred;
  153 
  154         if (uc->cr_uid == 0)
  155                 return (1);             /* root can always signal */
  156 
  157         if (pr == qr)
  158                 return (1);             /* process can always signal itself */
  159 
  160         /* optimization: if the creds are the same, the tests below will pass */
  161         if (uc == quc)
  162                 return (1);
  163 
  164         if (signum == SIGCONT && qr->ps_session == pr->ps_session)
  165                 return (1);             /* SIGCONT in session */
  166 
  167         /*
  168          * Using kill(), only certain signals can be sent to setugid
  169          * child processes.
  170          */
  171         if (qr->ps_flags & PS_SUGID) {
  172                 switch (signum) {
  173                 case 0:
  174                 case SIGKILL:
  175                 case SIGINT:
  176                 case SIGTERM:
  177                 case SIGALRM:
  178                 case SIGSTOP:
  179                 case SIGTTIN:
  180                 case SIGTTOU:
  181                 case SIGTSTP:
  182                 case SIGHUP:
  183                 case SIGUSR1:
  184                 case SIGUSR2:
  185                         if (uc->cr_ruid == quc->cr_ruid ||
  186                             uc->cr_uid == quc->cr_ruid)
  187                                 return (1);
  188                 }
  189                 return (0);
  190         }
  191 
  192         if (uc->cr_ruid == quc->cr_ruid ||
  193             uc->cr_ruid == quc->cr_svuid ||
  194             uc->cr_uid == quc->cr_ruid ||
  195             uc->cr_uid == quc->cr_svuid)
  196                 return (1);
  197         return (0);
  198 }
  199 
  200 /*
  201  * Initialize signal-related data structures.
  202  */
  203 void
  204 signal_init(void)
  205 {
  206         proc_stop_si = softintr_establish(IPL_SOFTCLOCK, proc_stop_sweep,
  207             NULL);
  208         if (proc_stop_si == NULL)
  209                 panic("signal_init failed to register softintr");
  210 
  211         pool_init(&sigacts_pool, sizeof(struct sigacts), 0, IPL_NONE,
  212             PR_WAITOK, "sigapl", NULL);
  213 }
  214 
  215 /*
  216  * Initialize a new sigaltstack structure.
  217  */
  218 void
  219 sigstkinit(struct sigaltstack *ss)
  220 {
  221         ss->ss_flags = SS_DISABLE;
  222         ss->ss_size = 0;
  223         ss->ss_sp = NULL;
  224 }
  225 
  226 /*
  227  * Create an initial sigacts structure, using the same signal state
  228  * as pr.
  229  */
  230 struct sigacts *
  231 sigactsinit(struct process *pr)
  232 {
  233         struct sigacts *ps;
  234 
  235         ps = pool_get(&sigacts_pool, PR_WAITOK);
  236         memcpy(ps, pr->ps_sigacts, sizeof(struct sigacts));
  237         return (ps);
  238 }
  239 
  240 /*
  241  * Release a sigacts structure.
  242  */
  243 void
  244 sigactsfree(struct sigacts *ps)
  245 {
  246         pool_put(&sigacts_pool, ps);
  247 }
  248 
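      /*
       * sigaction(2): copy out the currently installed action for a signal
       * and/or install a new one via setsigvec().  SIGKILL and SIGSTOP may
       * be queried but not changed.
       */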
  249 int
  250 sys_sigaction(struct proc *p, void *v, register_t *retval)
  251 {
  252         struct sys_sigaction_args /* {
  253                 syscallarg(int) signum;
  254                 syscallarg(const struct sigaction *) nsa;
  255                 syscallarg(struct sigaction *) osa;
  256         } */ *uap = v;
  257         struct sigaction vec;
  258 #ifdef KTRACE
  259         struct sigaction ovec;
  260 #endif
  261         struct sigaction *sa;
  262         const struct sigaction *nsa;
  263         struct sigaction *osa;
  264         struct sigacts *ps = p->p_p->ps_sigacts;
  265         int signum;
  266         int bit, error;
  267 
  268         signum = SCARG(uap, signum);
  269         nsa = SCARG(uap, nsa);
  270         osa = SCARG(uap, osa);
  271 
  272         if (signum <= 0 || signum >= NSIG ||
  273             (nsa && (signum == SIGKILL || signum == SIGSTOP)))
  274                 return (EINVAL);
  275         sa = &vec;
  276         if (osa) {
  277                 mtx_enter(&p->p_p->ps_mtx);
  278                 sa->sa_handler = ps->ps_sigact[signum];
  279                 sa->sa_mask = ps->ps_catchmask[signum];
  280                 bit = sigmask(signum);
  281                 sa->sa_flags = 0;
  282                 if ((ps->ps_sigonstack & bit) != 0)
  283                         sa->sa_flags |= SA_ONSTACK;
  284                 if ((ps->ps_sigintr & bit) == 0)
  285                         sa->sa_flags |= SA_RESTART;
  286                 if ((ps->ps_sigreset & bit) != 0)
  287                         sa->sa_flags |= SA_RESETHAND;
  288                 if ((ps->ps_siginfo & bit) != 0)
  289                         sa->sa_flags |= SA_SIGINFO;
  290                 if (signum == SIGCHLD) {
  291                         if ((ps->ps_sigflags & SAS_NOCLDSTOP) != 0)
  292                                 sa->sa_flags |= SA_NOCLDSTOP;
  293                         if ((ps->ps_sigflags & SAS_NOCLDWAIT) != 0)
  294                                 sa->sa_flags |= SA_NOCLDWAIT;
  295                 }
  296                 mtx_leave(&p->p_p->ps_mtx);
  297                 if ((sa->sa_mask & bit) == 0)
  298                         sa->sa_flags |= SA_NODEFER;
  299                 sa->sa_mask &= ~bit;
  300                 error = copyout(sa, osa, sizeof (vec));
  301                 if (error)
  302                         return (error);
  303 #ifdef KTRACE
  304                 if (KTRPOINT(p, KTR_STRUCT))
  305                         ovec = vec;
  306 #endif
  307         }
  308         if (nsa) {
  309                 error = copyin(nsa, sa, sizeof (vec));
  310                 if (error)
  311                         return (error);
  312 #ifdef KTRACE
  313                 if (KTRPOINT(p, KTR_STRUCT))
  314                         ktrsigaction(p, sa);
  315 #endif
  316                 setsigvec(p, signum, sa);
  317         }
  318 #ifdef KTRACE
  319         if (osa && KTRPOINT(p, KTR_STRUCT))
  320                 ktrsigaction(p, &ovec);
  321 #endif
  322         return (0);
  323 }
  324 
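      /*
       * Record the action described by sa for signum in the process's
       * shared sigacts: store the handler and catch mask, update the
       * per-signal flag bits, and adjust ps_sigignore/ps_sigcatch,
       * discarding pending instances of a signal that becomes ignored.
       */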
  325 void
  326 setsigvec(struct proc *p, int signum, struct sigaction *sa)
  327 {
  328         struct sigacts *ps = p->p_p->ps_sigacts;
  329         int bit;
  330 
  331         bit = sigmask(signum);
  332 
  333         mtx_enter(&p->p_p->ps_mtx);
  334         ps->ps_sigact[signum] = sa->sa_handler;
  335         if ((sa->sa_flags & SA_NODEFER) == 0)
  336                 sa->sa_mask |= sigmask(signum);
  337         ps->ps_catchmask[signum] = sa->sa_mask &~ sigcantmask;
  338         if (signum == SIGCHLD) {
  339                 if (sa->sa_flags & SA_NOCLDSTOP)
  340                         atomic_setbits_int(&ps->ps_sigflags, SAS_NOCLDSTOP);
  341                 else
  342                         atomic_clearbits_int(&ps->ps_sigflags, SAS_NOCLDSTOP);
  343                 /*
  344                  * If the SA_NOCLDWAIT flag is set or the handler
  345                  * is SIG_IGN we reparent the dying child to PID 1
  346                  * (init) which will reap the zombie.  Because we use
  347                  * init to do our dirty work we never set SAS_NOCLDWAIT
  348                  * for PID 1.
  349                  * XXX exit1 rework means this is unnecessary?
  350                  */
  351                 if (initprocess->ps_sigacts != ps &&
  352                     ((sa->sa_flags & SA_NOCLDWAIT) ||
  353                     sa->sa_handler == SIG_IGN))
  354                         atomic_setbits_int(&ps->ps_sigflags, SAS_NOCLDWAIT);
  355                 else
  356                         atomic_clearbits_int(&ps->ps_sigflags, SAS_NOCLDWAIT);
  357         }
  358         if ((sa->sa_flags & SA_RESETHAND) != 0)
  359                 ps->ps_sigreset |= bit;
  360         else
  361                 ps->ps_sigreset &= ~bit;
  362         if ((sa->sa_flags & SA_SIGINFO) != 0)
  363                 ps->ps_siginfo |= bit;
  364         else
  365                 ps->ps_siginfo &= ~bit;
  366         if ((sa->sa_flags & SA_RESTART) == 0)
  367                 ps->ps_sigintr |= bit;
  368         else
  369                 ps->ps_sigintr &= ~bit;
  370         if ((sa->sa_flags & SA_ONSTACK) != 0)
  371                 ps->ps_sigonstack |= bit;
  372         else
  373                 ps->ps_sigonstack &= ~bit;
  374         /*
  375          * Set bit in ps_sigignore for signals that are set to SIG_IGN,
  376          * and for signals set to SIG_DFL where the default is to ignore.
  377          * However, don't put SIGCONT in ps_sigignore,
  378          * as we have to restart the process.
  379          */
  380         if (sa->sa_handler == SIG_IGN ||
  381             (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) {
  382                 atomic_clearbits_int(&p->p_siglist, bit);
  383                 atomic_clearbits_int(&p->p_p->ps_siglist, bit);
  384                 if (signum != SIGCONT)
  385                         ps->ps_sigignore |= bit;        /* easier in psignal */
  386                 ps->ps_sigcatch &= ~bit;
  387         } else {
  388                 ps->ps_sigignore &= ~bit;
  389                 if (sa->sa_handler == SIG_DFL)
  390                         ps->ps_sigcatch &= ~bit;
  391                 else
  392                         ps->ps_sigcatch |= bit;
  393         }
  394         mtx_leave(&p->p_p->ps_mtx);
  395 }
  396 
  397 /*
  398  * Initialize signal state for process 0;
  399  * set to ignore signals that are ignored by default.
  400  */
  401 void
  402 siginit(struct sigacts *ps)
  403 {
  404         int i;
  405 
  406         for (i = 0; i < NSIG; i++)
  407                 if (sigprop[i] & SA_IGNORE && i != SIGCONT)
  408                         ps->ps_sigignore |= sigmask(i);
  409         ps->ps_sigflags = SAS_NOCLDWAIT | SAS_NOCLDSTOP;
  410 }
  411 
  412 /*
  413  * Reset signals for an exec by the specified thread.
  414  */
  415 void
  416 execsigs(struct proc *p)
  417 {
  418         struct sigacts *ps;
  419         int nc, mask;
  420 
  421         ps = p->p_p->ps_sigacts;
  422         mtx_enter(&p->p_p->ps_mtx);
  423 
  424         /*
  425          * Reset caught signals.  Held signals remain held
  426          * through p_sigmask (unless they were caught,
  427          * and are now ignored by default).
  428          */
  429         while (ps->ps_sigcatch) {
  430                 nc = ffs((long)ps->ps_sigcatch);
  431                 mask = sigmask(nc);
  432                 ps->ps_sigcatch &= ~mask;
  433                 if (sigprop[nc] & SA_IGNORE) {
  434                         if (nc != SIGCONT)
  435                                 ps->ps_sigignore |= mask;
  436                         atomic_clearbits_int(&p->p_siglist, mask);
  437                         atomic_clearbits_int(&p->p_p->ps_siglist, mask);
  438                 }
  439                 ps->ps_sigact[nc] = SIG_DFL;
  440         }
  441         /*
  442          * Reset stack state to the user stack.
  443          * Clear set of signals caught on the signal stack.
  444          */
  445         sigstkinit(&p->p_sigstk);
  446         atomic_clearbits_int(&ps->ps_sigflags, SAS_NOCLDWAIT);
  447         if (ps->ps_sigact[SIGCHLD] == SIG_IGN)
  448                 ps->ps_sigact[SIGCHLD] = SIG_DFL;
  449         mtx_leave(&p->p_p->ps_mtx);
  450 }
  451 
  452 /*
  453  * Manipulate signal mask.
  454  * Note that we receive new mask, not pointer,
  455  * and return old mask as return value;
  456  * the library stub does the rest.
  457  */
  458 int
  459 sys_sigprocmask(struct proc *p, void *v, register_t *retval)
  460 {
  461         struct sys_sigprocmask_args /* {
  462                 syscallarg(int) how;
  463                 syscallarg(sigset_t) mask;
  464         } */ *uap = v;
  465         int error = 0;
  466         sigset_t mask;
  467 
  468         KASSERT(p == curproc);
  469 
  470         *retval = p->p_sigmask;
  471         mask = SCARG(uap, mask) &~ sigcantmask;
  472 
  473         switch (SCARG(uap, how)) {
  474         case SIG_BLOCK:
  475                 atomic_setbits_int(&p->p_sigmask, mask);
  476                 break;
  477         case SIG_UNBLOCK:
  478                 atomic_clearbits_int(&p->p_sigmask, mask);
  479                 break;
  480         case SIG_SETMASK:
  481                 p->p_sigmask = mask;
  482                 break;
  483         default:
  484                 error = EINVAL;
  485                 break;
  486         }
  487         return (error);
  488 }
  489 
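      /*
       * sigpending(2): return the union of the thread's and the process's
       * pending signal sets.
       */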
  490 int
  491 sys_sigpending(struct proc *p, void *v, register_t *retval)
  492 {
  493         *retval = p->p_siglist | p->p_p->ps_siglist;
  494         return (0);
  495 }
  496 
  497 /*
  498  * Temporarily replace calling proc's signal mask for the duration of a
  499  * system call.  Original signal mask will be restored by userret().
  500  */
  501 void
  502 dosigsuspend(struct proc *p, sigset_t newmask)
  503 {
  504         KASSERT(p == curproc);
  505 
  506         p->p_oldmask = p->p_sigmask;
  507         atomic_setbits_int(&p->p_flag, P_SIGSUSPEND);
  508         p->p_sigmask = newmask;
  509 }
  510 
  511 /*
  512  * Suspend thread until signal, providing mask to be set
  513  * in the meantime.  Note nonstandard calling convention:
  514  * libc stub passes mask, not pointer, to save a copyin.
  515  */
  516 int
  517 sys_sigsuspend(struct proc *p, void *v, register_t *retval)
  518 {
  519         struct sys_sigsuspend_args /* {
  520                 syscallarg(int) mask;
  521         } */ *uap = v;
  522 
  523         dosigsuspend(p, SCARG(uap, mask) &~ sigcantmask);
  524         while (tsleep_nsec(&nowake, PPAUSE|PCATCH, "sigsusp", INFSLP) == 0)
  525                 continue;
  526         /* always return EINTR rather than ERESTART... */
  527         return (EINTR);
  528 }
  529 
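      /*
       * Return non-zero if the given stack pointer lies within the
       * currently configured (and enabled) alternate signal stack.
       */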
  530 int
  531 sigonstack(size_t stack)
  532 {
  533         const struct sigaltstack *ss = &curproc->p_sigstk;
  534 
  535         return (ss->ss_flags & SS_DISABLE ? 0 :
  536             (stack - (size_t)ss->ss_sp < ss->ss_size));
  537 }
  538 
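      /*
       * sigaltstack(2): report the current alternate signal stack and
       * optionally install a new one.  The stack cannot be changed while
       * the thread is running on it, and a new stack region is remapped
       * as stack memory before being recorded.
       */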
  539 int
  540 sys_sigaltstack(struct proc *p, void *v, register_t *retval)
  541 {
  542         struct sys_sigaltstack_args /* {
  543                 syscallarg(const struct sigaltstack *) nss;
  544                 syscallarg(struct sigaltstack *) oss;
  545         } */ *uap = v;
  546         struct sigaltstack ss;
  547         const struct sigaltstack *nss;
  548         struct sigaltstack *oss;
  549         int onstack = sigonstack(PROC_STACK(p));
  550         int error;
  551 
  552         nss = SCARG(uap, nss);
  553         oss = SCARG(uap, oss);
  554 
  555         if (oss != NULL) {
  556                 ss = p->p_sigstk;
  557                 if (onstack)
  558                         ss.ss_flags |= SS_ONSTACK;
  559                 if ((error = copyout(&ss, oss, sizeof(ss))))
  560                         return (error);
  561         }
  562         if (nss == NULL)
  563                 return (0);
  564         error = copyin(nss, &ss, sizeof(ss));
  565         if (error)
  566                 return (error);
  567         if (onstack)
  568                 return (EPERM);
  569         if (ss.ss_flags & ~SS_DISABLE)
  570                 return (EINVAL);
  571         if (ss.ss_flags & SS_DISABLE) {
  572                 p->p_sigstk.ss_flags = ss.ss_flags;
  573                 return (0);
  574         }
  575         if (ss.ss_size < MINSIGSTKSZ)
  576                 return (ENOMEM);
  577 
  578         error = uvm_map_remap_as_stack(p, (vaddr_t)ss.ss_sp, ss.ss_size);
  579         if (error)
  580                 return (error);
  581 
  582         p->p_sigstk = ss;
  583         return (0);
  584 }
  585 
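      /*
       * kill(2): pid > 0 signals a single process, pid == 0 the caller's
       * process group, pid == -1 broadcasts, and other negative pids name
       * an explicit process group.  Permission is checked with
       * pledge_kill() and cansignal().
       */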
  586 int
  587 sys_kill(struct proc *cp, void *v, register_t *retval)
  588 {
  589         struct sys_kill_args /* {
  590                 syscallarg(int) pid;
  591                 syscallarg(int) signum;
  592         } */ *uap = v;
  593         struct process *pr;
  594         int pid = SCARG(uap, pid);
  595         int signum = SCARG(uap, signum);
  596         int error;
  597         int zombie = 0;
  598 
  599         if ((error = pledge_kill(cp, pid)) != 0)
  600                 return (error);
  601         if (((u_int)signum) >= NSIG)
  602                 return (EINVAL);
  603         if (pid > 0) {
  604                 if ((pr = prfind(pid)) == NULL) {
  605                         if ((pr = zombiefind(pid)) == NULL)
  606                                 return (ESRCH);
  607                         else
  608                                 zombie = 1;
  609                 }
  610                 if (!cansignal(cp, pr, signum))
  611                         return (EPERM);
  612 
  613                 /* kill single process */
  614                 if (signum && !zombie)
  615                         prsignal(pr, signum);
  616                 return (0);
  617         }
  618         switch (pid) {
  619         case -1:                /* broadcast signal */
  620                 return (killpg1(cp, signum, 0, 1));
  621         case 0:                 /* signal own process group */
  622                 return (killpg1(cp, signum, 0, 0));
  623         default:                /* negative explicit process group */
  624                 return (killpg1(cp, signum, -pid, 0));
  625         }
  626 }
  627 
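      /*
       * thrkill(2): deliver a signal to a single thread of the calling
       * process, identified by tid (0 means the calling thread) and
       * optionally cross-checked against its TCB address.
       */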
  628 int
  629 sys_thrkill(struct proc *cp, void *v, register_t *retval)
  630 {
  631         struct sys_thrkill_args /* {
  632                 syscallarg(pid_t) tid;
  633                 syscallarg(int) signum;
  634                 syscallarg(void *) tcb;
  635         } */ *uap = v;
  636         struct proc *p;
  637         int tid = SCARG(uap, tid);
  638         int signum = SCARG(uap, signum);
  639         void *tcb;
  640 
  641         if (((u_int)signum) >= NSIG)
  642                 return (EINVAL);
  643 
  644         p = tid ? tfind_user(tid, cp->p_p) : cp;
  645         if (p == NULL)
  646                 return (ESRCH);
  647 
  648         /* optionally require the target thread to have the given tcb addr */
  649         tcb = SCARG(uap, tcb);
  650         if (tcb != NULL && tcb != TCB_GET(p))
  651                 return (ESRCH);
  652 
  653         if (signum)
  654                 ptsignal(p, signum, STHREAD);
  655         return (0);
  656 }
  657 
  658 /*
  659  * Common code for kill process group/broadcast kill.
  660  * cp is the calling process.
  661  */
  662 int
  663 killpg1(struct proc *cp, int signum, int pgid, int all)
  664 {
  665         struct process *pr;
  666         struct pgrp *pgrp;
  667         int nfound = 0;
  668 
  669         if (all) {
  670                 /* 
  671                  * broadcast
  672                  */
  673                 LIST_FOREACH(pr, &allprocess, ps_list) {
  674                         if (pr->ps_pid <= 1 ||
  675                             pr->ps_flags & (PS_SYSTEM | PS_NOBROADCASTKILL) ||
  676                             pr == cp->p_p || !cansignal(cp, pr, signum))
  677                                 continue;
  678                         nfound++;
  679                         if (signum)
  680                                 prsignal(pr, signum);
  681                 }
  682         } else {
  683                 if (pgid == 0)
  684                         /*
  685                          * zero pgid means send to my process group.
  686                          */
  687                         pgrp = cp->p_p->ps_pgrp;
  688                 else {
  689                         pgrp = pgfind(pgid);
  690                         if (pgrp == NULL)
  691                                 return (ESRCH);
  692                 }
  693                 LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist) {
  694                         if (pr->ps_pid <= 1 || pr->ps_flags & PS_SYSTEM ||
  695                             !cansignal(cp, pr, signum))
  696                                 continue;
  697                         nfound++;
  698                         if (signum)
  699                                 prsignal(pr, signum);
  700                 }
  701         }
  702         return (nfound ? 0 : ESRCH);
  703 }
  704 
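      /*
       * CANDELIVER/CANSIGIO: permission checks for SIGIO/SIGURG delivery
       * using the credentials saved in the sigio structure rather than
       * those of the current process.
       */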
  705 #define CANDELIVER(uid, euid, pr) \
  706         (euid == 0 || \
  707         (uid) == (pr)->ps_ucred->cr_ruid || \
  708         (uid) == (pr)->ps_ucred->cr_svuid || \
  709         (uid) == (pr)->ps_ucred->cr_uid || \
  710         (euid) == (pr)->ps_ucred->cr_ruid || \
  711         (euid) == (pr)->ps_ucred->cr_svuid || \
  712         (euid) == (pr)->ps_ucred->cr_uid)
  713 
  714 #define CANSIGIO(cr, pr) \
  715         CANDELIVER((cr)->cr_ruid, (cr)->cr_uid, (pr))
  716 
  717 /*
  718  * Send a signal to a process group.  If checkctty is 1,
  719  * limit to members which have a controlling terminal.
  720  */
  721 void
  722 pgsignal(struct pgrp *pgrp, int signum, int checkctty)
  723 {
  724         struct process *pr;
  725 
  726         if (pgrp)
  727                 LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist)
  728                         if (checkctty == 0 || pr->ps_flags & PS_CONTROLT)
  729                                 prsignal(pr, signum);
  730 }
  731 
  732 /*
  733  * Send a SIGIO or SIGURG signal to a process or process group using stored
  734  * credentials rather than those of the current process.
  735  */
  736 void
  737 pgsigio(struct sigio_ref *sir, int sig, int checkctty)
  738 {
  739         struct process *pr;
  740         struct sigio *sigio;
  741 
  742         if (sir->sir_sigio == NULL)
  743                 return;
  744 
  745         KERNEL_LOCK();
  746         mtx_enter(&sigio_lock);
  747         sigio = sir->sir_sigio;
  748         if (sigio == NULL)
  749                 goto out;
  750         if (sigio->sio_pgid > 0) {
  751                 if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc))
  752                         prsignal(sigio->sio_proc, sig);
  753         } else if (sigio->sio_pgid < 0) {
  754                 LIST_FOREACH(pr, &sigio->sio_pgrp->pg_members, ps_pglist) {
  755                         if (CANSIGIO(sigio->sio_ucred, pr) &&
  756                             (checkctty == 0 || (pr->ps_flags & PS_CONTROLT)))
  757                                 prsignal(pr, sig);
  758                 }
  759         }
  760 out:
  761         mtx_leave(&sigio_lock);
  762         KERNEL_UNLOCK();
  763 }
  764 
  765 /*
  766  * Recalculate the signal mask and reset the signal disposition after
  767  * the usermode frame for delivery has been formed.
  768  */
  769 void
  770 postsig_done(struct proc *p, int signum, sigset_t catchmask, int reset)
  771 {
  772         p->p_ru.ru_nsignals++;
  773         atomic_setbits_int(&p->p_sigmask, catchmask);
  774         if (reset != 0) {
  775                 sigset_t mask = sigmask(signum);
  776                 struct sigacts *ps = p->p_p->ps_sigacts;
  777 
  778                 mtx_enter(&p->p_p->ps_mtx);
  779                 ps->ps_sigcatch &= ~mask;
  780                 if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
  781                         ps->ps_sigignore |= mask;
  782                 ps->ps_sigact[signum] = SIG_DFL;
  783                 mtx_leave(&p->p_p->ps_mtx);
  784         }
  785 }
  786 
  787 /*
  788  * Send a signal caused by a trap to the current thread.
  789  * If it will be caught immediately, deliver it with correct code.
  790  * Otherwise, post it normally.
  791  */
  792 void
  793 trapsignal(struct proc *p, int signum, u_long trapno, int code,
  794     union sigval sigval)
  795 {
  796         struct process *pr = p->p_p;
  797         struct sigctx ctx;
  798         int mask;
  799 
  800         switch (signum) {
  801         case SIGILL:
  802         case SIGBUS:
  803         case SIGSEGV:
  804                 pr->ps_acflag |= ATRAP;
  805                 break;
  806         }
  807 
  808         mask = sigmask(signum);
  809         setsigctx(p, signum, &ctx);
  810         if ((pr->ps_flags & PS_TRACED) == 0 && ctx.sig_catch != 0 &&
  811             (p->p_sigmask & mask) == 0) {
  812                 siginfo_t si;
  813 
  814                 initsiginfo(&si, signum, trapno, code, sigval);
  815 #ifdef KTRACE
  816                 if (KTRPOINT(p, KTR_PSIG)) {
  817                         ktrpsig(p, signum, ctx.sig_action,
  818                             p->p_sigmask, code, &si);
  819                 }
  820 #endif
  821                 if (sendsig(ctx.sig_action, signum, p->p_sigmask, &si,
  822                     ctx.sig_info, ctx.sig_onstack)) {
  823                         KERNEL_LOCK();
  824                         sigexit(p, SIGILL);
  825                         /* NOTREACHED */
  826                 }
  827                 postsig_done(p, signum, ctx.sig_catchmask, ctx.sig_reset);
  828         } else {
  829                 p->p_sisig = signum;
  830                 p->p_sitrapno = trapno; /* XXX for core dump/debugger */
  831                 p->p_sicode = code;
  832                 p->p_sigval = sigval;
  833 
  834                 /*
  835                  * If traced, stop if signal is masked, and stay stopped
  836                  * until released by the debugger.  If our parent process
  837                  * is waiting for us, don't hang as we could deadlock.
  838                  */
  839                 if (((pr->ps_flags & (PS_TRACED | PS_PPWAIT)) == PS_TRACED) &&
  840                     signum != SIGKILL && (p->p_sigmask & mask) != 0) {
  841                         int s;
  842 
  843                         single_thread_set(p, SINGLE_SUSPEND, 0);
  844                         pr->ps_xsig = signum;
  845 
  846                         SCHED_LOCK(s);
  847                         proc_stop(p, 1);
  848                         SCHED_UNLOCK(s);
  849 
  850                         signum = pr->ps_xsig;
  851                         single_thread_clear(p, 0);
  852 
  853                         /*
  854                          * If we are no longer being traced, or the parent
  855                          * didn't give us a signal, skip sending the signal.
  856                          */
  857                         if ((pr->ps_flags & PS_TRACED) == 0 ||
  858                             signum == 0)
  859                                 return;
  860 
  861                         /* update signal info */
  862                         p->p_sisig = signum;
  863                         mask = sigmask(signum);
  864                 }
  865 
  866                 /*
  867                  * Signals like SIGBUS and SIGSEGV should not, when
  868                  * generated by the kernel, be ignorable or blockable.
  869                  * If such a signal is blocked or ignored and we're not
  870                  * being traced, just kill the process.
  871                  * After vfs_shutdown(9), init(8) cannot receive signals
  872                  * because new code pages of the signal handler cannot be
  873                  * mapped from halted storage.  If init(8) died, the kernel
  874                  * would panic; better to let it loop between the signal
  875                  * handler and the page fault trap until the machine halts.
  876                  */
  877                 if ((pr->ps_flags & PS_TRACED) == 0 &&
  878                     (sigprop[signum] & SA_KILL) &&
  879                     ((p->p_sigmask & mask) || ctx.sig_ignore) &&
  880                     pr->ps_pid != 1) {
  881                         KERNEL_LOCK();
  882                         sigexit(p, signum);
  883                         /* NOTREACHED */
  884                 }
  885                 KERNEL_LOCK();
  886                 ptsignal(p, signum, STHREAD);
  887                 KERNEL_UNLOCK();
  888         }
  889 }
  890 
  891 /*
  892  * Send the signal to the process.  If the signal has an action, the action
  893  * is usually performed by the target process rather than the caller; we add
  894  * the signal to the set of pending signals for the process.
  895  *
  896  * Exceptions:
  897  *   o When a stop signal is sent to a sleeping process that takes the
  898  *     default action, the process is stopped without awakening it.
  899  *   o SIGCONT restarts stopped processes (or puts them back to sleep)
  900  *     regardless of the signal action (eg, blocked or ignored).
  901  *
  902  * Other ignored signals are discarded immediately.
  903  */
  904 void
  905 psignal(struct proc *p, int signum)
  906 {
  907         ptsignal(p, signum, SPROCESS);
  908 }
  909 
  910 /*
  911  * type = SPROCESS      process signal, can be diverted (sigwait())
  912  * type = STHREAD       thread signal, but should be propagated if unhandled
  913  * type = SPROPAGATED   propagated to this thread, so don't propagate again
  914  */
  915 void
  916 ptsignal(struct proc *p, int signum, enum signal_type type)
  917 {
  918         int s, prop;
  919         sig_t action;
  920         int mask;
  921         int *siglist;
  922         struct process *pr = p->p_p;
  923         struct proc *q;
  924         int wakeparent = 0;
  925 
  926         KERNEL_ASSERT_LOCKED();
  927 
  928 #ifdef DIAGNOSTIC
  929         if ((u_int)signum >= NSIG || signum == 0)
  930                 panic("psignal signal number");
  931 #endif
  932 
  933         /* Ignore signal if the target process is exiting */
  934         if (pr->ps_flags & PS_EXITING)
  935                 return;
  936 
  937         mask = sigmask(signum);
  938 
  939         if (type == SPROCESS) {
  940                 /* Accept SIGKILL to coredumping processes */
  941                 if (pr->ps_flags & PS_COREDUMP && signum == SIGKILL) {
  942                         atomic_setbits_int(&pr->ps_siglist, mask);
  943                         return;
  944                 }
  945 
  946                 /*
  947                  * If the current thread can process the signal
  948                  * immediately (it's unblocked) then have it take it.
  949                  */
  950                 q = curproc;
  951                 if (q != NULL && q->p_p == pr && (q->p_flag & P_WEXIT) == 0 &&
  952                     (q->p_sigmask & mask) == 0)
  953                         p = q;
  954                 else {
  955                         /*
  956                          * A process-wide signal can be diverted to a
  957                          * different thread that's in sigwait() for this
  958                          * signal.  If there isn't such a thread, then
  959                          * pick a thread that doesn't have it blocked so
  960                          * that the stop/kill consideration isn't
  961                          * delayed.  Otherwise, mark it pending on the
  962                          * main thread.
  963                          */
  964                         TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
  965                                 /* ignore exiting threads */
  966                                 if (q->p_flag & P_WEXIT)
  967                                         continue;
  968 
  969                                 /* skip threads that have the signal blocked */
  970                                 if ((q->p_sigmask & mask) != 0)
  971                                         continue;
  972 
  973                                 /* okay, could send to this thread */
  974                                 p = q;
  975 
  976                                 /*
  977                                  * sigsuspend, sigwait, ppoll/pselect, etc?
  978                                  * Definitely go to this thread, as it's
  979                                  * already blocked in the kernel.
  980                                  */
  981                                 if (q->p_flag & P_SIGSUSPEND)
  982                                         break;
  983                         }
  984                 }
  985         }
  986 
  987         if (type != SPROPAGATED)
  988                 KNOTE(&pr->ps_klist, NOTE_SIGNAL | signum);
  989 
  990         prop = sigprop[signum];
  991 
  992         /*
  993          * If proc is traced, always give parent a chance.
  994          */
  995         if (pr->ps_flags & PS_TRACED) {
  996                 action = SIG_DFL;
  997         } else {
  998                 sigset_t sigcatch, sigignore;
  999 
 1000                 /*
 1001                  * If the signal is being ignored,
 1002                  * then we forget about it immediately.
 1003                  * (Note: we don't set SIGCONT in ps_sigignore,
 1004                  * and if it is set to SIG_IGN,
 1005                  * action will be SIG_DFL here.)
 1006                  */
 1007                 mtx_enter(&pr->ps_mtx);
 1008                 sigignore = pr->ps_sigacts->ps_sigignore;
 1009                 sigcatch = pr->ps_sigacts->ps_sigcatch;
 1010                 mtx_leave(&pr->ps_mtx);
 1011 
 1012                 if (sigignore & mask)
 1013                         return;
 1014                 if (p->p_sigmask & mask) {
 1015                         action = SIG_HOLD;
 1016                 } else if (sigcatch & mask) {
 1017                         action = SIG_CATCH;
 1018                 } else {
 1019                         action = SIG_DFL;
 1020 
 1021                         if (prop & SA_KILL && pr->ps_nice > NZERO)
 1022                                  pr->ps_nice = NZERO;
 1023 
 1024                         /*
 1025                          * If sending a tty stop signal to a member of an
 1026                          * orphaned process group, discard the signal here if
 1027                          * the action is default; don't stop the process below
 1028                          * if sleeping, and don't clear any pending SIGCONT.
 1029                          */
 1030                         if (prop & SA_TTYSTOP && pr->ps_pgrp->pg_jobc == 0)
 1031                                 return;
 1032                 }
 1033         }
 1034         /*
 1035          * If delivered to process, mark as pending there.  Continue and stop
 1036          * signals are propagated to all threads, so they are always
 1037          * marked at the thread level.
 1038          */
 1039         siglist = (type == SPROCESS) ? &pr->ps_siglist : &p->p_siglist;
 1040         if (prop & SA_CONT) {
 1041                 siglist = &p->p_siglist;
 1042                 atomic_clearbits_int(siglist, STOPSIGMASK);
 1043         }
 1044         if (prop & SA_STOP) {
 1045                 siglist = &p->p_siglist;
 1046                 atomic_clearbits_int(siglist, CONTSIGMASK);
 1047                 atomic_clearbits_int(&p->p_flag, P_CONTINUED);
 1048         }
 1049 
 1050         /*
 1051          * XXX delay processing of SA_STOP signals unless action == SIG_DFL?
 1052          */
 1053         if (prop & (SA_CONT | SA_STOP) && type != SPROPAGATED)
 1054                 TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link)
 1055                         if (q != p)
 1056                                 ptsignal(q, signum, SPROPAGATED);
 1057 
 1058         /*
 1059          * Defer further processing for signals which are held,
 1060          * except that stopped processes must be continued by SIGCONT.
 1061          */
 1062         if (action == SIG_HOLD && ((prop & SA_CONT) == 0 ||
 1063             p->p_stat != SSTOP)) {
 1064                 atomic_setbits_int(siglist, mask);
 1065                 return;
 1066         }
 1067 
 1068         SCHED_LOCK(s);
 1069 
 1070         switch (p->p_stat) {
 1071 
 1072         case SSLEEP:
 1073                 /*
 1074                  * If process is sleeping uninterruptibly
 1075                  * we can't interrupt the sleep... the signal will
 1076                  * be noticed when the process returns through
 1077                  * trap() or syscall().
 1078                  */
 1079                 if ((p->p_flag & P_SINTR) == 0)
 1080                         goto out;
 1081                 /*
 1082                  * Process is sleeping and traced... make it runnable
 1083                  * so it can discover the signal in cursig() and stop
 1084                  * for the parent.
 1085                  */
 1086                 if (pr->ps_flags & PS_TRACED)
 1087                         goto run;
 1088                 /*
 1089                  * If SIGCONT is default (or ignored) and process is
 1090                  * asleep, we are finished; the process should not
 1091                  * be awakened.
 1092                  */
 1093                 if ((prop & SA_CONT) && action == SIG_DFL) {
 1094                         mask = 0;
 1095                         goto out;
 1096                 }
 1097                 /*
 1098                  * When a sleeping process receives a stop
 1099                  * signal, process immediately if possible.
 1100                  */
 1101                 if ((prop & SA_STOP) && action == SIG_DFL) {
 1102                         /*
 1103                          * If a child holding parent blocked,
 1104                          * stopping could cause deadlock.
 1105                          */
 1106                         if (pr->ps_flags & PS_PPWAIT)
 1107                                 goto out;
 1108                         mask = 0;
 1109                         pr->ps_xsig = signum;
 1110                         proc_stop(p, 0);
 1111                         goto out;
 1112                 }
 1113                 /*
 1114                  * All other (caught or default) signals
 1115                  * cause the process to run.
 1116                  */
 1117                 goto runfast;
 1118                 /* NOTREACHED */
 1119 
 1120         case SSTOP:
 1121                 /*
 1122                  * If traced process is already stopped,
 1123                  * then no further action is necessary.
 1124                  */
 1125                 if (pr->ps_flags & PS_TRACED)
 1126                         goto out;
 1127 
 1128                 /*
 1129                  * Kill signal always sets processes running.
 1130                  */
 1131                 if (signum == SIGKILL) {
 1132                         atomic_clearbits_int(&p->p_flag, P_SUSPSIG);
 1133                         goto runfast;
 1134                 }
 1135 
 1136                 if (prop & SA_CONT) {
 1137                         /*
 1138                          * If SIGCONT is default (or ignored), we continue the
 1139                          * process but don't leave the signal in p_siglist, as
 1140                          * it has no further action.  If SIGCONT is held, we
 1141                          * continue the process and leave the signal in
 1142                          * p_siglist.  If the process catches SIGCONT, let it
 1143                          * handle the signal itself.  If it isn't waiting on
 1144                          * an event, then it goes back to run state.
 1145                          * Otherwise, process goes back to sleep state.
 1146                          */
 1147                         atomic_setbits_int(&p->p_flag, P_CONTINUED);
 1148                         atomic_clearbits_int(&p->p_flag, P_SUSPSIG);
 1149                         wakeparent = 1;
 1150                         if (action == SIG_DFL)
 1151                                 atomic_clearbits_int(siglist, mask);
 1152                         if (action == SIG_CATCH)
 1153                                 goto runfast;
 1154                         if (p->p_wchan == NULL)
 1155                                 goto run;
 1156                         p->p_stat = SSLEEP;
 1157                         goto out;
 1158                 }
 1159 
 1160                 if (prop & SA_STOP) {
 1161                         /*
 1162                          * Already stopped, don't need to stop again.
 1163                          * (If we did the shell could get confused.)
 1164                          */
 1165                         mask = 0;
 1166                         goto out;
 1167                 }
 1168 
 1169                 /*
 1170                  * If process is sleeping interruptibly, then simulate a
 1171                  * wakeup so that when it is continued, it will be made
 1172                  * runnable and can look at the signal.  But don't make
 1173                  * the process runnable, leave it stopped.
 1174                  */
 1175                 if (p->p_flag & P_SINTR)
 1176                         unsleep(p);
 1177                 goto out;
 1178 
 1179         case SONPROC:
 1180                 /* set siglist before issuing the ast */
 1181                 atomic_setbits_int(siglist, mask);
 1182                 mask = 0;
 1183                 signotify(p);
 1184                 /* FALLTHROUGH */
 1185         default:
 1186                 /*
 1187                  * SRUN, SIDL, SDEAD do nothing with the signal,
 1188                  * other than kicking ourselves if we are running.
 1189                  * It will either never be noticed, or noticed very soon.
 1190                  */
 1191                 goto out;
 1192         }
 1193         /* NOTREACHED */
 1194 
 1195 runfast:
 1196         /*
 1197          * Raise priority to at least PUSER.
 1198          */
 1199         if (p->p_usrpri > PUSER)
 1200                 p->p_usrpri = PUSER;
 1201 run:
 1202         setrunnable(p);
 1203 out:
 1204         /* finally adjust siglist */
 1205         if (mask)
 1206                 atomic_setbits_int(siglist, mask);
 1207         SCHED_UNLOCK(s);
 1208         if (wakeparent)
 1209                 wakeup(pr->ps_pptr);
 1210 }
 1211 
 1212 /* fill the signal context which should be used by postsig() and cursig() */
 1213 void
 1214 setsigctx(struct proc *p, int signum, struct sigctx *sctx)
 1215 {
 1216         struct sigacts *ps = p->p_p->ps_sigacts;
 1217         sigset_t mask;
 1218 
 1219         mtx_enter(&p->p_p->ps_mtx);
 1220         mask = sigmask(signum);
 1221         sctx->sig_action = ps->ps_sigact[signum];
 1222         sctx->sig_catchmask = ps->ps_catchmask[signum];
 1223         sctx->sig_reset = (ps->ps_sigreset & mask) != 0;
 1224         sctx->sig_info = (ps->ps_siginfo & mask) != 0;
 1225         sctx->sig_intr = (ps->ps_sigintr & mask) != 0;
 1226         sctx->sig_onstack = (ps->ps_sigonstack & mask) != 0;
 1227         sctx->sig_ignore = (ps->ps_sigignore & mask) != 0;
 1228         sctx->sig_catch = (ps->ps_sigcatch & mask) != 0;
 1229         mtx_leave(&p->p_p->ps_mtx);
 1230 }
 1231 
 1232 /*
 1233  * Determine the signal that should be delivered to process p, the
 1234  * current process; return 0 if none.
 1235  *
 1236  * If the current process has received a signal (should be caught or cause
 1237  * termination, should interrupt current syscall), return the signal number.
 1238  * Stop signals with default action are processed immediately, then cleared;
 1239  * they aren't returned.  This is checked after each entry to the system for
 1240  * a syscall or trap. The normal call sequence is
 1241  *
 1242  *      while (signum = cursig(curproc, &ctx))
 1243  *              postsig(signum, &ctx);
 1244  *
 1245  * Assumes that if the P_SINTR flag is set, we're holding both the
 1246  * kernel and scheduler locks.
 1247  */
 1248 int
 1249 cursig(struct proc *p, struct sigctx *sctx)
 1250 {
 1251         struct process *pr = p->p_p;
 1252         int signum, mask, prop;
 1253         int dolock = (p->p_flag & P_SINTR) == 0;
 1254         sigset_t ps_siglist;
 1255         int s;
 1256 
 1257         KASSERT(p == curproc);
 1258 
 1259         for (;;) {
 1260                 ps_siglist = READ_ONCE(pr->ps_siglist);
 1261                 membar_consumer();
 1262                 mask = SIGPENDING(p);
 1263                 if (pr->ps_flags & PS_PPWAIT)
 1264                         mask &= ~STOPSIGMASK;
 1265                 if (mask == 0)          /* no signal to send */
 1266                         return (0);
 1267                 signum = ffs((long)mask);
 1268                 mask = sigmask(signum);
 1269 
 1270                 /* take the signal! */
 1271                 if (atomic_cas_uint(&pr->ps_siglist, ps_siglist,
 1272                     ps_siglist & ~mask) != ps_siglist) {
 1273                         /* lost race taking the process signal, restart */
 1274                         continue;
 1275                 }
 1276                 atomic_clearbits_int(&p->p_siglist, mask);
 1277                 setsigctx(p, signum, sctx);
 1278 
 1279                 /*
 1280                  * We should see pending but ignored signals
 1281                  * only if PS_TRACED was on when they were posted.
 1282                  */
 1283                 if (sctx->sig_ignore && (pr->ps_flags & PS_TRACED) == 0)
 1284                         continue;
 1285 
 1286                 /*
 1287                  * If traced, always stop, and stay stopped until released
 1288                  * by the debugger.  If our parent process is waiting for
 1289                  * us, don't hang as we could deadlock.
 1290                  */
 1291                 if (((pr->ps_flags & (PS_TRACED | PS_PPWAIT)) == PS_TRACED) &&
 1292                     signum != SIGKILL) {
 1293                         single_thread_set(p, SINGLE_SUSPEND, 0);
 1294                         pr->ps_xsig = signum;
 1295 
 1296                         if (dolock)
 1297                                 SCHED_LOCK(s);
 1298                         proc_stop(p, 1);
 1299                         if (dolock)
 1300                                 SCHED_UNLOCK(s);
 1301 
 1302                         /*
 1303                          * re-take the signal before releasing
 1304                          * the other threads. Must check the continue
 1305                          * conditions below and only take the signal if
 1306                          * those are not true.
 1307                          */
 1308                         signum = pr->ps_xsig;
 1309                         mask = sigmask(signum);
 1310                         setsigctx(p, signum, sctx);
 1311                         if (!((pr->ps_flags & PS_TRACED) == 0 ||
 1312                             signum == 0 ||
 1313                             (p->p_sigmask & mask) != 0)) {
 1314                                 atomic_clearbits_int(&p->p_siglist, mask);
 1315                                 atomic_clearbits_int(&pr->ps_siglist, mask);
 1316                         }
 1317 
 1318                         single_thread_clear(p, 0);
 1319 
 1320                         /*
 1321                          * If we are no longer being traced, or the parent
 1322                          * didn't give us a signal, look for more signals.
 1323                          */
 1324                         if ((pr->ps_flags & PS_TRACED) == 0 ||
 1325                             signum == 0)
 1326                                 continue;
 1327 
 1328                         /*
 1329                          * If the new signal is being masked, look for other
 1330                          * signals.
 1331                          */
 1332                         if ((p->p_sigmask & mask) != 0)
 1333                                 continue;
 1334 
 1335                 }
 1336 
 1337                 prop = sigprop[signum];
 1338 
 1339                 /*
 1340                  * Decide whether the signal should be returned.
 1341                  * Return the signal's number, or fall through
 1342                  * to clear it from the pending mask.
 1343                  */
 1344                 switch ((long)sctx->sig_action) {
 1345                 case (long)SIG_DFL:
 1346                         /*
 1347                          * Don't take default actions on system processes.
 1348                          */
 1349                         if (pr->ps_pid <= 1) {
 1350 #ifdef DIAGNOSTIC
 1351                                 /*
 1352                                  * Are you sure you want to ignore SIGSEGV
 1353                                  * in init? XXX
 1354                                  */
 1355                                 printf("Process (pid %d) got signal"
 1356                                     " %d\n", pr->ps_pid, signum);
 1357 #endif
 1358                                 break;          /* == ignore */
 1359                         }
 1360                         /*
 1361                          * If there is a pending stop signal to process
 1362                          * with default action, stop here,
 1363                          * then clear the signal.  However,
 1364                          * if process is member of an orphaned
 1365                          * process group, ignore tty stop signals.
 1366                          */
 1367                         if (prop & SA_STOP) {
 1368                                 if (pr->ps_flags & PS_TRACED ||
 1369                                     (pr->ps_pgrp->pg_jobc == 0 &&
 1370                                     prop & SA_TTYSTOP))
 1371                                         break;  /* == ignore */
 1372                                 pr->ps_xsig = signum;
 1373                                 if (dolock)
 1374                                         SCHED_LOCK(s);
 1375                                 proc_stop(p, 1);
 1376                                 if (dolock)
 1377                                         SCHED_UNLOCK(s);
 1378                                 break;
 1379                         } else if (prop & SA_IGNORE) {
 1380                                 /*
 1381                                  * Except for SIGCONT, shouldn't get here.
 1382                                  * Default action is to ignore; drop it.
 1383                                  */
 1384                                 break;          /* == ignore */
 1385                         } else
 1386                                 goto keep;
 1387                         /* NOTREACHED */
 1388                 case (long)SIG_IGN:
 1389                         /*
 1390                          * Masking above should prevent us ever trying
 1391                          * to take action on an ignored signal other
 1392                          * than SIGCONT, unless process is traced.
 1393                          */
 1394                         if ((prop & SA_CONT) == 0 &&
 1395                             (pr->ps_flags & PS_TRACED) == 0)
 1396                                 printf("%s\n", __func__);
 1397                         break;          /* == ignore */
 1398                 default:
 1399                         /*
 1400                          * This signal has an action, let
 1401                          * postsig() process it.
 1402                          */
 1403                         goto keep;
 1404                 }
 1405         }
 1406         /* NOTREACHED */
 1407 
 1408 keep:
 1409         atomic_setbits_int(&p->p_siglist, mask); /* leave the signal for later */
 1410         return (signum);
 1411 }
 1412 
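The stop/continue handshake above is what a debugger drives from userland. A minimal, hypothetical ptrace(2) sketch (not part of kern_sig.c), assuming the usual BSD ptrace interface; the target pid is taken from the command line purely for illustration:

#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(int argc, char *argv[])
{
	pid_t pid;
	int status, sig;

	if (argc != 2)
		return 1;
	pid = (pid_t)atoi(argv[1]);	/* pid of the process to inspect */

	if (ptrace(PT_ATTACH, pid, NULL, 0) == -1) {
		perror("PT_ATTACH");
		return 1;
	}
	waitpid(pid, &status, 0);	/* tracee stops via proc_stop() */
	sig = WIFSTOPPED(status) ? WSTOPSIG(status) : 0;
	printf("tracee stopped with signal %d\n", sig);

	/*
	 * Detach and pick what the tracee resumes with: 0 swallows the
	 * signal, a nonzero value is re-taken through ps_xsig as in the
	 * code above.
	 */
	ptrace(PT_DETACH, pid, (caddr_t)1, sig == SIGSTOP ? 0 : sig);
	return 0;
}
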
 1413 /*
 1414  * Put the argument process into the stopped state and notify the parent
 1415  * via wakeup.  Signals are handled elsewhere.  The process must not be
 1416  * on the run queue.
 1417  */
 1418 void
 1419 proc_stop(struct proc *p, int sw)
 1420 {
 1421         struct process *pr = p->p_p;
 1422 
 1423 #ifdef MULTIPROCESSOR
 1424         SCHED_ASSERT_LOCKED();
 1425 #endif
 1426 
 1427         p->p_stat = SSTOP;
 1428         atomic_clearbits_int(&pr->ps_flags, PS_WAITED);
 1429         atomic_setbits_int(&pr->ps_flags, PS_STOPPED);
 1430         atomic_setbits_int(&p->p_flag, P_SUSPSIG);
 1431         /*
 1432          * We need this soft interrupt to be handled fast.
 1433          * Extra calls to softclock don't hurt.
 1434          */
 1435         softintr_schedule(proc_stop_si);
 1436         if (sw)
 1437                 mi_switch();
 1438 }
 1439 
 1440 /*
 1441  * Called from a soft interrupt to send signals to the parents of stopped
 1442  * processes.
 1443  * We can't do this in proc_stop because it's called with nasty locks held
 1444  * and we would need a recursive scheduler lock to deal with that.
 1445  */
 1446 void
 1447 proc_stop_sweep(void *v)
 1448 {
 1449         struct process *pr;
 1450 
 1451         LIST_FOREACH(pr, &allprocess, ps_list) {
 1452                 if ((pr->ps_flags & PS_STOPPED) == 0)
 1453                         continue;
 1454                 atomic_clearbits_int(&pr->ps_flags, PS_STOPPED);
 1455 
 1456                 if ((pr->ps_pptr->ps_sigacts->ps_sigflags & SAS_NOCLDSTOP) == 0)
 1457                         prsignal(pr->ps_pptr, SIGCHLD);
 1458                 wakeup(pr->ps_pptr);
 1459         }
 1460 }
 1461 
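proc_stop_sweep() is what ultimately wakes a parent sleeping in wait4() and, unless the parent set SA_NOCLDSTOP, posts SIGCHLD to it. A minimal userland sketch of that parent-side view, using only standard POSIX calls (not part of this file):

#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t got_chld;

static void
on_chld(int sig)
{
	got_chld = 1;			/* async-signal-safe flag */
}

int
main(void)
{
	struct sigaction sa;
	pid_t pid;
	int status;

	memset(&sa, 0, sizeof sa);
	sa.sa_handler = on_chld;
	/* Leaving SA_NOCLDSTOP clear means stops are reported via SIGCHLD. */
	sigaction(SIGCHLD, &sa, NULL);

	if ((pid = fork()) == 0) {
		pause();
		_exit(0);
	}

	kill(pid, SIGSTOP);
	waitpid(pid, &status, WUNTRACED);	/* woken by proc_stop_sweep() */
	if (WIFSTOPPED(status))
		printf("child stopped by %d, SIGCHLD handler ran: %d\n",
		    WSTOPSIG(status), (int)got_chld);

	kill(pid, SIGKILL);
	waitpid(pid, &status, 0);
	return 0;
}
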
 1462 /*
 1463  * Take the action for the specified signal
 1464  * from the current set of pending signals.
 1465  */
 1466 void
 1467 postsig(struct proc *p, int signum, struct sigctx *sctx)
 1468 {
 1469         u_long trapno;
 1470         int mask, returnmask;
 1471         siginfo_t si;
 1472         union sigval sigval;
 1473         int code;
 1474 
 1475         KASSERT(signum != 0);
 1476 
 1477         mask = sigmask(signum);
 1478         atomic_clearbits_int(&p->p_siglist, mask);
 1479         sigval.sival_ptr = NULL;
 1480 
 1481         if (p->p_sisig != signum) {
 1482                 trapno = 0;
 1483                 code = SI_USER;
 1484                 sigval.sival_ptr = NULL;
 1485         } else {
 1486                 trapno = p->p_sitrapno;
 1487                 code = p->p_sicode;
 1488                 sigval = p->p_sigval;
 1489         }
 1490         initsiginfo(&si, signum, trapno, code, sigval);
 1491 
 1492 #ifdef KTRACE
 1493         if (KTRPOINT(p, KTR_PSIG)) {
 1494                 ktrpsig(p, signum, sctx->sig_action, p->p_flag & P_SIGSUSPEND ?
 1495                     p->p_oldmask : p->p_sigmask, code, &si);
 1496         }
 1497 #endif
 1498         if (sctx->sig_action == SIG_DFL) {
 1499                 /*
 1500                  * Default action, where the default is to kill
 1501                  * the process.  (Other cases were ignored above.)
 1502                  */
 1503                 KERNEL_LOCK();
 1504                 sigexit(p, signum);
 1505                 /* NOTREACHED */
 1506         } else {
 1507                 /*
 1508                  * If we get here, the signal must be caught.
 1509                  */
 1510 #ifdef DIAGNOSTIC
 1511                 if (sctx->sig_action == SIG_IGN || (p->p_sigmask & mask))
 1512                         panic("postsig action");
 1513 #endif
 1514                 /*
 1515                  * Set the new mask value and also defer further
 1516                  * occurrences of this signal.
 1517                  *
 1518                  * Special case: user has done a sigpause.  Here the
 1519                  * current mask is not of interest, but rather the
 1520                  * mask from before the sigpause is what we want
 1521                  * restored after the signal processing is completed.
 1522                  */
 1523                 if (p->p_flag & P_SIGSUSPEND) {
 1524                         atomic_clearbits_int(&p->p_flag, P_SIGSUSPEND);
 1525                         returnmask = p->p_oldmask;
 1526                 } else {
 1527                         returnmask = p->p_sigmask;
 1528                 }
 1529                 if (p->p_sisig == signum) {
 1530                         p->p_sisig = 0;
 1531                         p->p_sitrapno = 0;
 1532                         p->p_sicode = SI_USER;
 1533                         p->p_sigval.sival_ptr = NULL;
 1534                 }
 1535 
 1536                 if (sendsig(sctx->sig_action, signum, returnmask, &si,
 1537                     sctx->sig_info, sctx->sig_onstack)) {
 1538                         KERNEL_LOCK();
 1539                         sigexit(p, SIGILL);
 1540                         /* NOTREACHED */
 1541                 }
 1542                 postsig_done(p, signum, sctx->sig_catchmask, sctx->sig_reset);
 1543         }
 1544 }
 1545 
 1546 /*
 1547  * Force the current process to exit with the specified signal, dumping core
 1548  * if appropriate.  We bypass the normal tests for masked and caught signals,
 1549  * allowing unrecoverable failures to terminate the process without changing
 1550  * signal state.  Mark the accounting record with the signal termination.
 1551  * If dumping core, save the signal number for the debugger.  Calls exit and
 1552  * does not return.
 1553  */
 1554 void
 1555 sigexit(struct proc *p, int signum)
 1556 {
 1557         /* Mark process as going away */
 1558         atomic_setbits_int(&p->p_flag, P_WEXIT);
 1559 
 1560         p->p_p->ps_acflag |= AXSIG;
 1561         if (sigprop[signum] & SA_CORE) {
 1562                 p->p_sisig = signum;
 1563 
 1564                 /* if there are other threads, pause them */
 1565                 if (P_HASSIBLING(p))
 1566                         single_thread_set(p, SINGLE_SUSPEND, 1);
 1567 
 1568                 if (coredump(p) == 0)
 1569                         signum |= WCOREFLAG;
 1570         }
 1571         exit1(p, 0, signum, EXIT_NORMAL);
 1572         /* NOTREACHED */
 1573 }
 1574 
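When sigexit() manages to write a core file it ORs WCOREFLAG into the termination status, which a parent can observe with the (non-POSIX but BSD-provided) WCOREDUMP() macro. A small illustrative sketch, assuming the usual <sys/wait.h> macros:

#include <sys/types.h>
#include <sys/wait.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
	pid_t pid;
	int status;

	if ((pid = fork()) == 0)
		abort();		/* SIGABRT has the SA_CORE property */

	waitpid(pid, &status, 0);
	if (WIFSIGNALED(status))
		printf("child killed by signal %d%s\n", WTERMSIG(status),
		    WCOREDUMP(status) ? " (core dumped)" : "");
	return 0;
}
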
 1575 /*
 1576  * Send uncatchable SIGABRT for coredump.
 1577  */
 1578 void
 1579 sigabort(struct proc *p)
 1580 {
 1581         struct sigaction sa;
 1582 
 1583         memset(&sa, 0, sizeof sa);
 1584         sa.sa_handler = SIG_DFL;
 1585         setsigvec(p, SIGABRT, &sa);
 1586         atomic_clearbits_int(&p->p_sigmask, sigmask(SIGABRT));
 1587         psignal(p, SIGABRT);
 1588 }
 1589 
 1590 /*
 1591  * Return 1 if `sig', a given signal, is ignored or masked for `p', a given
 1592  * thread, and 0 otherwise.
 1593  */
 1594 int
 1595 sigismasked(struct proc *p, int sig)
 1596 {
 1597         struct process *pr = p->p_p;
 1598         int rv;
 1599 
 1600         mtx_enter(&pr->ps_mtx);
 1601         rv = (pr->ps_sigacts->ps_sigignore & sigmask(sig)) ||
 1602             (p->p_sigmask & sigmask(sig));
 1603         mtx_leave(&pr->ps_mtx);
 1604 
 1605         return !!rv;
 1606 }
 1607 
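A userland thread cannot call sigismasked(), but it can make an equivalent check on itself with sigaction(2) and sigprocmask(2). A hypothetical helper sketch (the helper name is invented for illustration):

#include <signal.h>

/*
 * Hypothetical helper: returns 1 if `sig' is currently ignored or blocked
 * in the calling thread, mirroring the kernel-side test above.
 */
static int
sig_ignored_or_blocked(int sig)
{
	struct sigaction sa;
	sigset_t cur;

	if (sigaction(sig, NULL, &sa) == 0 && sa.sa_handler == SIG_IGN)
		return 1;
	sigemptyset(&cur);
	sigprocmask(SIG_SETMASK, NULL, &cur);	/* query only */
	return sigismember(&cur, sig) == 1;
}
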
 1608 struct coredump_iostate {
 1609         struct proc *io_proc;
 1610         struct vnode *io_vp;
 1611         struct ucred *io_cred;
 1612         off_t io_offset;
 1613 };
 1614 
 1615 /*
 1616  * Dump core, into a file named "progname.core", unless the process was
 1617  * setuid/setgid.
 1618  */
 1619 int
 1620 coredump(struct proc *p)
 1621 {
 1622 #ifdef SMALL_KERNEL
 1623         return EPERM;
 1624 #else
 1625         struct process *pr = p->p_p;
 1626         struct vnode *vp;
 1627         struct ucred *cred = p->p_ucred;
 1628         struct vmspace *vm = p->p_vmspace;
 1629         struct nameidata nd;
 1630         struct vattr vattr;
 1631         struct coredump_iostate io;
 1632         int error, len, incrash = 0;
 1633         char *name;
 1634         const char *dir = "/var/crash";
 1635 
 1636         atomic_setbits_int(&pr->ps_flags, PS_COREDUMP);
 1637 
 1638 #ifdef PMAP_CHECK_COPYIN
 1639         /* disable copyin checks, so we can write out text sections if needed */
 1640         p->p_vmspace->vm_map.check_copyin_count = 0;
 1641 #endif
 1642 
 1643         /* Don't dump if will exceed file size limit. */
 1644         if (USPACE + ptoa(vm->vm_dsize + vm->vm_ssize) >= lim_cur(RLIMIT_CORE))
 1645                 return (EFBIG);
 1646 
 1647         name = pool_get(&namei_pool, PR_WAITOK);
 1648 
 1649         /*
 1650          * If the process has inconsistent uids, nosuidcoredump
 1651          * determines coredump placement policy.
 1652          */
 1653         if (((pr->ps_flags & PS_SUGID) && (error = suser(p))) ||
 1654            ((pr->ps_flags & PS_SUGID) && nosuidcoredump)) {
 1655                 if (nosuidcoredump == 3) {
 1656                         /*
 1657                          * If the per-program directory does not exist,
 1658                          * the core dump will silently fail.
 1659                          */
 1660                         len = snprintf(name, MAXPATHLEN, "%s/%s/%u.core",
 1661                             dir, pr->ps_comm, pr->ps_pid);
 1662                         incrash = KERNELPATH;
 1663                 } else if (nosuidcoredump == 2) {
 1664                         len = snprintf(name, MAXPATHLEN, "%s/%s.core",
 1665                             dir, pr->ps_comm);
 1666                         incrash = KERNELPATH;
 1667                 } else {
 1668                         pool_put(&namei_pool, name);
 1669                         return (EPERM);
 1670                 }
 1671         } else
 1672                 len = snprintf(name, MAXPATHLEN, "%s.core", pr->ps_comm);
 1673 
 1674         if (len >= MAXPATHLEN) {
 1675                 pool_put(&namei_pool, name);
 1676                 return (EACCES);
 1677         }
 1678 
 1679         /*
 1680          * Control the UID used to write out.  The normal case uses
 1681          * the real UID.  If the sugid case is going to write into the
 1682          * controlled directory, we do so as root.
 1683          */
 1684         if (incrash == 0) {
 1685                 cred = crdup(cred);
 1686                 cred->cr_uid = cred->cr_ruid;
 1687                 cred->cr_gid = cred->cr_rgid;
 1688         } else {
 1689                 if (p->p_fd->fd_rdir) {
 1690                         vrele(p->p_fd->fd_rdir);
 1691                         p->p_fd->fd_rdir = NULL;
 1692                 }
 1693                 p->p_ucred = crdup(p->p_ucred);
 1694                 crfree(cred);
 1695                 cred = p->p_ucred;
 1696                 crhold(cred);
 1697                 cred->cr_uid = 0;
 1698                 cred->cr_gid = 0;
 1699         }
 1700 
 1701         /* incrash should be 0 or KERNELPATH only */
 1702         NDINIT(&nd, 0, incrash, UIO_SYSSPACE, name, p);
 1703 
 1704         error = vn_open(&nd, O_CREAT | FWRITE | O_NOFOLLOW | O_NONBLOCK,
 1705             S_IRUSR | S_IWUSR);
 1706 
 1707         if (error)
 1708                 goto out;
 1709 
 1710         /*
 1711          * Don't dump to non-regular files, files with links, or files
 1712          * owned by someone else.
 1713          */
 1714         vp = nd.ni_vp;
 1715         if ((error = VOP_GETATTR(vp, &vattr, cred, p)) != 0) {
 1716                 VOP_UNLOCK(vp);
 1717                 vn_close(vp, FWRITE, cred, p);
 1718                 goto out;
 1719         }
 1720         if (vp->v_type != VREG || vattr.va_nlink != 1 ||
 1721             vattr.va_mode & ((VREAD | VWRITE) >> 3 | (VREAD | VWRITE) >> 6) ||
 1722             vattr.va_uid != cred->cr_uid) {
 1723                 error = EACCES;
 1724                 VOP_UNLOCK(vp);
 1725                 vn_close(vp, FWRITE, cred, p);
 1726                 goto out;
 1727         }
 1728         VATTR_NULL(&vattr);
 1729         vattr.va_size = 0;
 1730         VOP_SETATTR(vp, &vattr, cred, p);
 1731         pr->ps_acflag |= ACORE;
 1732 
 1733         io.io_proc = p;
 1734         io.io_vp = vp;
 1735         io.io_cred = cred;
 1736         io.io_offset = 0;
 1737         VOP_UNLOCK(vp);
 1738         vref(vp);
 1739         error = vn_close(vp, FWRITE, cred, p);
 1740         if (error == 0)
 1741                 error = coredump_elf(p, &io);
 1742         vrele(vp);
 1743 out:
 1744         crfree(cred);
 1745         pool_put(&namei_pool, name);
 1746         return (error);
 1747 #endif
 1748 }
 1749 
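The size check near the top of coredump() compares against the soft RLIMIT_CORE limit, so a process that wants a core file usually raises that limit first. A minimal sketch, assuming the resulting file is named <progname>.core in the current directory as in the non-sugid path above:

#include <sys/types.h>
#include <sys/resource.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	struct rlimit rl;

	/* Raise the soft core-size limit; coredump() checks it above. */
	if (getrlimit(RLIMIT_CORE, &rl) == 0) {
		rl.rlim_cur = rl.rlim_max;
		if (setrlimit(RLIMIT_CORE, &rl) == -1)
			perror("setrlimit");
	}

	abort();	/* should leave a <progname>.core in the cwd */
}
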
 1750 #ifndef SMALL_KERNEL
 1751 int
 1752 coredump_write(void *cookie, enum uio_seg segflg, const void *data, size_t len)
 1753 {
 1754         struct coredump_iostate *io = cookie;
 1755         off_t coffset = 0;
 1756         size_t csize;
 1757         int chunk, error;
 1758 
 1759         csize = len;
 1760         do {
 1761                 if (sigmask(SIGKILL) &
 1762                     (io->io_proc->p_siglist | io->io_proc->p_p->ps_siglist))
 1763                         return (EINTR);
 1764 
 1765                 /* Rest of the loop sleeps with lock held, so... */
 1766                 yield();
 1767 
 1768                 chunk = MIN(csize, MAXPHYS);
 1769                 error = vn_rdwr(UIO_WRITE, io->io_vp,
 1770                     (caddr_t)data + coffset, chunk,
 1771                     io->io_offset + coffset, segflg,
 1772                     IO_UNIT, io->io_cred, NULL, io->io_proc);
 1773                 if (error) {
 1774                         struct process *pr = io->io_proc->p_p;
 1775 
 1776                         if (error == ENOSPC)
 1777                                 log(LOG_ERR,
 1778                                     "coredump of %s(%d) failed, filesystem full\n",
 1779                                     pr->ps_comm, pr->ps_pid);
 1780                         else
 1781                                 log(LOG_ERR,
 1782                                     "coredump of %s(%d), write failed: errno %d\n",
 1783                                     pr->ps_comm, pr->ps_pid, error);
 1784                         return (error);
 1785                 }
 1786 
 1787                 coffset += chunk;
 1788                 csize -= chunk;
 1789         } while (csize > 0);
 1790 
 1791         io->io_offset += len;
 1792         return (0);
 1793 }
 1794 
 1795 void
 1796 coredump_unmap(void *cookie, vaddr_t start, vaddr_t end)
 1797 {
 1798         struct coredump_iostate *io = cookie;
 1799 
 1800         uvm_unmap(&io->io_proc->p_vmspace->vm_map, start, end);
 1801 }
 1802 
 1803 #endif  /* !SMALL_KERNEL */
 1804 
 1805 /*
 1806  * Nonexistent system call: signal the process (it may want to handle it).
 1807  * Flag an error in case the process won't see the signal immediately (blocked or ignored).
 1808  */
 1809 int
 1810 sys_nosys(struct proc *p, void *v, register_t *retval)
 1811 {
 1812         ptsignal(p, SIGSYS, STHREAD);
 1813         return (ENOSYS);
 1814 }
 1815 
 1816 int
 1817 sys___thrsigdivert(struct proc *p, void *v, register_t *retval)
 1818 {
 1819         static int sigwaitsleep;
 1820         struct sys___thrsigdivert_args /* {
 1821                 syscallarg(sigset_t) sigmask;
 1822                 syscallarg(siginfo_t *) info;
 1823                 syscallarg(const struct timespec *) timeout;
 1824         } */ *uap = v;
 1825         struct sigctx ctx;
 1826         sigset_t mask = SCARG(uap, sigmask) &~ sigcantmask;
 1827         siginfo_t si;
 1828         uint64_t nsecs = INFSLP;
 1829         int timeinvalid = 0;
 1830         int error = 0;
 1831 
 1832         memset(&si, 0, sizeof(si));
 1833 
 1834         if (SCARG(uap, timeout) != NULL) {
 1835                 struct timespec ts;
 1836                 if ((error = copyin(SCARG(uap, timeout), &ts, sizeof(ts))) != 0)
 1837                         return (error);
 1838 #ifdef KTRACE
 1839                 if (KTRPOINT(p, KTR_STRUCT))
 1840                         ktrreltimespec(p, &ts);
 1841 #endif
 1842                 if (!timespecisvalid(&ts))
 1843                         timeinvalid = 1;
 1844                 else
 1845                         nsecs = TIMESPEC_TO_NSEC(&ts);
 1846         }
 1847 
 1848         dosigsuspend(p, p->p_sigmask &~ mask);
 1849         for (;;) {
 1850                 si.si_signo = cursig(p, &ctx);
 1851                 if (si.si_signo != 0) {
 1852                         sigset_t smask = sigmask(si.si_signo);
 1853                         if (smask & mask) {
 1854                                 atomic_clearbits_int(&p->p_siglist, smask);
 1855                                 error = 0;
 1856                                 break;
 1857                         }
 1858                 }
 1859 
 1860                 /* per-POSIX, delay this error until after the above */
 1861                 if (timeinvalid)
 1862                         error = EINVAL;
 1863                 /* per-POSIX, return immediately if timeout is zero-valued */
 1864                 if (nsecs == 0)
 1865                         error = EAGAIN;
 1866 
 1867                 if (error != 0)
 1868                         break;
 1869 
 1870                 error = tsleep_nsec(&sigwaitsleep, PPAUSE|PCATCH, "sigwait",
 1871                     nsecs);
 1872         }
 1873 
 1874         if (error == 0) {
 1875                 *retval = si.si_signo;
 1876                 if (SCARG(uap, info) != NULL) {
 1877                         error = copyout(&si, SCARG(uap, info), sizeof(si));
 1878 #ifdef KTRACE
 1879                         if (error == 0 && KTRPOINT(p, KTR_STRUCT))
 1880                                 ktrsiginfo(p, &si);
 1881 #endif
 1882                 }
 1883         } else if (error == ERESTART && SCARG(uap, timeout) != NULL) {
 1884                 /*
 1885                  * Restarting is wrong if there's a timeout, as it'll be
 1886                  * for the same interval again
 1887                  * for the same interval again.
 1888                 error = EINTR;
 1889         }
 1890 
 1891         return (error);
 1892 }
 1893 
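sys___thrsigdivert() is the kernel side of the sigwait family; the assumption here is that the C library maps sigtimedwait(3) (or sigwait(3)) onto it. A POSIX-style userland sketch showing the block-then-wait pattern this code expects:

#include <signal.h>
#include <stdio.h>
#include <time.h>

int
main(void)
{
	struct timespec ts = { .tv_sec = 2 };	/* give up after two seconds */
	sigset_t set;
	siginfo_t info;
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	/* Keep the signal pending instead of delivering it to a handler. */
	sigprocmask(SIG_BLOCK, &set, NULL);

	sig = sigtimedwait(&set, &info, &ts);
	if (sig == -1)
		perror("sigtimedwait");		/* e.g. EAGAIN on timeout */
	else
		printf("took signal %d, si_code %d\n", sig, info.si_code);
	return 0;
}
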
 1894 void
 1895 initsiginfo(siginfo_t *si, int sig, u_long trapno, int code, union sigval val)
 1896 {
 1897         memset(si, 0, sizeof(*si));
 1898 
 1899         si->si_signo = sig;
 1900         si->si_code = code;
 1901         if (code == SI_USER) {
 1902                 si->si_value = val;
 1903         } else {
 1904                 switch (sig) {
 1905                 case SIGSEGV:
 1906                 case SIGILL:
 1907                 case SIGBUS:
 1908                 case SIGFPE:
 1909                         si->si_addr = val.sival_ptr;
 1910                         si->si_trapno = trapno;
 1911                         break;
 1912                 case SIGXFSZ:
 1913                         break;
 1914                 }
 1915         }
 1916 }
 1917 
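The siginfo_t filled in by initsiginfo() is what an SA_SIGINFO handler receives; for the fault signals the kernel supplies si_addr and si_trapno as above. A hypothetical userland sketch (fprintf in a signal handler is not async-signal-safe and is tolerated here only for illustration):

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void
on_segv(int sig, siginfo_t *si, void *ctx)
{
	fprintf(stderr, "SIGSEGV at address %p, si_code %d\n",
	    si->si_addr, si->si_code);
	_exit(1);
}

int
main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof sa);
	sa.sa_sigaction = on_segv;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGSEGV, &sa, NULL);

	*(volatile int *)0 = 1;		/* force a fault */
	return 0;
}
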
 1918 int
 1919 filt_sigattach(struct knote *kn)
 1920 {
 1921         struct process *pr = curproc->p_p;
 1922         int s;
 1923 
 1924         if (kn->kn_id >= NSIG)
 1925                 return EINVAL;
 1926 
 1927         kn->kn_ptr.p_process = pr;
 1928         kn->kn_flags |= EV_CLEAR;               /* automatically set */
 1929 
 1930         s = splhigh();
 1931         klist_insert_locked(&pr->ps_klist, kn);
 1932         splx(s);
 1933 
 1934         return (0);
 1935 }
 1936 
 1937 void
 1938 filt_sigdetach(struct knote *kn)
 1939 {
 1940         struct process *pr = kn->kn_ptr.p_process;
 1941         int s;
 1942 
 1943         s = splhigh();
 1944         klist_remove_locked(&pr->ps_klist, kn);
 1945         splx(s);
 1946 }
 1947 
 1948 /*
 1949  * signal knotes are shared with proc knotes, so we apply a mask to
 1950  * the hint in order to differentiate them from process hints.  This
 1951  * could be avoided by using a signal-specific knote list, but probably
 1952  * isn't worth the trouble.
 1953  */
 1954 int
 1955 filt_signal(struct knote *kn, long hint)
 1956 {
 1957 
 1958         if (hint & NOTE_SIGNAL) {
 1959                 hint &= ~NOTE_SIGNAL;
 1960 
 1961                 if (kn->kn_id == hint)
 1962                         kn->kn_data++;
 1963         }
 1964         return (kn->kn_data != 0);
 1965 }
 1966 
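From userland this filter is reached through kqueue's EVFILT_SIGNAL. A small sketch, assuming the usual BSD semantics where the filter counts posted signals even when the signal itself is ignored:

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct kevent kev;
	int kq;

	signal(SIGUSR1, SIG_IGN);	/* delivery disposition is separate */

	if ((kq = kqueue()) == -1)
		return 1;
	EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);

	kill(getpid(), SIGUSR1);
	kill(getpid(), SIGUSR1);

	if (kevent(kq, NULL, 0, &kev, 1, NULL) == 1)
		printf("signal %lu posted %lld time(s)\n",
		    (unsigned long)kev.ident, (long long)kev.data);
	return 0;
}
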
 1967 void
 1968 userret(struct proc *p)
 1969 {
 1970         struct sigctx ctx;
 1971         int signum;
 1972 
 1973         /* send SIGPROF or SIGVTALRM if their timers interrupted this thread */
 1974         if (p->p_flag & P_PROFPEND) {
 1975                 atomic_clearbits_int(&p->p_flag, P_PROFPEND);
 1976                 KERNEL_LOCK();
 1977                 psignal(p, SIGPROF);
 1978                 KERNEL_UNLOCK();
 1979         }
 1980         if (p->p_flag & P_ALRMPEND) {
 1981                 atomic_clearbits_int(&p->p_flag, P_ALRMPEND);
 1982                 KERNEL_LOCK();
 1983                 psignal(p, SIGVTALRM);
 1984                 KERNEL_UNLOCK();
 1985         }
 1986 
 1987         if (SIGPENDING(p) != 0) {
 1988                 while ((signum = cursig(p, &ctx)) != 0)
 1989                         postsig(p, signum, &ctx);
 1990         }
 1991 
 1992         /*
 1993          * If P_SIGSUSPEND is still set here, then we still need to restore
 1994          * the original sigmask before returning to userspace.  Also, this
 1995          * might unmask some pending signals, so we need to check a second
 1996          * time for signals to post.
 1997          */
 1998         if (p->p_flag & P_SIGSUSPEND) {
 1999                 atomic_clearbits_int(&p->p_flag, P_SIGSUSPEND);
 2000                 p->p_sigmask = p->p_oldmask;
 2001 
 2002                 while ((signum = cursig(p, &ctx)) != 0)
 2003                         postsig(p, signum, &ctx);
 2004         }
 2005 
 2006         if (p->p_flag & P_SUSPSINGLE)
 2007                 single_thread_check(p, 0);
 2008 
 2009         WITNESS_WARN(WARN_PANIC, NULL, "userret: returning");
 2010 
 2011         p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
 2012 }
 2013 
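The P_SIGSUSPEND handling above is the kernel half of sigsuspend(2): the temporary mask is used while sleeping and the pre-call mask is restored afterwards. A minimal POSIX sketch of the userland half:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t seen;

static void
on_usr1(int sig)
{
	seen = 1;
}

int
main(void)
{
	struct sigaction sa;
	sigset_t block, old;

	memset(&sa, 0, sizeof sa);
	sa.sa_handler = on_usr1;
	sigaction(SIGUSR1, &sa, NULL);

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &old);

	kill(getpid(), SIGUSR1);	/* stays pending while blocked */

	/*
	 * Atomically install `old' (SIGUSR1 unblocked) and wait; when the
	 * handler returns, the kernel restores the previous mask, which is
	 * the p_oldmask handling described above.
	 */
	sigsuspend(&old);

	printf("handler ran: %d\n", (int)seen);
	return 0;
}
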
 2014 int
 2015 single_thread_check_locked(struct proc *p, int deep, int s)
 2016 {
 2017         struct process *pr = p->p_p;
 2018 
 2019         SCHED_ASSERT_LOCKED();
 2020 
 2021         if (pr->ps_single != NULL && pr->ps_single != p) {
 2022                 do {
 2023                         /* if we're in deep, we need to unwind to the edge */
 2024                         if (deep) {
 2025                                 if (pr->ps_flags & PS_SINGLEUNWIND)
 2026                                         return (ERESTART);
 2027                                 if (pr->ps_flags & PS_SINGLEEXIT)
 2028                                         return (EINTR);
 2029                         }
 2030 
 2031                         if (atomic_dec_int_nv(&pr->ps_singlecount) == 0)
 2032                                 wakeup(&pr->ps_singlecount);
 2033 
 2034                         if (pr->ps_flags & PS_SINGLEEXIT) {
 2035                                 SCHED_UNLOCK(s);
 2036                                 KERNEL_LOCK();
 2037                                 exit1(p, 0, 0, EXIT_THREAD_NOCHECK);
 2038                                 /* NOTREACHED */
 2039                         }
 2040 
 2041                         /* not exiting and don't need to unwind, so suspend */
 2042                         p->p_stat = SSTOP;
 2043                         mi_switch();
 2044                 } while (pr->ps_single != NULL);
 2045         }
 2046 
 2047         return (0);
 2048 }
 2049 
 2050 int
 2051 single_thread_check(struct proc *p, int deep)
 2052 {
 2053         int s, error;
 2054 
 2055         SCHED_LOCK(s);
 2056         error = single_thread_check_locked(p, deep, s);
 2057         SCHED_UNLOCK(s);
 2058 
 2059         return error;
 2060 }
 2061 
 2062 /*
 2063  * Stop other threads in the process.  The mode controls how and
 2064  * where the other threads should stop:
 2065  *  - SINGLE_SUSPEND: stop wherever they are, will later either be told to exit
 2066  *    (by setting to SINGLE_EXIT) or be released (via single_thread_clear())
 2067  *  - SINGLE_UNWIND: just unwind to kernel boundary, will be told to exit
 2068  *    or released as with SINGLE_SUSPEND
 2069  *  - SINGLE_EXIT: unwind to kernel boundary and exit
 2070  */
 2071 int
 2072 single_thread_set(struct proc *p, enum single_thread_mode mode, int wait)
 2073 {
 2074         struct process *pr = p->p_p;
 2075         struct proc *q;
 2076         int error, s;
 2077 
 2078         KASSERT(curproc == p);
 2079 
 2080         SCHED_LOCK(s);
 2081         error = single_thread_check_locked(p, (mode == SINGLE_UNWIND), s);
 2082         if (error) {
 2083                 SCHED_UNLOCK(s);
 2084                 return error;
 2085         }
 2086 
 2087         switch (mode) {
 2088         case SINGLE_SUSPEND:
 2089                 break;
 2090         case SINGLE_UNWIND:
 2091                 atomic_setbits_int(&pr->ps_flags, PS_SINGLEUNWIND);
 2092                 break;
 2093         case SINGLE_EXIT:
 2094                 atomic_setbits_int(&pr->ps_flags, PS_SINGLEEXIT);
 2095                 atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND);
 2096                 break;
 2097 #ifdef DIAGNOSTIC
 2098         default:
 2099                 panic("single_thread_mode = %d", mode);
 2100 #endif
 2101         }
 2102         pr->ps_singlecount = 0;
 2103         membar_producer();
 2104         pr->ps_single = p;
 2105         TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
 2106                 if (q == p)
 2107                         continue;
 2108                 if (q->p_flag & P_WEXIT) {
 2109                         if (mode == SINGLE_EXIT) {
 2110                                 if (q->p_stat == SSTOP) {
 2111                                         setrunnable(q);
 2112                                         atomic_inc_int(&pr->ps_singlecount);
 2113                                 }
 2114                         }
 2115                         continue;
 2116                 }
 2117                 atomic_setbits_int(&q->p_flag, P_SUSPSINGLE);
 2118                 switch (q->p_stat) {
 2119                 case SIDL:
 2120                 case SRUN:
 2121                         atomic_inc_int(&pr->ps_singlecount);
 2122                         break;
 2123                 case SSLEEP:
 2124                         /* if it's not interruptible, then we just have to wait */
 2125                         if (q->p_flag & P_SINTR) {
 2126                                 /* merely need to suspend?  just stop it */
 2127                                 if (mode == SINGLE_SUSPEND) {
 2128                                         q->p_stat = SSTOP;
 2129                                         break;
 2130                                 }
 2131                                 /* need to unwind or exit, so wake it */
 2132                                 setrunnable(q);
 2133                         }
 2134                         atomic_inc_int(&pr->ps_singlecount);
 2135                         break;
 2136                 case SSTOP:
 2137                         if (mode == SINGLE_EXIT) {
 2138                                 setrunnable(q);
 2139                                 atomic_inc_int(&pr->ps_singlecount);
 2140                         }
 2141                         break;
 2142                 case SDEAD:
 2143                         break;
 2144                 case SONPROC:
 2145                         atomic_inc_int(&pr->ps_singlecount);
 2146                         signotify(q);
 2147                         break;
 2148                 }
 2149         }
 2150         SCHED_UNLOCK(s);
 2151 
 2152         if (wait)
 2153                 single_thread_wait(pr, 1);
 2154 
 2155         return 0;
 2156 }
 2157 
 2158 /*
 2159  * Wait for other threads to stop.  If recheck is false, the function
 2160  * returns non-zero if the caller needs to restart the check, and 0
 2161  * otherwise.  If recheck is true, the return value is always 0.
 2162  */
 2163 int
 2164 single_thread_wait(struct process *pr, int recheck)
 2165 {
 2166         struct sleep_state sls;
 2167         int wait;
 2168 
 2169         /* wait until they're all suspended */
 2170         wait = pr->ps_singlecount > 0;
 2171         while (wait) {
 2172                 sleep_setup(&sls, &pr->ps_singlecount, PWAIT, "suspend", 0);
 2173                 wait = pr->ps_singlecount > 0;
 2174                 sleep_finish(&sls, wait);
 2175                 if (!recheck)
 2176                         break;
 2177         }
 2178 
 2179         return wait;
 2180 }
 2181 
 2182 void
 2183 single_thread_clear(struct proc *p, int flag)
 2184 {
 2185         struct process *pr = p->p_p;
 2186         struct proc *q;
 2187         int s;
 2188 
 2189         KASSERT(pr->ps_single == p);
 2190         KASSERT(curproc == p);
 2191 
 2192         SCHED_LOCK(s);
 2193         pr->ps_single = NULL;
 2194         atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND | PS_SINGLEEXIT);
 2195         TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
 2196                 if (q == p || (q->p_flag & P_SUSPSINGLE) == 0)
 2197                         continue;
 2198                 atomic_clearbits_int(&q->p_flag, P_SUSPSINGLE);
 2199 
 2200                 /*
 2201                  * If the thread was only stopped for single threading,
 2202                  * then clearing that either makes it runnable or puts
 2203                  * it back to sleep on its wait channel.
 2204                  */
 2205                 if (q->p_stat == SSTOP && (q->p_flag & flag) == 0) {
 2206                         if (q->p_wchan == NULL)
 2207                                 setrunnable(q);
 2208                         else
 2209                                 q->p_stat = SSLEEP;
 2210                 }
 2211         }
 2212         SCHED_UNLOCK(s);
 2213 }
 2214 
 2215 void
 2216 sigio_del(struct sigiolst *rmlist)
 2217 {
 2218         struct sigio *sigio;
 2219 
 2220         while ((sigio = LIST_FIRST(rmlist)) != NULL) {
 2221                 LIST_REMOVE(sigio, sio_pgsigio);
 2222                 crfree(sigio->sio_ucred);
 2223                 free(sigio, M_SIGIO, sizeof(*sigio));
 2224         }
 2225 }
 2226 
 2227 void
 2228 sigio_unlink(struct sigio_ref *sir, struct sigiolst *rmlist)
 2229 {
 2230         struct sigio *sigio;
 2231 
 2232         MUTEX_ASSERT_LOCKED(&sigio_lock);
 2233 
 2234         sigio = sir->sir_sigio;
 2235         if (sigio != NULL) {
 2236                 KASSERT(sigio->sio_myref == sir);
 2237                 sir->sir_sigio = NULL;
 2238 
 2239                 if (sigio->sio_pgid > 0)
 2240                         sigio->sio_proc = NULL;
 2241                 else
 2242                         sigio->sio_pgrp = NULL;
 2243                 LIST_REMOVE(sigio, sio_pgsigio);
 2244 
 2245                 LIST_INSERT_HEAD(rmlist, sigio, sio_pgsigio);
 2246         }
 2247 }
 2248 
 2249 void
 2250 sigio_free(struct sigio_ref *sir)
 2251 {
 2252         struct sigiolst rmlist;
 2253 
 2254         if (sir->sir_sigio == NULL)
 2255                 return;
 2256 
 2257         LIST_INIT(&rmlist);
 2258 
 2259         mtx_enter(&sigio_lock);
 2260         sigio_unlink(sir, &rmlist);
 2261         mtx_leave(&sigio_lock);
 2262 
 2263         sigio_del(&rmlist);
 2264 }
 2265 
 2266 void
 2267 sigio_freelist(struct sigiolst *sigiolst)
 2268 {
 2269         struct sigiolst rmlist;
 2270         struct sigio *sigio;
 2271 
 2272         if (LIST_EMPTY(sigiolst))
 2273                 return;
 2274 
 2275         LIST_INIT(&rmlist);
 2276 
 2277         mtx_enter(&sigio_lock);
 2278         while ((sigio = LIST_FIRST(sigiolst)) != NULL)
 2279                 sigio_unlink(sigio->sio_myref, &rmlist);
 2280         mtx_leave(&sigio_lock);
 2281 
 2282         sigio_del(&rmlist);
 2283 }
 2284 
 2285 int
 2286 sigio_setown(struct sigio_ref *sir, u_long cmd, caddr_t data)
 2287 {
 2288         struct sigiolst rmlist;
 2289         struct proc *p = curproc;
 2290         struct pgrp *pgrp = NULL;
 2291         struct process *pr = NULL;
 2292         struct sigio *sigio;
 2293         int error;
 2294         pid_t pgid = *(int *)data;
 2295 
 2296         if (pgid == 0) {
 2297                 sigio_free(sir);
 2298                 return (0);
 2299         }
 2300 
 2301         if (cmd == TIOCSPGRP) {
 2302                 if (pgid < 0)
 2303                         return (EINVAL);
 2304                 pgid = -pgid;
 2305         }
 2306 
 2307         sigio = malloc(sizeof(*sigio), M_SIGIO, M_WAITOK);
 2308         sigio->sio_pgid = pgid;
 2309         sigio->sio_ucred = crhold(p->p_ucred);
 2310         sigio->sio_myref = sir;
 2311 
 2312         LIST_INIT(&rmlist);
 2313 
 2314         /*
 2315          * Holding the kernel lock and not sleeping between prfind()/
 2316          * pgfind() and linking the sigio ensure that the process or
 2317          * process group does not disappear unexpectedly.
 2318          */
 2319         KERNEL_LOCK();
 2320         mtx_enter(&sigio_lock);
 2321 
 2322         if (pgid > 0) {
 2323                 pr = prfind(pgid);
 2324                 if (pr == NULL) {
 2325                         error = ESRCH;
 2326                         goto fail;
 2327                 }
 2328 
 2329                 /*
 2330                  * Policy - Don't allow a process to FSETOWN a process
 2331                  * in another session.
 2332                  *
 2333                  * Remove this test to allow maximum flexibility or
 2334                  * restrict FSETOWN to the current process or process
 2335                  * group for maximum safety.
 2336                  */
 2337                 if (pr->ps_session != p->p_p->ps_session) {
 2338                         error = EPERM;
 2339                         goto fail;
 2340                 }
 2341 
 2342                 if ((pr->ps_flags & PS_EXITING) != 0) {
 2343                         error = ESRCH;
 2344                         goto fail;
 2345                 }
 2346         } else /* if (pgid < 0) */ {
 2347                 pgrp = pgfind(-pgid);
 2348                 if (pgrp == NULL) {
 2349                         error = ESRCH;
 2350                         goto fail;
 2351                 }
 2352 
 2353                 /*
 2354                  * Policy - Don't allow a process to FSETOWN a process
 2355                  * in another session.
 2356                  *
 2357                  * Remove this test to allow maximum flexibility or
 2358                  * restrict FSETOWN to the current process or process
 2359                  * group for maximum safety.
 2360                  */
 2361                 if (pgrp->pg_session != p->p_p->ps_session) {
 2362                         error = EPERM;
 2363                         goto fail;
 2364                 }
 2365         }
 2366 
 2367         if (pgid > 0) {
 2368                 sigio->sio_proc = pr;
 2369                 LIST_INSERT_HEAD(&pr->ps_sigiolst, sigio, sio_pgsigio);
 2370         } else {
 2371                 sigio->sio_pgrp = pgrp;
 2372                 LIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
 2373         }
 2374 
 2375         sigio_unlink(sir, &rmlist);
 2376         sir->sir_sigio = sigio;
 2377 
 2378         mtx_leave(&sigio_lock);
 2379         KERNEL_UNLOCK();
 2380 
 2381         sigio_del(&rmlist);
 2382 
 2383         return (0);
 2384 
 2385 fail:
 2386         mtx_leave(&sigio_lock);
 2387         KERNEL_UNLOCK();
 2388 
 2389         crfree(sigio->sio_ucred);
 2390         free(sigio, M_SIGIO, sizeof(*sigio));
 2391 
 2392         return (error);
 2393 }
 2394 
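sigio_setown() is reached from userland via FIOSETOWN/TIOCSPGRP, typically through fcntl(F_SETOWN) on a socket, after which FIOASYNC/O_ASYNC delivery produces SIGIO. A hypothetical sketch using a socketpair (assuming the usual BSD fcntl(2) mapping):

#include <sys/types.h>
#include <sys/socket.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_io;

static void
on_sigio(int sig)
{
	got_io = 1;
}

int
main(void)
{
	int sv[2];

	signal(SIGIO, on_sigio);
	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == -1)
		return 1;

	/* Direct SIGIO for sv[0] at this process, then enable O_ASYNC. */
	fcntl(sv[0], F_SETOWN, getpid());
	fcntl(sv[0], F_SETFL, fcntl(sv[0], F_GETFL) | O_ASYNC);

	write(sv[1], "x", 1);		/* sv[0] becomes readable -> SIGIO */
	sleep(1);
	printf("SIGIO received: %d\n", (int)got_io);
	return 0;
}
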
 2395 void
 2396 sigio_getown(struct sigio_ref *sir, u_long cmd, caddr_t data)
 2397 {
 2398         struct sigio *sigio;
 2399         pid_t pgid = 0;
 2400 
 2401         mtx_enter(&sigio_lock);
 2402         sigio = sir->sir_sigio;
 2403         if (sigio != NULL)
 2404                 pgid = sigio->sio_pgid;
 2405         mtx_leave(&sigio_lock);
 2406 
 2407         if (cmd == TIOCGPGRP)
 2408                 pgid = -pgid;
 2409 
 2410         *(int *)data = pgid;
 2411 }
 2412 
 2413 void
 2414 sigio_copy(struct sigio_ref *dst, struct sigio_ref *src)
 2415 {
 2416         struct sigiolst rmlist;
 2417         struct sigio *newsigio, *sigio;
 2418 
 2419         sigio_free(dst);
 2420 
 2421         if (src->sir_sigio == NULL)
 2422                 return;
 2423 
 2424         newsigio = malloc(sizeof(*newsigio), M_SIGIO, M_WAITOK);
 2425         LIST_INIT(&rmlist);
 2426 
 2427         mtx_enter(&sigio_lock);
 2428 
 2429         sigio = src->sir_sigio;
 2430         if (sigio == NULL) {
 2431                 mtx_leave(&sigio_lock);
 2432                 free(newsigio, M_SIGIO, sizeof(*newsigio));
 2433                 return;
 2434         }
 2435 
 2436         newsigio->sio_pgid = sigio->sio_pgid;
 2437         newsigio->sio_ucred = crhold(sigio->sio_ucred);
 2438         newsigio->sio_myref = dst;
 2439         if (newsigio->sio_pgid > 0) {
 2440                 newsigio->sio_proc = sigio->sio_proc;
 2441                 LIST_INSERT_HEAD(&newsigio->sio_proc->ps_sigiolst, newsigio,
 2442                     sio_pgsigio);
 2443         } else {
 2444                 newsigio->sio_pgrp = sigio->sio_pgrp;
 2445                 LIST_INSERT_HEAD(&newsigio->sio_pgrp->pg_sigiolst, newsigio,
 2446                     sio_pgsigio);
 2447         }
 2448 
 2449         sigio_unlink(dst, &rmlist);
 2450         dst->sir_sigio = newsigio;
 2451 
 2452         mtx_leave(&sigio_lock);
 2453 
 2454         sigio_del(&rmlist);
 2455 }
