FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_sleepq.c

/*      $NetBSD: kern_sleepq.c,v 1.35 2008/10/15 06:51:20 wrstuden Exp $        */

/*-
 * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Sleep queue implementation, used by turnstiles and general sleep/wakeup
 * interfaces.
 */
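/*
 * Most kernel code does not use these queues directly; they underpin
 * higher-level primitives such as condition variables and turnstiles.
 * A minimal consumer sketch (the "sc_lock", "sc_cv" and "sc_ready"
 * names are hypothetical driver state):
 *
 *      mutex_enter(&sc->sc_lock);
 *      while (!sc->sc_ready)
 *              cv_wait(&sc->sc_cv, &sc->sc_lock);
 *      mutex_exit(&sc->sc_lock);
 *
 * Here cv_wait() ultimately sleeps via the sleepq routines below.
 */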
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.35 2008/10/15 06:51:20 wrstuden Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/cpu.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/sched.h>
#include <sys/systm.h>
#include <sys/sleepq.h>
#include <sys/ktrace.h>

#include <uvm/uvm_extern.h>

#include "opt_sa.h"

int     sleepq_sigtoerror(lwp_t *, int);

/* General purpose sleep table, used by ltsleep() and condition variables. */
sleeptab_t      sleeptab;

/*
 * sleeptab_init:
 *
 *      Initialize a sleep table.
 */
void
sleeptab_init(sleeptab_t *st)
{
        sleepq_t *sq;
        int i;

        for (i = 0; i < SLEEPTAB_HASH_SIZE; i++) {
                sq = &st->st_queues[i].st_queue;
                mutex_init(&st->st_queues[i].st_mutex, MUTEX_DEFAULT,
                    IPL_SCHED);
                sleepq_init(sq);
        }
}
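/*
 * A wait channel address is mapped to one of the SLEEPTAB_HASH_SIZE
 * buckets initialized above; the real lookup is sleeptab_lookup() in
 * sys/sleepq.h.  A sketch of the idea, with the locking and the exact
 * hash function omitted ("example_lookup" is a hypothetical name):
 *
 *      sleepq_t *
 *      example_lookup(sleeptab_t *st, wchan_t wchan, kmutex_t **mp)
 *      {
 *              u_int hash = SLEEPTAB_HASH(wchan);
 *
 *              *mp = &st->st_queues[hash].st_mutex;
 *              return &st->st_queues[hash].st_queue;
 *      }
 */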
/*
 * sleepq_init:
 *
 *      Prepare a sleep queue for use.
 */
void
sleepq_init(sleepq_t *sq)
{

        TAILQ_INIT(sq);
}

/*
 * sleepq_remove:
 *
 *      Remove an LWP from a sleep queue and wake it up.  Return non-zero if
 *      the LWP is swapped out; if so the caller needs to awaken the swapper
 *      to bring the LWP into memory.
 */
int
sleepq_remove(sleepq_t *sq, lwp_t *l)
{
        struct schedstate_percpu *spc;
        struct cpu_info *ci;

        KASSERT(lwp_locked(l, NULL));

        TAILQ_REMOVE(sq, l, l_sleepchain);
        l->l_syncobj = &sched_syncobj;
        l->l_wchan = NULL;
        l->l_sleepq = NULL;
        l->l_flag &= ~LW_SINTR;

        ci = l->l_cpu;
        spc = &ci->ci_schedstate;

        /*
         * If not sleeping, the LWP must have been suspended.  Let whoever
         * holds it stopped set it running again.
         */
        if (l->l_stat != LSSLEEP) {
                KASSERT(l->l_stat == LSSTOP || l->l_stat == LSSUSPENDED);
                lwp_setlock(l, spc->spc_lwplock);
                return 0;
        }

        /*
         * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
         * about to call mi_switch(), in which case it will yield.
         */
        if ((l->l_pflag & LP_RUNNING) != 0) {
                l->l_stat = LSONPROC;
                l->l_slptime = 0;
                lwp_setlock(l, spc->spc_lwplock);
                return 0;
        }

        /* Update the sleep time delta and call the scheduler's wake-up handler. */
        l->l_slpticksum += (hardclock_ticks - l->l_slpticks);
        sched_wakeup(l);

        /* Choose a CPU on which to make the LWP runnable. */
        l->l_cpu = sched_takecpu(l);
        ci = l->l_cpu;
        spc = &ci->ci_schedstate;

        /*
         * Set it running.
         */
        spc_lock(ci);
        lwp_setlock(l, spc->spc_mutex);
#ifdef KERN_SA
        if (l->l_proc->p_sa != NULL)
                sa_awaken(l);
#endif /* KERN_SA */
        sched_setrunnable(l);
        l->l_stat = LSRUN;
        l->l_slptime = 0;
        if ((l->l_flag & LW_INMEM) != 0) {
                sched_enqueue(l, false);
                spc_unlock(ci);
                return 0;
        }
        spc_unlock(ci);
        return 1;
}

/*
 * sleepq_insert:
 *
 *      Insert an LWP into the sleep queue, optionally sorting by priority.
 */
inline void
sleepq_insert(sleepq_t *sq, lwp_t *l, syncobj_t *sobj)
{
        lwp_t *l2;
        const int pri = lwp_eprio(l);

        if ((sobj->sobj_flag & SOBJ_SLEEPQ_SORTED) != 0) {
                TAILQ_FOREACH(l2, sq, l_sleepchain) {
                        if (lwp_eprio(l2) < pri) {
                                TAILQ_INSERT_BEFORE(l2, l, l_sleepchain);
                                return;
                        }
                }
        }

        if ((sobj->sobj_flag & SOBJ_SLEEPQ_LIFO) != 0)
                TAILQ_INSERT_HEAD(sq, l, l_sleepchain);
        else
                TAILQ_INSERT_TAIL(sq, l, l_sleepchain);
}

/*
 * sleepq_enqueue:
 *
 *      Enter an LWP into the sleep queue and prepare for sleep.  The sleep
 *      queue must already be locked, and any interlock (such as the kernel
 *      lock) must have been released (see sleeptab_lookup(), sleepq_enter()).
 */
void
sleepq_enqueue(sleepq_t *sq, wchan_t wchan, const char *wmesg, syncobj_t *sobj)
{
        lwp_t *l = curlwp;

        KASSERT(lwp_locked(l, NULL));
        KASSERT(l->l_stat == LSONPROC);
        KASSERT(l->l_wchan == NULL && l->l_sleepq == NULL);

        l->l_syncobj = sobj;
        l->l_wchan = wchan;
        l->l_sleepq = sq;
        l->l_wmesg = wmesg;
        l->l_slptime = 0;
        l->l_stat = LSSLEEP;
        l->l_sleeperr = 0;

        sleepq_insert(sq, l, sobj);

        /* Record the time at which the LWP went to sleep. */
        l->l_slpticks = hardclock_ticks;
        sched_slept(l);
}
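/*
 * A typical sleep sequence built from these pieces, as used by the
 * higher-level sleep interfaces (a sketch; SA handling and error
 * checks omitted, with "wchan", "timo" and "catch" standing in for
 * caller state):
 *
 *      sq = sleeptab_lookup(&sleeptab, wchan, &mp);
 *      sleepq_enter(sq, l, mp);
 *      sleepq_enqueue(sq, wchan, "example", &sleep_syncobj);
 *      error = sleepq_block(timo, catch);
 *
 * sleeptab_lookup() locks the queue, sleepq_enter() makes that lock
 * the LWP's lock, and sleepq_block() releases it when switching away.
 */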

/*
 * sleepq_block:
 *
 *      After any intermediate step such as releasing an interlock, switch.
 *      sleepq_block() may return early under exceptional conditions, for
 *      example if the LWP's containing process is exiting.
 */
int
sleepq_block(int timo, bool catch)
{
        int error = 0, sig;
        struct proc *p;
        lwp_t *l = curlwp;
        bool early = false;
        int biglocks = l->l_biglocks;

        ktrcsw(1, 0);

        /*
         * If sleeping interruptibly, check for pending signals, exits or
         * core dump events.
         */
        if (catch) {
                l->l_flag |= LW_SINTR;
                if ((l->l_flag & (LW_CANCELLED|LW_WEXIT|LW_WCORE)) != 0) {
                        l->l_flag &= ~LW_CANCELLED;
                        error = EINTR;
                        early = true;
                } else if ((l->l_flag & LW_PENDSIG) != 0 && sigispending(l, 0))
                        early = true;
        }

        if (early) {
                /* lwp_unsleep() will release the lock */
                lwp_unsleep(l, true);
        } else {
                if (timo)
                        callout_schedule(&l->l_timeout_ch, timo);

#ifdef KERN_SA
                if (((l->l_flag & LW_SA) != 0) && (~l->l_pflag & LP_SA_NOBLOCK))
                        sa_switch(l);
                else
#endif
                        mi_switch(l);

                /* The LWP and sleep queue are now unlocked. */
                if (timo) {
                        /*
                         * Even if the callout appears to have fired, we need to
                         * stop it in order to synchronise with other CPUs.
                         */
                        if (callout_halt(&l->l_timeout_ch, NULL))
                                error = EWOULDBLOCK;
                }
        }

        if (catch && error == 0) {
                p = l->l_proc;
                if ((l->l_flag & (LW_CANCELLED | LW_WEXIT | LW_WCORE)) != 0)
                        error = EINTR;
                else if ((l->l_flag & LW_PENDSIG) != 0) {
                        /*
                         * Acquiring p_lock may cause us to recurse
                         * through the sleep path and back into this
                         * routine, but is safe because LWPs sleeping
                         * on locks are non-interruptible.  We will
                         * not recurse again.
                         */
                        mutex_enter(p->p_lock);
                        if ((sig = issignal(l)) != 0)
                                error = sleepq_sigtoerror(l, sig);
                        mutex_exit(p->p_lock);
                }
        }

        ktrcsw(0, 0);
        if (__predict_false(biglocks != 0)) {
                KERNEL_LOCK(biglocks, NULL);
        }
        return error;
}
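/*
 * Interpreting the return value, following the error paths above:
 * zero is a normal wakeup; EWOULDBLOCK means the timeout fired; EINTR
 * and ERESTART (via sleepq_sigtoerror()) report an interrupted sleep
 * and are only possible when "catch" was true.  A caller sketch:
 *
 *      error = sleepq_block(timo, true);
 *      if (error == EWOULDBLOCK)
 *              handle_timeout();
 *      else if (error == EINTR || error == ERESTART)
 *              return error;
 *
 * ("handle_timeout" is a hypothetical name for caller-side recovery.)
 */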

/*
 * sleepq_wake:
 *
 *      Wake zero or more LWPs blocked on a single wait channel.
 */
lwp_t *
sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected, kmutex_t *mp)
{
        lwp_t *l, *next;
        int swapin = 0;

        KASSERT(mutex_owned(mp));

        for (l = TAILQ_FIRST(sq); l != NULL; l = next) {
                KASSERT(l->l_sleepq == sq);
                KASSERT(l->l_mutex == mp);
                next = TAILQ_NEXT(l, l_sleepchain);
                if (l->l_wchan != wchan)
                        continue;
                swapin |= sleepq_remove(sq, l);
                if (--expected == 0)
                        break;
        }

        mutex_spin_exit(mp);

        /*
         * If there are newly awakened threads that need to be swapped in,
         * then kick the swapper into action.
         */
        if (swapin)
                uvm_kick_scheduler();

        return l;
}
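/*
 * The "expected" argument bounds how many LWPs are woken.  A sketch of
 * the two common wake-up patterns; note that sleepq_wake() releases
 * the queue lock before returning, so the queue must be looked up and
 * locked again for each call:
 *
 *      sq = sleeptab_lookup(&sleeptab, wchan, &mp);
 *      sleepq_wake(sq, wchan, 1, mp);
 *
 *      sq = sleeptab_lookup(&sleeptab, wchan, &mp);
 *      sleepq_wake(sq, wchan, (u_int)-1, mp);
 *
 * The first wakes at most one LWP; the second wakes every LWP that is
 * sleeping on the channel.
 */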

/*
 * sleepq_unsleep:
 *
 *      Remove an LWP from its sleep queue and set it runnable again.
 *      sleepq_unsleep() is called with the LWP's mutex held, and will
 *      always release it.
 */
u_int
sleepq_unsleep(lwp_t *l, bool cleanup)
{
        sleepq_t *sq = l->l_sleepq;
        kmutex_t *mp = l->l_mutex;
        int swapin;

        KASSERT(lwp_locked(l, mp));
        KASSERT(l->l_wchan != NULL);

        swapin = sleepq_remove(sq, l);

        if (cleanup) {
                mutex_spin_exit(mp);
                if (swapin)
                        uvm_kick_scheduler();
        }

        return swapin;
}

/*
 * sleepq_timeout:
 *
 *      Entered via the callout(9) subsystem to time out an LWP that is on a
 *      sleep queue.
 */
void
sleepq_timeout(void *arg)
{
        lwp_t *l = arg;

        /*
         * Lock the LWP.  Assuming it's still on the sleep queue, its
         * current mutex will also be the sleep queue mutex.
         */
        lwp_lock(l);

        if (l->l_wchan == NULL) {
                /* Somebody beat us to it. */
                lwp_unlock(l);
                return;
        }

        lwp_unsleep(l, true);
}
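/*
 * The callout is armed by sleepq_block() when a timeout is supplied
 * (see the callout_schedule() call above).  Binding the handler to
 * the LWP happens once, at LWP creation time, roughly:
 *
 *      callout_init(&l->l_timeout_ch, CALLOUT_MPSAFE);
 *      callout_setfunc(&l->l_timeout_ch, sleepq_timeout, l);
 */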

/*
 * sleepq_sigtoerror:
 *
 *      Given a signal number, interpret and return an error code.
 */
int
sleepq_sigtoerror(lwp_t *l, int sig)
{
        struct proc *p = l->l_proc;
        int error;

        KASSERT(mutex_owned(p->p_lock));

        /*
         * Decide whether the interrupted system call may restart,
         * based on the signal's SA_RESTART disposition.
         */
        if ((SIGACTION(p, sig).sa_flags & SA_RESTART) == 0)
                error = EINTR;
        else
                error = ERESTART;

        return error;
}

/*
 * sleepq_abort:
 *
 *      After a panic or during autoconfiguration, lower the interrupt
 *      priority level to give pending interrupts a chance to run, and
 *      then return.  Called if sleepq_dontsleep() returns non-zero, and
 *      always returns zero.
 */
int
sleepq_abort(kmutex_t *mtx, int unlock)
{
        extern int safepri;
        int s;

        s = splhigh();
        splx(safepri);
        splx(s);
        if (mtx != NULL && unlock != 0)
                mutex_exit(mtx);

        return 0;
}
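/*
 * A sketch of the guard used by the sleep interfaces before touching
 * the sleep queues:
 *
 *      if (sleepq_dontsleep(l))
 *              return sleepq_abort(mtx, unlock);
 */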

/*
 * sleepq_changepri:
 *
 *      Adjust the user priority of an LWP residing on a sleepq.  If the
 *      change also alters the LWP's effective priority, re-sort it into
 *      the queue.
 */
void
sleepq_changepri(lwp_t *l, pri_t pri)
{
        sleepq_t *sq = l->l_sleepq;
        pri_t opri;

        KASSERT(lwp_locked(l, NULL));

        opri = lwp_eprio(l);
        l->l_priority = pri;

        if (lwp_eprio(l) == opri) {
                return;
        }
        if ((l->l_syncobj->sobj_flag & SOBJ_SLEEPQ_SORTED) == 0) {
                return;
        }

        /*
         * Don't let the sleep queue become empty, even briefly.
         * cv_signal() and cv_broadcast() inspect it without the
         * sleep queue lock held and need to see a non-empty queue
         * head if there are waiters.
         */
        if (TAILQ_FIRST(sq) == l && TAILQ_NEXT(l, l_sleepchain) == NULL) {
                return;
        }
        TAILQ_REMOVE(sq, l, l_sleepchain);
        sleepq_insert(sq, l, l->l_syncobj);
}

/*
 * sleepq_lendpri:
 *
 *      Lend a priority to an LWP residing on a sleepq (used for
 *      priority inheritance).  If the change alters the LWP's
 *      effective priority, re-sort it into the queue.
 */
void
sleepq_lendpri(lwp_t *l, pri_t pri)
{
        sleepq_t *sq = l->l_sleepq;
        pri_t opri;

        KASSERT(lwp_locked(l, NULL));

        opri = lwp_eprio(l);
        l->l_inheritedprio = pri;

        if (lwp_eprio(l) == opri) {
                return;
        }
        if ((l->l_syncobj->sobj_flag & SOBJ_SLEEPQ_SORTED) == 0) {
                return;
        }

        /*
         * Don't let the sleep queue become empty, even briefly.
         * cv_signal() and cv_broadcast() inspect it without the
         * sleep queue lock held and need to see a non-empty queue
         * head if there are waiters.
         */
        if (TAILQ_FIRST(sq) == l && TAILQ_NEXT(l, l_sleepchain) == NULL) {
                return;
        }
        TAILQ_REMOVE(sq, l, l_sleepchain);
        sleepq_insert(sq, l, l->l_syncobj);
}
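/*
 * These handlers, together with sleepq_unsleep(), are wired into sync
 * objects so that the LWP code can manipulate a sleeping LWP without
 * knowing which primitive it sleeps on.  The general sleep sync object
 * in kern_synch.c looks roughly like:
 *
 *      syncobj_t sleep_syncobj = {
 *              SOBJ_SLEEPQ_SORTED,
 *              sleepq_unsleep,
 *              sleepq_changepri,
 *              sleepq_lendpri,
 *              syncobj_noowner,
 *      };
 */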
