FreeBSD/Linux Kernel Cross Reference
sys/kern/lwkt_thread.c


    1 /*
    2  * Copyright (c) 2003-2011 The DragonFly Project.  All rights reserved.
    3  *
    4  * This code is derived from software contributed to The DragonFly Project
    5  * by Matthew Dillon <dillon@backplane.com>
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  *
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in
   15  *    the documentation and/or other materials provided with the
   16  *    distribution.
   17  * 3. Neither the name of The DragonFly Project nor the names of its
   18  *    contributors may be used to endorse or promote products derived
   19  *    from this software without specific, prior written permission.
   20  *
   21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
   24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
   25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
   27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
   28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
   29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
   30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
   31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   32  * SUCH DAMAGE.
   33  */
   34 
   35 /*
   36  * Each cpu in a system has its own self-contained lightweight kernel
   37  * thread scheduler, which means that generally speaking we only need
   38  * to use a critical section to avoid problems.  Foreign thread
   39  * scheduling is queued via (async) IPIs.
   40  */
   41 
   42 #include <sys/param.h>
   43 #include <sys/systm.h>
   44 #include <sys/kernel.h>
   45 #include <sys/proc.h>
   46 #include <sys/rtprio.h>
   47 #include <sys/kinfo.h>
   48 #include <sys/queue.h>
   49 #include <sys/sysctl.h>
   50 #include <sys/kthread.h>
   51 #include <machine/cpu.h>
   52 #include <sys/lock.h>
   53 #include <sys/spinlock.h>
   54 #include <sys/ktr.h>
   55 
   56 #include <sys/thread2.h>
   57 #include <sys/spinlock2.h>
   58 #include <sys/mplock2.h>
   59 
   60 #include <sys/dsched.h>
   61 
   62 #include <vm/vm.h>
   63 #include <vm/vm_param.h>
   64 #include <vm/vm_kern.h>
   65 #include <vm/vm_object.h>
   66 #include <vm/vm_page.h>
   67 #include <vm/vm_map.h>
   68 #include <vm/vm_pager.h>
   69 #include <vm/vm_extern.h>
   70 
   71 #include <machine/stdarg.h>
   72 #include <machine/smp.h>
   73 
   74 #ifdef _KERNEL_VIRTUAL
   75 #include <pthread.h>
   76 #endif
   77 
   78 #if !defined(KTR_CTXSW)
   79 #define KTR_CTXSW KTR_ALL
   80 #endif
   81 KTR_INFO_MASTER(ctxsw);
   82 KTR_INFO(KTR_CTXSW, ctxsw, sw, 0, "#cpu[%d].td = %p", int cpu, struct thread *td);
   83 KTR_INFO(KTR_CTXSW, ctxsw, pre, 1, "#cpu[%d].td = %p", int cpu, struct thread *td);
   84 KTR_INFO(KTR_CTXSW, ctxsw, newtd, 2, "#threads[%p].name = %s", struct thread *td, char *comm);
   85 KTR_INFO(KTR_CTXSW, ctxsw, deadtd, 3, "#threads[%p].name = <dead>", struct thread *td);
   86 
   87 static MALLOC_DEFINE(M_THREAD, "thread", "lwkt threads");
   88 
   89 #ifdef  INVARIANTS
   90 static int panic_on_cscount = 0;
   91 #endif
   92 static __int64_t switch_count = 0;
   93 static __int64_t preempt_hit = 0;
   94 static __int64_t preempt_miss = 0;
   95 static __int64_t preempt_weird = 0;
   96 static int lwkt_use_spin_port;
   97 static struct objcache *thread_cache;
   98 int cpu_mwait_spin = 0;
   99 
  100 static void lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame);
  101 static void lwkt_setcpu_remote(void *arg);
  102 
  103 extern void cpu_heavy_restore(void);
  104 extern void cpu_lwkt_restore(void);
  105 extern void cpu_kthread_restore(void);
  106 extern void cpu_idle_restore(void);
  107 
  108 /*
  109  * We can make all thread ports use the spin backend instead of the thread
  110  * backend.  This should only be set to debug the spin backend.
  111  */
  112 TUNABLE_INT("lwkt.use_spin_port", &lwkt_use_spin_port);
  113 
  114 #ifdef  INVARIANTS
  115 SYSCTL_INT(_lwkt, OID_AUTO, panic_on_cscount, CTLFLAG_RW, &panic_on_cscount, 0,
  116     "Panic if attempting to switch lwkt's while mastering cpusync");
  117 #endif
  118 SYSCTL_INT(_hw, OID_AUTO, cpu_mwait_spin, CTLFLAG_RW, &cpu_mwait_spin, 0,
  119     "monitor/mwait target state");
  120 SYSCTL_QUAD(_lwkt, OID_AUTO, switch_count, CTLFLAG_RW, &switch_count, 0,
  121     "Number of switched threads");
  122 SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_hit, CTLFLAG_RW, &preempt_hit, 0, 
  123     "Successful preemption events");
  124 SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_miss, CTLFLAG_RW, &preempt_miss, 0, 
  125     "Failed preemption events");
  126 SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_weird, CTLFLAG_RW, &preempt_weird, 0,
  127     "Number of preempted threads.");
  128 static int fairq_enable = 0;
  129 SYSCTL_INT(_lwkt, OID_AUTO, fairq_enable, CTLFLAG_RW,
  130         &fairq_enable, 0, "Turn on fairq priority accumulators");
  131 static int fairq_bypass = -1;
  132 SYSCTL_INT(_lwkt, OID_AUTO, fairq_bypass, CTLFLAG_RW,
  133         &fairq_bypass, 0, "Allow fairq to bypass td on token failure");
  134 extern int lwkt_sched_debug;
  135 int lwkt_sched_debug = 0;
  136 SYSCTL_INT(_lwkt, OID_AUTO, sched_debug, CTLFLAG_RW,
  137         &lwkt_sched_debug, 0, "Scheduler debug");
  138 static int lwkt_spin_loops = 10;
  139 SYSCTL_INT(_lwkt, OID_AUTO, spin_loops, CTLFLAG_RW,
  140         &lwkt_spin_loops, 0, "Scheduler spin loops until sorted decontention");
  141 static int lwkt_spin_reseq = 0;
  142 SYSCTL_INT(_lwkt, OID_AUTO, spin_reseq, CTLFLAG_RW,
  143         &lwkt_spin_reseq, 0, "Scheduler resequencer enable");
  144 static int lwkt_spin_monitor = 0;
  145 SYSCTL_INT(_lwkt, OID_AUTO, spin_monitor, CTLFLAG_RW,
  146         &lwkt_spin_monitor, 0, "Scheduler uses monitor/mwait");
  147 static int lwkt_spin_fatal = 0; /* disabled */
  148 SYSCTL_INT(_lwkt, OID_AUTO, spin_fatal, CTLFLAG_RW,
  149         &lwkt_spin_fatal, 0, "LWKT scheduler spin loops till fatal panic");
  150 static int preempt_enable = 1;
  151 SYSCTL_INT(_lwkt, OID_AUTO, preempt_enable, CTLFLAG_RW,
  152         &preempt_enable, 0, "Enable preemption");
  153 static int lwkt_cache_threads = 0;
  154 SYSCTL_INT(_lwkt, OID_AUTO, cache_threads, CTLFLAG_RD,
  155         &lwkt_cache_threads, 0, "thread+kstack cache");
  156 
  157 #ifndef _KERNEL_VIRTUAL
  158 static __cachealign int lwkt_cseq_rindex;
  159 static __cachealign int lwkt_cseq_windex;
  160 #endif
  161 
  162 /*
  163  * These helper procedures handle the runq, they can only be called from
  164  * within a critical section.
  165  *
  166  * WARNING!  Prior to SMP being brought up it is possible to enqueue and
  167  * dequeue threads belonging to other cpus, so be sure to use td->td_gd
  168  * instead of 'mycpu' when referencing the globaldata structure.   Once
  169  * SMP is live, enqueuing and dequeuing only occur on the current cpu.
  170  */
  171 static __inline
  172 void
  173 _lwkt_dequeue(thread_t td)
  174 {
  175     if (td->td_flags & TDF_RUNQ) {
  176         struct globaldata *gd = td->td_gd;
  177 
  178         td->td_flags &= ~TDF_RUNQ;
  179         TAILQ_REMOVE(&gd->gd_tdrunq, td, td_threadq);
  180         --gd->gd_tdrunqcount;
  181         if (TAILQ_FIRST(&gd->gd_tdrunq) == NULL)
  182                 atomic_clear_int(&gd->gd_reqflags, RQF_RUNNING);
  183     }
  184 }
  185 
  186 /*
  187  * Priority enqueue.
  188  *
  189  * There are a limited number of lwkt threads runnable since user
  190  * processes only schedule one at a time per cpu.  However, there can
  191  * be many user processes in kernel mode exiting from a tsleep() which
  192  * become runnable.
  193  *
  194  * NOTE: lwkt_schedulerclock() will force a round-robin based on td_pri and
  195  *       will ignore user priority.  This is to ensure that user threads in
  196  *       kernel mode get cpu at some point regardless of what the user
  197  *       scheduler thinks.
  198  */
  199 static __inline
  200 void
  201 _lwkt_enqueue(thread_t td)
  202 {
  203     thread_t xtd;
  204 
  205     if ((td->td_flags & (TDF_RUNQ|TDF_MIGRATING|TDF_BLOCKQ)) == 0) {
  206         struct globaldata *gd = td->td_gd;
  207 
  208         td->td_flags |= TDF_RUNQ;
  209         xtd = TAILQ_FIRST(&gd->gd_tdrunq);
  210         if (xtd == NULL) {
  211             TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td, td_threadq);
  212             atomic_set_int(&gd->gd_reqflags, RQF_RUNNING);
  213         } else {
  214             /*
  215              * NOTE: td_upri - higher numbers more desirable, same sense
  216              *       as td_pri (typically reversed from lwp_upri).
  217              *
  218              *       In the equal priority case we want the best selection
  219              *       at the beginning so the less desirable selections know
  220              *       that they have to setrunqueue/go-to-another-cpu, even
  221              *       though it means switching back to the 'best' selection.
  222              *       This also avoids degenerate situations when many threads
  223              *       are runnable or waking up at the same time.
  224              *
  225              *       If upri matches exactly place at end/round-robin.
  226              */
  227             while (xtd &&
  228                    (xtd->td_pri > td->td_pri ||
  229                     (xtd->td_pri == td->td_pri &&
  230                      xtd->td_upri >= td->td_upri))) {
  231                 xtd = TAILQ_NEXT(xtd, td_threadq);
  232             }
  233             if (xtd)
  234                 TAILQ_INSERT_BEFORE(xtd, td, td_threadq);
  235             else
  236                 TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td, td_threadq);
  237         }
  238         ++gd->gd_tdrunqcount;
  239 
  240         /*
  241          * Request a LWKT reschedule if we are now at the head of the queue.
  242          */
  243         if (TAILQ_FIRST(&gd->gd_tdrunq) == td)
  244             need_lwkt_resched();
  245     }
  246 }
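
      /*
       * Worked example of the insertion rule above (hypothetical values):
       * with a runq of A(td_pri 30, td_upri 5) -> B(30, 3) -> C(20, 0),
       * enqueueing N(30, 3) skips A (equal pri, upri 5 >= 3) and B (equal
       * pri, upri 3 >= 3) and inserts before C (lower pri), yielding
       * A -> B -> N -> C, i.e. round-robin among exact matches.  Enqueueing
       * M(30, 4) instead stops at B (equal pri, upri 3 < 4) and yields
       * A -> M -> B -> C.  In both cases the head is unchanged, so no LWKT
       * reschedule is requested.
       */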
  247 
  248 static __boolean_t
  249 _lwkt_thread_ctor(void *obj, void *privdata, int ocflags)
  250 {
  251         struct thread *td = (struct thread *)obj;
  252 
  253         td->td_kstack = NULL;
  254         td->td_kstack_size = 0;
  255         td->td_flags = TDF_ALLOCATED_THREAD;
  256         td->td_mpflags = 0;
  257         return (1);
  258 }
  259 
  260 static void
  261 _lwkt_thread_dtor(void *obj, void *privdata)
  262 {
  263         struct thread *td = (struct thread *)obj;
  264 
  265         KASSERT(td->td_flags & TDF_ALLOCATED_THREAD,
  266             ("_lwkt_thread_dtor: not allocated from objcache"));
  267         KASSERT((td->td_flags & TDF_ALLOCATED_STACK) && td->td_kstack &&
  268                 td->td_kstack_size > 0,
  269             ("_lwkt_thread_dtor: corrupted stack"));
  270         kmem_free(&kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size);
  271         td->td_kstack = NULL;
  272         td->td_flags = 0;
  273 }
  274 
  275 /*
  276  * Initialize the lwkt subsystem.
  277  *
  278  * Nominally cache up to 32 thread + kstack structures.  Cache more on
  279  * systems with a lot of cpu cores.
  280  */
  281 void
  282 lwkt_init(void)
  283 {
  284     TUNABLE_INT("lwkt.cache_threads", &lwkt_cache_threads);
  285     if (lwkt_cache_threads == 0) {
  286         lwkt_cache_threads = ncpus * 4;
  287         if (lwkt_cache_threads < 32)
  288             lwkt_cache_threads = 32;
  289     }
  290     thread_cache = objcache_create_mbacked(
  291                                 M_THREAD, sizeof(struct thread),
  292                                 0, lwkt_cache_threads,
  293                                 _lwkt_thread_ctor, _lwkt_thread_dtor, NULL);
  294 }
  295 
  296 /*
  297  * Schedule a thread to run.  As the current thread we can always safely
  298  * schedule ourselves, and a shortcut procedure is provided for that
  299  * function.
  300  *
  301  * (non-blocking, self contained on a per cpu basis)
  302  */
  303 void
  304 lwkt_schedule_self(thread_t td)
  305 {
  306     KKASSERT((td->td_flags & TDF_MIGRATING) == 0);
  307     crit_enter_quick(td);
  308     KASSERT(td != &td->td_gd->gd_idlethread,
  309             ("lwkt_schedule_self(): scheduling gd_idlethread is illegal!"));
  310     KKASSERT(td->td_lwp == NULL ||
  311              (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
  312     _lwkt_enqueue(td);
  313     crit_exit_quick(td);
  314 }
  315 
  316 /*
  317  * Deschedule a thread.
  318  *
  319  * (non-blocking, self contained on a per cpu basis)
  320  */
  321 void
  322 lwkt_deschedule_self(thread_t td)
  323 {
  324     crit_enter_quick(td);
  325     _lwkt_dequeue(td);
  326     crit_exit_quick(td);
  327 }
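
      /*
       * A minimal sketch of how these two primitives are typically paired
       * to block the current thread until another agent reschedules it
       * (assumed usage; the real sleep path lives in the tsleep code):
       *
       *      crit_enter();
       *      lwkt_deschedule_self(curthread);
       *      ... make curthread findable by the eventual waker ...
       *      lwkt_switch();          ... returns once rescheduled ...
       *      crit_exit();
       */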
  328 
  329 /*
  330  * LWKTs operate on a per-cpu basis
  331  *
  332  * WARNING!  Called from early boot, 'mycpu' may not work yet.
  333  */
  334 void
  335 lwkt_gdinit(struct globaldata *gd)
  336 {
  337     TAILQ_INIT(&gd->gd_tdrunq);
  338     TAILQ_INIT(&gd->gd_tdallq);
  339 }
  340 
  341 /*
  342  * Create a new thread.  The thread must be associated with a process context
  343  * or LWKT start address before it can be scheduled.  If the target cpu is
  344  * -1 the thread will be created on the current cpu.
  345  *
  346  * If you intend to create a thread without a process context this function
  347  * does everything except load the startup and switcher function.
  348  */
  349 thread_t
  350 lwkt_alloc_thread(struct thread *td, int stksize, int cpu, int flags)
  351 {
  352     static int cpu_rotator;
  353     globaldata_t gd = mycpu;
  354     void *stack;
  355 
  356     /*
  357      * If static thread storage is not supplied allocate a thread.  Reuse
  358      * a cached free thread if possible.  gd_freetd is used to keep an exiting
  359      * thread intact through the exit.
  360      */
  361     if (td == NULL) {
  362         crit_enter_gd(gd);
  363         if ((td = gd->gd_freetd) != NULL) {
  364             KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|
  365                                       TDF_RUNQ)) == 0);
  366             gd->gd_freetd = NULL;
  367         } else {
  368             td = objcache_get(thread_cache, M_WAITOK);
  369             KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|
  370                                       TDF_RUNQ)) == 0);
  371         }
  372         crit_exit_gd(gd);
  373         KASSERT((td->td_flags &
  374                  (TDF_ALLOCATED_THREAD|TDF_RUNNING|TDF_PREEMPT_LOCK)) ==
  375                  TDF_ALLOCATED_THREAD,
  376                 ("lwkt_alloc_thread: corrupted td flags 0x%X", td->td_flags));
  377         flags |= td->td_flags & (TDF_ALLOCATED_THREAD|TDF_ALLOCATED_STACK);
  378     }
  379 
  380     /*
  381      * Try to reuse cached stack.
  382      */
  383     if ((stack = td->td_kstack) != NULL && td->td_kstack_size != stksize) {
  384         if (flags & TDF_ALLOCATED_STACK) {
  385             kmem_free(&kernel_map, (vm_offset_t)stack, td->td_kstack_size);
  386             stack = NULL;
  387         }
  388     }
  389     if (stack == NULL) {
  390         stack = (void *)kmem_alloc_stack(&kernel_map, stksize);
  391         flags |= TDF_ALLOCATED_STACK;
  392     }
  393     if (cpu < 0) {
  394         cpu = ++cpu_rotator;
  395         cpu_ccfence();
  396         cpu %= ncpus;
  397     }
  398     lwkt_init_thread(td, stack, stksize, flags, globaldata_find(cpu));
  399     return(td);
  400 }
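
      /*
       * Illustrative call sequence (a sketch only; LWKT_THREAD_STACK and
       * the final start-function setup are assumptions here, and most
       * callers go through the higher-level lwkt_create() path instead):
       *
       *      thread_t td;
       *
       *      td = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, -1, 0);
       *      lwkt_set_comm(td, "example%d", unit);
       *      ... load the startup/switcher functions, then lwkt_schedule(td) ...
       */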
  401 
  402 /*
  403  * Initialize a preexisting thread structure.  This function is used by
  404  * lwkt_alloc_thread() and also used to initialize the per-cpu idlethread.
  405  *
  406  * All threads start out in a critical section at a priority of
  407  * TDPRI_KERN_DAEMON.  Higher level code will modify the priority as
  408  * appropriate.  This function may send an IPI message when the 
  409  * requested cpu is not the current cpu and consequently gd_tdallq may
  410  * not be initialized synchronously from the point of view of the originating
  411  * cpu.
  412  *
  413  * NOTE! We have to be careful with regard to creating threads for other cpus
  414  * if SMP has not yet been activated.
  415  */
  416 static void
  417 lwkt_init_thread_remote(void *arg)
  418 {
  419     thread_t td = arg;
  420 
  421     /*
  422      * Protected by critical section held by IPI dispatch
  423      */
  424     TAILQ_INSERT_TAIL(&td->td_gd->gd_tdallq, td, td_allq);
  425 }
  426 
  427 /*
  428  * lwkt core thread structural initialization.
  429  *
  430  * NOTE: All threads are initialized as mpsafe threads.
  431  */
  432 void
  433 lwkt_init_thread(thread_t td, void *stack, int stksize, int flags,
  434                 struct globaldata *gd)
  435 {
  436     globaldata_t mygd = mycpu;
  437 
  438     bzero(td, sizeof(struct thread));
  439     td->td_kstack = stack;
  440     td->td_kstack_size = stksize;
  441     td->td_flags = flags;
  442     td->td_mpflags = 0;
  443     td->td_type = TD_TYPE_GENERIC;
  444     td->td_gd = gd;
  445     td->td_pri = TDPRI_KERN_DAEMON;
  446     td->td_critcount = 1;
  447     td->td_toks_have = NULL;
  448     td->td_toks_stop = &td->td_toks_base;
  449     if (lwkt_use_spin_port || (flags & TDF_FORCE_SPINPORT)) {
  450         lwkt_initport_spin(&td->td_msgport, td,
  451             (flags & TDF_FIXEDCPU) ? TRUE : FALSE);
  452     } else {
  453         lwkt_initport_thread(&td->td_msgport, td);
  454     }
  455     pmap_init_thread(td);
  456     /*
  457      * Normally initializing a thread for a remote cpu requires sending an
  458      * IPI.  However, the idlethread is set up before the other cpus are
  459      * activated so we have to treat it as a special case.  XXX manipulation
  460      * of gd_tdallq requires the BGL.
  461      */
  462     if (gd == mygd || td == &gd->gd_idlethread) {
  463         crit_enter_gd(mygd);
  464         TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
  465         crit_exit_gd(mygd);
  466     } else {
  467         lwkt_send_ipiq(gd, lwkt_init_thread_remote, td);
  468     }
  469     dsched_new_thread(td);
  470 }
  471 
  472 void
  473 lwkt_set_comm(thread_t td, const char *ctl, ...)
  474 {
  475     __va_list va;
  476 
  477     __va_start(va, ctl);
  478     kvsnprintf(td->td_comm, sizeof(td->td_comm), ctl, va);
  479     __va_end(va);
  480     KTR_LOG(ctxsw_newtd, td, td->td_comm);
  481 }
  482 
  483 /*
  484  * Prevent the thread from getting destroyed.  Note that unlike PHOLD/PRELE
  485  * this does not prevent the thread from migrating to another cpu so the
  486  * gd_tdallq state is not protected by this.
  487  */
  488 void
  489 lwkt_hold(thread_t td)
  490 {
  491     atomic_add_int(&td->td_refs, 1);
  492 }
  493 
  494 void
  495 lwkt_rele(thread_t td)
  496 {
  497     KKASSERT(td->td_refs > 0);
  498     atomic_add_int(&td->td_refs, -1);
  499 }
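
      /*
       * Typical pairing (sketch): take a reference before handing td to a
       * context that may outlive the thread's normal teardown, and drop it
       * when done; lwkt_free_thread() below requires td_refs to be zero.
       *
       *      lwkt_hold(td);
       *      ... use td from the other context ...
       *      lwkt_rele(td);
       */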
  500 
  501 void
  502 lwkt_free_thread(thread_t td)
  503 {
  504     KKASSERT(td->td_refs == 0);
  505     KKASSERT((td->td_flags & (TDF_RUNNING | TDF_PREEMPT_LOCK |
  506                               TDF_RUNQ | TDF_TSLEEPQ)) == 0);
  507     if (td->td_flags & TDF_ALLOCATED_THREAD) {
  508         objcache_put(thread_cache, td);
  509     } else if (td->td_flags & TDF_ALLOCATED_STACK) {
  510         /* client-allocated struct with internally allocated stack */
  511         KASSERT(td->td_kstack && td->td_kstack_size > 0,
  512             ("lwkt_free_thread: corrupted stack"));
  513         kmem_free(&kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size);
  514         td->td_kstack = NULL;
  515         td->td_kstack_size = 0;
  516     }
  517 
  518     KTR_LOG(ctxsw_deadtd, td);
  519 }
  520 
  521 
  522 /*
  523  * Switch to the next runnable lwkt.  If no LWKTs are runnable then 
  524  * switch to the idlethread.  Switching must occur within a critical
  525  * section to avoid races with the scheduling queue.
  526  *
  527  * We always have full control over our cpu's run queue.  Other cpus
  528  * that wish to manipulate our queue must use the cpu_*msg() calls to
  529  * talk to our cpu, so a critical section is all that is needed and
  530  * the result is very, very fast thread switching.
  531  *
  532  * The LWKT scheduler uses a fixed priority model and round-robins at
  533  * each priority level.  User process scheduling is a totally
  534  * different beast and LWKT priorities should not be confused with
  535  * user process priorities.
  536  *
  537  * PREEMPTION NOTE: Preemption occurs via lwkt_preempt().  lwkt_switch()
  538  * is not called by the current thread in the preemption case, only when
  539  * the preempting thread blocks (in order to return to the original thread).
  540  *
  541  * SPECIAL NOTE ON SWITCH ATOMICITY: Certain operations such as thread
  542  * migration and tsleep deschedule the current lwkt thread and call
  543  * lwkt_switch().  In particular, the target cpu of the migration fully
  544  * expects the thread to become non-runnable and can deadlock against
  545  * cpusync operations if we run any IPIs prior to switching the thread out.
  546  *
  547  * WE MUST BE VERY CAREFUL NOT TO RUN SPLZ DIRECTLY OR INDIRECTLY IF
  548  * THE CURRENT THREAD HAS BEEN DESCHEDULED!
  549  */
  550 void
  551 lwkt_switch(void)
  552 {
  553     globaldata_t gd = mycpu;
  554     thread_t td = gd->gd_curthread;
  555     thread_t ntd;
  556     int spinning = 0;
  557 
  558     KKASSERT(gd->gd_processing_ipiq == 0);
  559     KKASSERT(td->td_flags & TDF_RUNNING);
  560 
  561     /*
  562      * Switching from within a 'fast' (non-thread-switched) interrupt or IPI
  563      * is illegal.  However, we may have to do it anyway if we hit a fatal
  564      * kernel trap or we have panicked.
  565      *
  566      * If this case occurs, save and restore the interrupt nesting level.
  567      */
  568     if (gd->gd_intr_nesting_level) {
  569         int savegdnest;
  570         int savegdtrap;
  571 
  572         if (gd->gd_trap_nesting_level == 0 && panic_cpu_gd != mycpu) {
  573             panic("lwkt_switch: Attempt to switch from a "
  574                   "fast interrupt, ipi, or hard code section, "
  575                   "td %p\n",
  576                   td);
  577         } else {
  578             savegdnest = gd->gd_intr_nesting_level;
  579             savegdtrap = gd->gd_trap_nesting_level;
  580             gd->gd_intr_nesting_level = 0;
  581             gd->gd_trap_nesting_level = 0;
  582             if ((td->td_flags & TDF_PANICWARN) == 0) {
  583                 td->td_flags |= TDF_PANICWARN;
  584                 kprintf("Warning: thread switch from interrupt, IPI, "
  585                         "or hard code section.\n"
  586                         "thread %p (%s)\n", td, td->td_comm);
  587                 print_backtrace(-1);
  588             }
  589             lwkt_switch();
  590             gd->gd_intr_nesting_level = savegdnest;
  591             gd->gd_trap_nesting_level = savegdtrap;
  592             return;
  593         }
  594     }
  595 
  596     /*
  597      * Release our current user process designation if we are blocking
  598      * or if a user reschedule was requested.
  599      *
  600      * NOTE: This function is NOT called if we are switching into or
  601      *       returning from a preemption.
  602      *
  603      * NOTE: Releasing our current user process designation may cause
  604      *       it to be assigned to another thread, which in turn will
  605      *       cause us to block in the usched acquire code when we attempt
  606      *       to return to userland.
  607      *
  608      * NOTE: On SMP systems this can be very nasty when heavy token
  609      *       contention is present so we want to be careful not to
  610      *       release the designation gratuitously.
  611      */
  612     if (td->td_release &&
  613         (user_resched_wanted() || (td->td_flags & TDF_RUNQ) == 0)) {
  614             td->td_release(td);
  615     }
  616 
  617     /*
  618      * Release all tokens
  619      */
  620     crit_enter_gd(gd);
  621     if (TD_TOKS_HELD(td))
  622             lwkt_relalltokens(td);
  623 
  624     /*
  625      * We had better not be holding any spin locks, but don't get into an
  626      * endless panic loop.
  627      */
  628     KASSERT(gd->gd_spinlocks == 0 || panicstr != NULL,
  629             ("lwkt_switch: still holding %d exclusive spinlocks!",
  630              gd->gd_spinlocks));
  631 
  632 
  633 #ifdef  INVARIANTS
  634     if (td->td_cscount) {
  635         kprintf("Diagnostic: attempt to switch while mastering cpusync: %p\n",
  636                 td);
  637         if (panic_on_cscount)
  638             panic("switching while mastering cpusync");
  639     }
  640 #endif
  641 
  642     /*
  643      * If we had preempted another thread on this cpu, resume the preempted
  644      * thread.  This occurs transparently, whether the preempted thread
  645      * was scheduled or not (it may have been preempted after descheduling
  646      * itself).
  647      *
  648      * We have to set up the MP lock for the original thread after backing
  649      * out the adjustment that was made to curthread when the original
  650      * was preempted.
  651      */
  652     if ((ntd = td->td_preempted) != NULL) {
  653         KKASSERT(ntd->td_flags & TDF_PREEMPT_LOCK);
  654         ntd->td_flags |= TDF_PREEMPT_DONE;
  655 
  656         /*
  657          * The interrupt may have woken a thread up, we need to properly
  658          * set the reschedule flag if the originally interrupted thread is
  659          * at a lower priority.
  660          *
  661          * The interrupt may not have descheduled.
  662          */
  663         if (TAILQ_FIRST(&gd->gd_tdrunq) != ntd)
  664             need_lwkt_resched();
  665         goto havethread_preempted;
  666     }
  667 
  668     /*
  669      * If we cannot obtain ownership of the tokens we cannot immediately
  670      * schedule the target thread.
  671      *
  672      * Reminder: Again, we cannot afford to run any IPIs in this path if
  673      * the current thread has been descheduled.
  674      */
  675     for (;;) {
  676         clear_lwkt_resched();
  677 
  678         /*
  679          * Hotpath - pull the head of the run queue and attempt to schedule
  680          * it.
  681          */
  682         ntd = TAILQ_FIRST(&gd->gd_tdrunq);
  683 
  684         if (ntd == NULL) {
  685             /*
  686              * Runq is empty, switch to idle to allow it to halt.
  687              */
  688             ntd = &gd->gd_idlethread;
  689             if (gd->gd_trap_nesting_level == 0 && panicstr == NULL)
  690                 ASSERT_NO_TOKENS_HELD(ntd);
  691             cpu_time.cp_msg[0] = 0;
  692             cpu_time.cp_stallpc = 0;
  693             goto haveidle;
  694         }
  695 
  696         /*
  697          * Hotpath - schedule ntd.
  698          *
  699          * NOTE: For UP there is no mplock and lwkt_getalltokens()
  700          *           always succeeds.
  701          */
  702         if (TD_TOKS_NOT_HELD(ntd) ||
  703             lwkt_getalltokens(ntd, (spinning >= lwkt_spin_loops)))
  704         {
  705             goto havethread;
  706         }
  707 
  708         /*
  709          * Coldpath (SMP only since tokens always succeed on UP)
  710          *
  711          * We had some contention on the thread we wanted to schedule.
  712          * What we do now is try to find a thread that we can schedule
  713          * in its stead.
  714          *
  715          * The coldpath scan does NOT rearrange threads in the run list.
  716          * The lwkt_schedulerclock() will assert need_lwkt_resched() on
  717          * the next tick whenever the current head is not the current thread.
  718          */
  719 #ifdef  INVARIANTS
  720         ++ntd->td_contended;
  721 #endif
  722         ++gd->gd_cnt.v_lock_colls;
  723 
  724         if (fairq_bypass > 0)
  725                 goto skip;
  726 
  727         while ((ntd = TAILQ_NEXT(ntd, td_threadq)) != NULL) {
  728 #ifndef NO_LWKT_SPLIT_USERPRI
  729                 /*
  730                  * Never schedule threads returning to userland or the
  731                  * user thread scheduler helper thread when higher priority
  732                  * threads are present.  The runq is sorted by priority
  733                  * so we can give up traversing it when we find the first
  734                  * low priority thread.
  735                  */
  736                 if (ntd->td_pri < TDPRI_KERN_LPSCHED) {
  737                         ntd = NULL;
  738                         break;
  739                 }
  740 #endif
  741 
  742                 /*
  743                  * Try this one.
  744                  */
  745                 if (TD_TOKS_NOT_HELD(ntd) ||
  746                     lwkt_getalltokens(ntd, (spinning >= lwkt_spin_loops))) {
  747                         goto havethread;
  748                 }
  749 #ifdef  INVARIANTS
  750                 ++ntd->td_contended;
  751 #endif
  752                 ++gd->gd_cnt.v_lock_colls;
  753         }
  754 
  755 skip:
  756         /*
  757          * We exhausted the run list, meaning that all runnable threads
  758          * are contested.
  759          */
  760         cpu_pause();
  761 #ifdef _KERNEL_VIRTUAL
  762         pthread_yield();
  763 #endif
  764         ntd = &gd->gd_idlethread;
  765         if (gd->gd_trap_nesting_level == 0 && panicstr == NULL)
  766             ASSERT_NO_TOKENS_HELD(ntd);
  767         /* contention case, do not clear contention mask */
  768 
  769         /*
  770          * We are going to have to retry but if the current thread is not
  771          * on the runq we instead switch through the idle thread to get away
  772          * from the current thread.  We have to flag for lwkt reschedule
  773          * to prevent the idle thread from halting.
  774          *
  775          * NOTE: A non-zero spinning is passed to lwkt_getalltokens() to
  776          *       instruct it to deal with the potential for deadlocks by
  777          *       ordering the tokens by address.
  778          */
  779         if ((td->td_flags & TDF_RUNQ) == 0) {
  780             need_lwkt_resched();        /* prevent hlt */
  781             goto haveidle;
  782         }
  783 #if defined(INVARIANTS) && defined(__x86_64__)
  784         if ((read_rflags() & PSL_I) == 0) {
  785                 cpu_enable_intr();
  786                 panic("lwkt_switch() called with interrupts disabled");
  787         }
  788 #endif
  789 
  790         /*
  791          * Number of iterations so far.  After a certain point we switch to
  792          * a sorted-address/monitor/mwait version of lwkt_getalltokens().
  793          */
  794         if (spinning < 0x7FFFFFFF)
  795             ++spinning;
  796 
  797 #ifndef _KERNEL_VIRTUAL
  798         /*
  799          * lwkt_getalltokens() failed in sorted token mode, we can use
  800          * monitor/mwait in this case.
  801          */
  802         if (spinning >= lwkt_spin_loops &&
  803             (cpu_mi_feature & CPU_MI_MONITOR) &&
  804             lwkt_spin_monitor)
  805         {
  806             cpu_mmw_pause_int(&gd->gd_reqflags,
  807                               (gd->gd_reqflags | RQF_SPINNING) &
  808                               ~RQF_IDLECHECK_WK_MASK,
  809                               cpu_mwait_spin);
  810         }
  811 #endif
  812 
  813         /*
  814          * We already checked that td is still scheduled so this should be
  815          * safe.
  816          */
  817         splz_check();
  818 
  819 #ifndef _KERNEL_VIRTUAL
  820         /*
  821          * This experimental resequencer is used as a fall-back to reduce
  822          * hw cache line contention by placing each core's scheduler into a
  823          * time-domain-multiplexed slot.
  824          *
  825          * The resequencer is disabled by default.  Its functionality has
  826          * largely been superseded by the token algorithm, which limits races
  827          * to a subset of cores.
  828          *
  829          * The resequencer algorithm tends to break down when more than
  830          * 20 cores are contending.  What appears to happen is that new
  831          * tokens can be obtained out of address-sorted order by new cores
  832          * while existing cores languish in long delays between retries and
  833          * wind up being starved-out of the token acquisition.
  834          */
  835         if (lwkt_spin_reseq && spinning >= lwkt_spin_reseq) {
  836             int cseq = atomic_fetchadd_int(&lwkt_cseq_windex, 1);
  837             int oseq;
  838 
  839             while ((oseq = lwkt_cseq_rindex) != cseq) {
  840                 cpu_ccfence();
  841 #if 1
  842                 if (cpu_mi_feature & CPU_MI_MONITOR) {
  843                     cpu_mmw_pause_int(&lwkt_cseq_rindex, oseq, cpu_mwait_spin);
  844                 } else {
  845 #endif
  846                     cpu_pause();
  847                     cpu_lfence();
  848 #if 1
  849                 }
  850 #endif
  851             }
  852             DELAY(1);
  853             atomic_add_int(&lwkt_cseq_rindex, 1);
  854         }
  855 #endif
  856         /* highest level for(;;) loop */
  857     }
  858 
  859 havethread:
  860     /*
  861      * Clear gd_idle_repeat when doing a normal switch to a non-idle
  862      * thread.
  863      */
  864     ntd->td_wmesg = NULL;
  865     ++gd->gd_cnt.v_swtch;
  866     gd->gd_idle_repeat = 0;
  867 
  868 havethread_preempted:
  869     /*
  870      * If the new target does not need the MP lock and we are holding it,
  871      * release the MP lock.  If the new target requires the MP lock we have
  872      * already acquired it for the target.
  873      */
  874     ;
  875 haveidle:
  876     KASSERT(ntd->td_critcount,
  877             ("priority problem in lwkt_switch %d %d",
  878             td->td_critcount, ntd->td_critcount));
  879 
  880     if (td != ntd) {
  881         /*
  882          * Execute the actual thread switch operation.  This function
  883          * returns to the current thread and returns the previous thread
  884          * (which may be different from the thread we switched to).
  885          *
  886          * We are responsible for marking ntd as TDF_RUNNING.
  887          */
  888         KKASSERT((ntd->td_flags & TDF_RUNNING) == 0);
  889         ++switch_count;
  890         KTR_LOG(ctxsw_sw, gd->gd_cpuid, ntd);
  891         ntd->td_flags |= TDF_RUNNING;
  892         lwkt_switch_return(td->td_switch(ntd));
  893         /* ntd invalid, td_switch() can return a different thread_t */
  894     }
  895 
  896     /*
  897      * catch-all.  XXX is this strictly needed?
  898      */
  899     splz_check();
  900 
  901     /* NOTE: current cpu may have changed after switch */
  902     crit_exit_quick(td);
  903 }
  904 
  905 /*
  906  * Called by assembly in the td_switch (thread restore path) for thread
  907  * bootstrap cases which do not 'return' to lwkt_switch().
  908  */
  909 void
  910 lwkt_switch_return(thread_t otd)
  911 {
  912         globaldata_t rgd;
  913 
  914         /*
  915          * Check if otd was migrating.  Now that we are on ntd we can finish
  916          * up the migration.  This is a bit messy but it is the only place
  917          * where td is known to be fully descheduled.
  918          *
  919          * We can only activate the migration if otd was migrating but not
  920          * held on the cpu due to a preemption chain.  We still have to
  921          * clear TDF_RUNNING on the old thread either way.
  922          *
  923          * We are responsible for clearing the previously running thread's
  924          * TDF_RUNNING.
  925          */
  926         if ((rgd = otd->td_migrate_gd) != NULL &&
  927             (otd->td_flags & TDF_PREEMPT_LOCK) == 0) {
  928                 KKASSERT((otd->td_flags & (TDF_MIGRATING | TDF_RUNNING)) ==
  929                          (TDF_MIGRATING | TDF_RUNNING));
  930                 otd->td_migrate_gd = NULL;
  931                 otd->td_flags &= ~TDF_RUNNING;
  932                 lwkt_send_ipiq(rgd, lwkt_setcpu_remote, otd);
  933         } else {
  934                 otd->td_flags &= ~TDF_RUNNING;
  935         }
  936 
  937         /*
  938          * Final exit validations (see lwp_wait()).  Note that otd becomes
  939          * invalid the *instant* we set TDF_MP_EXITSIG.
  940          */
  941         while (otd->td_flags & TDF_EXITING) {
  942                 u_int mpflags;
  943 
  944                 mpflags = otd->td_mpflags;
  945                 cpu_ccfence();
  946 
  947                 if (mpflags & TDF_MP_EXITWAIT) {
  948                         if (atomic_cmpset_int(&otd->td_mpflags, mpflags,
  949                                               mpflags | TDF_MP_EXITSIG)) {
  950                                 wakeup(otd);
  951                                 break;
  952                         }
  953                 } else {
  954                         /* no waiter yet; just post the exit signal */
  955                         if (atomic_cmpset_int(&otd->td_mpflags, mpflags,
  956                                               mpflags | TDF_MP_EXITSIG)) {
  957                                 break;
  958                         }
  959                 }
  960         }
  961 }
  962 
  963 /*
  964  * Request that the target thread preempt the current thread.  Preemption
  965  * can only occur if our only critical section is the one that we were called
  966  * with, the relative priority of the target thread is higher, and the target
  967  * thread holds no tokens.  This also only works if we are not holding any
  968  * spinlocks (obviously).
  969  *
  970  * THE CALLER OF LWKT_PREEMPT() MUST BE IN A CRITICAL SECTION.  Typically
  971  * this is called via lwkt_schedule() through the td_preemptable callback.
  972  * critcount is the managed critical priority that we should ignore in order
  973  * to determine whether preemption is possible (aka usually just the crit
  974  * priority of lwkt_schedule() itself).
  975  *
  976  * Preemption is typically limited to interrupt threads.
  977  *
  978  * Operation works in a fairly straightforward manner.  The normal
  979  * scheduling code is bypassed and we switch directly to the target
  980  * thread.  When the target thread attempts to block or switch away
  981  * code at the base of lwkt_switch() will switch directly back to our
  982  * thread.  Our thread is able to retain whatever tokens it holds and
  983  * if the target needs one of them the target will switch back to us
  984  * and reschedule itself normally.
  985  */
  986 void
  987 lwkt_preempt(thread_t ntd, int critcount)
  988 {
  989     struct globaldata *gd = mycpu;
  990     thread_t xtd;
  991     thread_t td;
  992     int save_gd_intr_nesting_level;
  993 
  994     /*
  995      * The caller has put us in a critical section.  We can only preempt
  996      * if the caller of the caller was not in a critical section (basically
  997      * a local interrupt), as determined by the 'critcount' parameter.  We
  998      * also can't preempt if the caller is holding any spinlocks (even if
  999      * he isn't in a critical section).  This also handles the tokens test.
 1000      *
 1001      * YYY The target thread must be in a critical section (else it must
 1002      * inherit our critical section?  I dunno yet).
 1003      */
 1004     KASSERT(ntd->td_critcount, ("BADCRIT0 %d", ntd->td_pri));
 1005 
 1006     td = gd->gd_curthread;
 1007     if (preempt_enable == 0) {
 1008         ++preempt_miss;
 1009         return;
 1010     }
 1011     if (ntd->td_pri <= td->td_pri) {
 1012         ++preempt_miss;
 1013         return;
 1014     }
 1015     if (td->td_critcount > critcount) {
 1016         ++preempt_miss;
 1017         return;
 1018     }
 1019     if (td->td_cscount) {
 1020         ++preempt_miss;
 1021         return;
 1022     }
 1023     if (ntd->td_gd != gd) {
 1024         ++preempt_miss;
 1025         return;
 1026     }
 1027     /*
 1028      * We don't have to check spinlocks here as they will also bump
 1029      * td_critcount.
 1030      *
 1031      * Do not try to preempt if the target thread is holding any tokens.
 1032      * We could try to acquire the tokens but this case is so rare there
 1033      * is no need to support it.
 1034      */
 1035     KKASSERT(gd->gd_spinlocks == 0);
 1036 
 1037     if (TD_TOKS_HELD(ntd)) {
 1038         ++preempt_miss;
 1039         return;
 1040     }
 1041     if (td == ntd || ((td->td_flags | ntd->td_flags) & TDF_PREEMPT_LOCK)) {
 1042         ++preempt_weird;
 1043         return;
 1044     }
 1045     if (ntd->td_preempted) {
 1046         ++preempt_hit;
 1047         return;
 1048     }
 1049     KKASSERT(gd->gd_processing_ipiq == 0);
 1050 
 1051     /*
 1052      * Since we are able to preempt the current thread, there is no need to
 1053      * call need_lwkt_resched().
 1054      *
 1055      * We must temporarily clear gd_intr_nesting_level around the switch
 1056      * since switchouts from the target thread are allowed (they will just
 1057      * return to our thread), and since the target thread has its own stack.
 1058      *
 1059      * A preemption must switch back to the original thread, assert the
 1060      * case.
 1061      */
 1062     ++preempt_hit;
 1063     ntd->td_preempted = td;
 1064     td->td_flags |= TDF_PREEMPT_LOCK;
 1065     KTR_LOG(ctxsw_pre, gd->gd_cpuid, ntd);
 1066     save_gd_intr_nesting_level = gd->gd_intr_nesting_level;
 1067     gd->gd_intr_nesting_level = 0;
 1068 
 1069     KKASSERT((ntd->td_flags & TDF_RUNNING) == 0);
 1070     ntd->td_flags |= TDF_RUNNING;
 1071     xtd = td->td_switch(ntd);
 1072     KKASSERT(xtd == ntd);
 1073     lwkt_switch_return(xtd);
 1074     gd->gd_intr_nesting_level = save_gd_intr_nesting_level;
 1075 
 1076     KKASSERT(ntd->td_preempted && (td->td_flags & TDF_PREEMPT_DONE));
 1077     ntd->td_preempted = NULL;
 1078     td->td_flags &= ~(TDF_PREEMPT_LOCK|TDF_PREEMPT_DONE);
 1079 }
 1080 
 1081 /*
 1082  * Conditionally call splz() if gd_reqflags indicates work is pending.
 1083  * This will work inside a critical section but not inside a hard code
 1084  * section.
 1085  *
 1086  * (self contained on a per cpu basis)
 1087  */
 1088 void
 1089 splz_check(void)
 1090 {
 1091     globaldata_t gd = mycpu;
 1092     thread_t td = gd->gd_curthread;
 1093 
 1094     if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) &&
 1095         gd->gd_intr_nesting_level == 0 &&
 1096         td->td_nest_count < 2)
 1097     {
 1098         splz();
 1099     }
 1100 }
 1101 
 1102 /*
 1103  * This version is integrated into crit_exit, reqflags has already
 1104  * been tested but td_critcount has not.
 1105  *
 1106  * We only want to execute the splz() on the 1->0 transition of
 1107  * critcount and not in a hard code section or if too deeply nested.
 1108  *
 1109  * NOTE: gd->gd_spinlocks is implied to be 0 when td_critcount is 0.
 1110  */
 1111 void
 1112 lwkt_maybe_splz(thread_t td)
 1113 {
 1114     globaldata_t gd = td->td_gd;
 1115 
 1116     if (td->td_critcount == 0 &&
 1117         gd->gd_intr_nesting_level == 0 &&
 1118         td->td_nest_count < 2)
 1119     {
 1120         splz();
 1121     }
 1122 }
 1123 
 1124 /*
 1125  * Drivers which set up processing co-threads can call this function to
 1126  * run the co-thread at a higher priority and to allow it to preempt
 1127  * normal threads.
 1128  */
 1129 void
 1130 lwkt_set_interrupt_support_thread(void)
 1131 {
 1132         thread_t td = curthread;
 1133 
 1134         lwkt_setpri_self(TDPRI_INT_SUPPORT);
 1135         td->td_flags |= TDF_INTTHREAD;
 1136         td->td_preemptable = lwkt_preempt;
 1137 }
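
      /*
       * Sketch of the intended use from a driver's co-thread (the thread
       * function name and work loop are hypothetical):
       *
       *      static void
       *      exampledrv_cothread(void *arg)
       *      {
       *              lwkt_set_interrupt_support_thread();
       *              for (;;) {
       *                      ... process work handed off by the interrupt ...
       *              }
       *      }
       */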
 1138 
 1139 
 1140 /*
 1141  * This function is used to negotiate a passive release of the current
 1142  * process/lwp designation with the user scheduler, allowing the user
 1143  * scheduler to schedule another user thread.  The related kernel thread
 1144  * (curthread) continues running in the released state.
 1145  */
 1146 void
 1147 lwkt_passive_release(struct thread *td)
 1148 {
 1149     struct lwp *lp = td->td_lwp;
 1150 
 1151 #ifndef NO_LWKT_SPLIT_USERPRI
 1152     td->td_release = NULL;
 1153     lwkt_setpri_self(TDPRI_KERN_USER);
 1154 #endif
 1155 
 1156     lp->lwp_proc->p_usched->release_curproc(lp);
 1157 }
 1158 
 1159 
 1160 /*
 1161  * This implements a LWKT yield, allowing a kernel thread to yield to other
 1162  * kernel threads at the same or higher priority.  This function can be
 1163  * called in a tight loop and will typically only yield once per tick.
 1164  *
 1165  * Most kernel threads run at the same priority in order to allow equal
 1166  * sharing.
 1167  *
 1168  * (self contained on a per cpu basis)
 1169  */
 1170 void
 1171 lwkt_yield(void)
 1172 {
 1173     globaldata_t gd = mycpu;
 1174     thread_t td = gd->gd_curthread;
 1175 
 1176     if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && td->td_nest_count < 2)
 1177         splz();
 1178     if (lwkt_resched_wanted()) {
 1179         lwkt_schedule_self(curthread);
 1180         lwkt_switch();
 1181     }
 1182 }
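
      /*
       * Typical use (sketch): a cpu-bound kernel loop yields once per
       * iteration; the checks above make the call nearly free when nothing
       * else wants the cpu.
       *
       *      while (work_remaining(arg)) {           ... hypothetical helpers ...
       *              do_one_unit(arg);
       *              lwkt_yield();
       *      }
       */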
 1183 
 1184 /*
 1185  * The quick version processes pending interrupts and higher-priority
 1186  * LWKT threads but will not round-robin same-priority LWKT threads.
 1187  *
 1188  * When called while attempting to return to userland the only same-pri
 1189  * threads are the ones which have already tried to become the current
 1190  * user process.
 1191  */
 1192 void
 1193 lwkt_yield_quick(void)
 1194 {
 1195     globaldata_t gd = mycpu;
 1196     thread_t td = gd->gd_curthread;
 1197 
 1198     if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && td->td_nest_count < 2)
 1199         splz();
 1200     if (lwkt_resched_wanted()) {
 1201         crit_enter();
 1202         if (TAILQ_FIRST(&gd->gd_tdrunq) == td) {
 1203             clear_lwkt_resched();
 1204         } else {
 1205             lwkt_schedule_self(curthread);
 1206             lwkt_switch();
 1207         }
 1208         crit_exit();
 1209     }
 1210 }
 1211 
 1212 /*
 1213  * This yield is designed for kernel threads with a user context.
 1214  *
 1215  * The kernel acting on behalf of the user is potentially cpu-bound,
 1216  * this function will efficiently allow other threads to run and also
 1217  * switch to other processes by releasing.
 1218  *
 1219  * The lwkt_user_yield() function is designed to have very low overhead
 1220  * if no yield is determined to be needed.
 1221  */
 1222 void
 1223 lwkt_user_yield(void)
 1224 {
 1225     globaldata_t gd = mycpu;
 1226     thread_t td = gd->gd_curthread;
 1227 
 1228     /*
 1229      * Always run any pending interrupts in case we are in a critical
 1230      * section.
 1231      */
 1232     if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && td->td_nest_count < 2)
 1233         splz();
 1234 
 1235     /*
 1236      * Switch (which forces a release) if another kernel thread needs
 1237      * the cpu, if userland wants us to resched, or if our kernel
 1238      * quantum has run out.
 1239      */
 1240     if (lwkt_resched_wanted() ||
 1241         user_resched_wanted())
 1242     {
 1243         lwkt_switch();
 1244     }
 1245 
 1246 #if 0
 1247     /*
 1248      * Reacquire the current process if we are released.
 1249      *
 1250      * XXX not implemented atm.  The kernel may be holding locks and such,
 1251      *     so we want the thread to continue to receive cpu.
 1252      */
 1253     if (td->td_release == NULL && lp) {
 1254         lp->lwp_proc->p_usched->acquire_curproc(lp);
 1255         td->td_release = lwkt_passive_release;
 1256         lwkt_setpri_self(TDPRI_USER_NORM);
 1257     }
 1258 #endif
 1259 }
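
      /*
       * Typical use (sketch): a long copy or scan performed on behalf of
       * userland calls this once per chunk so other threads and processes
       * are not starved (chunk helpers are hypothetical):
       *
       *      for (i = 0; i < nchunks; ++i) {
       *              process_chunk(arg, i);
       *              lwkt_user_yield();
       *      }
       */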
 1260 
 1261 /*
 1262  * Generic schedule.  Possibly schedule threads belonging to other cpus and
 1263  * deal with threads that might be blocked on a wait queue.
 1264  *
 1265  * We have a little helper inline function which does additional work after
 1266  * the thread has been enqueued, including dealing with preemption and
 1267  * setting need_lwkt_resched() (which prevents the kernel from returning
 1268  * to userland until it has processed higher priority threads).
 1269  *
 1270  * It is possible for this routine to be called after a failed _enqueue
 1271  * (due to the target thread migrating, sleeping, or otherwise blocked).
 1272  * We have to check that the thread is actually on the run queue!
 1273  */
 1274 static __inline
 1275 void
 1276 _lwkt_schedule_post(globaldata_t gd, thread_t ntd, int ccount)
 1277 {
 1278     if (ntd->td_flags & TDF_RUNQ) {
 1279         if (ntd->td_preemptable) {
 1280             ntd->td_preemptable(ntd, ccount);   /* YYY +token */
 1281         }
 1282     }
 1283 }
 1284 
 1285 static __inline
 1286 void
 1287 _lwkt_schedule(thread_t td)
 1288 {
 1289     globaldata_t mygd = mycpu;
 1290 
 1291     KASSERT(td != &td->td_gd->gd_idlethread,
 1292             ("lwkt_schedule(): scheduling gd_idlethread is illegal!"));
 1293     KKASSERT((td->td_flags & TDF_MIGRATING) == 0);
 1294     crit_enter_gd(mygd);
 1295     KKASSERT(td->td_lwp == NULL ||
 1296              (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
 1297 
 1298     if (td == mygd->gd_curthread) {
 1299         _lwkt_enqueue(td);
 1300     } else {
 1301         /*
 1302          * If we own the thread, there is no race (since we are in a
 1303          * critical section).  If we do not own the thread there might
 1304          * be a race but the target cpu will deal with it.
 1305          */
 1306         if (td->td_gd == mygd) {
 1307             _lwkt_enqueue(td);
 1308             _lwkt_schedule_post(mygd, td, 1);
 1309         } else {
 1310             lwkt_send_ipiq3(td->td_gd, lwkt_schedule_remote, td, 0);
 1311         }
 1312     }
 1313     crit_exit_gd(mygd);
 1314 }
 1315 
 1316 void
 1317 lwkt_schedule(thread_t td)
 1318 {
 1319     _lwkt_schedule(td);
 1320 }
 1321 
 1322 void
 1323 lwkt_schedule_noresched(thread_t td)    /* XXX not impl */
 1324 {
 1325     _lwkt_schedule(td);
 1326 }
 1327 
 1328 /*
 1329  * When scheduled remotely, if frame != NULL the IPIQ is being
 1330  * run via doreti or an interrupt and preemption can be allowed.
 1331  *
 1332  * To allow preemption we have to drop the critical section so only
 1333  * one is present in _lwkt_schedule_post.
 1334  */
 1335 static void
 1336 lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame)
 1337 {
 1338     thread_t td = curthread;
 1339     thread_t ntd = arg;
 1340 
 1341     if (frame && ntd->td_preemptable) {
 1342         crit_exit_noyield(td);
 1343         _lwkt_schedule(ntd);
 1344         crit_enter_quick(td);
 1345     } else {
 1346         _lwkt_schedule(ntd);
 1347     }
 1348 }
 1349 
 1350 /*
 1351  * Thread migration using a 'Pull' method.  The thread may or may not be
 1352  * the current thread.  It MUST be descheduled and in a stable state.
 1353  * lwkt_giveaway() must be called on the cpu owning the thread.
 1354  *
 1355  * At any point after lwkt_giveaway() is called, the target cpu may
 1356  * 'pull' the thread by calling lwkt_acquire().
 1357  *
 1358  * We have to make sure the thread is not sitting on a per-cpu tsleep
 1359  * queue or it will blow up when it moves to another cpu.
 1360  *
 1361  * MPSAFE - must be called under very specific conditions.
 1362  */
 1363 void
 1364 lwkt_giveaway(thread_t td)
 1365 {
 1366     globaldata_t gd = mycpu;
 1367 
 1368     crit_enter_gd(gd);
 1369     if (td->td_flags & TDF_TSLEEPQ)
 1370         tsleep_remove(td);
 1371     KKASSERT(td->td_gd == gd);
 1372     TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);
 1373     td->td_flags |= TDF_MIGRATING;
 1374     crit_exit_gd(gd);
 1375 }
 1376 
 1377 void
 1378 lwkt_acquire(thread_t td)
 1379 {
 1380     globaldata_t gd;
 1381     globaldata_t mygd;
 1382     int retry = 10000000;
 1383 
 1384     KKASSERT(td->td_flags & TDF_MIGRATING);
 1385     gd = td->td_gd;
 1386     mygd = mycpu;
 1387     if (gd != mycpu) {
 1388         cpu_lfence();
 1389         KKASSERT((td->td_flags & TDF_RUNQ) == 0);
 1390         crit_enter_gd(mygd);
 1391         DEBUG_PUSH_INFO("lwkt_acquire");
 1392         while (td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) {
 1393             lwkt_process_ipiq();
 1394             cpu_lfence();
 1395             if (--retry == 0) {
 1396                 kprintf("lwkt_acquire: stuck: td %p td->td_flags %08x\n",
 1397                         td, td->td_flags);
 1398                 retry = 10000000;
 1399             }
 1400 #ifdef _KERNEL_VIRTUAL
 1401             pthread_yield();
 1402 #endif
 1403         }
 1404         DEBUG_POP_INFO();
 1405         cpu_mfence();
 1406         td->td_gd = mygd;
 1407         TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq);
 1408         td->td_flags &= ~TDF_MIGRATING;
 1409         crit_exit_gd(mygd);
 1410     } else {
 1411         crit_enter_gd(mygd);
 1412         TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq);
 1413         td->td_flags &= ~TDF_MIGRATING;
 1414         crit_exit_gd(mygd);
 1415     }
 1416 }
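
      /*
       * The two halves of a pull migration, sketched (td must already be
       * descheduled and stable; how the target cpu learns about td is up to
       * the caller, e.g. via an IPI or message):
       *
       *      ... on the cpu currently owning td ...
       *      lwkt_giveaway(td);
       *
       *      ... later, on the target cpu ...
       *      lwkt_acquire(td);
       *      lwkt_schedule(td);
       */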
 1417 
 1418 /*
 1419  * Generic deschedule.  Descheduling threads other than your own should be
 1420  * done only in carefully controlled circumstances.  Descheduling is
 1421  * asynchronous.
 1422  *
 1423  * This function may block if the cpu has run out of messages.
 1424  */
 1425 void
 1426 lwkt_deschedule(thread_t td)
 1427 {
 1428     crit_enter();
 1429     if (td == curthread) {
 1430         _lwkt_dequeue(td);
 1431     } else {
 1432         if (td->td_gd == mycpu) {
 1433             _lwkt_dequeue(td);
 1434         } else {
 1435             lwkt_send_ipiq(td->td_gd, (ipifunc1_t)lwkt_deschedule, td);
 1436         }
 1437     }
 1438     crit_exit();
 1439 }
 1440 
 1441 /*
 1442  * Set the target thread's priority.  This routine does not automatically
 1443  * switch to a higher priority thread, LWKT threads are not designed for
 1444  * continuous priority changes.  Yield if you want to switch.
 1445  */
 1446 void
 1447 lwkt_setpri(thread_t td, int pri)
 1448 {
 1449     if (td->td_pri != pri) {
 1450         KKASSERT(pri >= 0);
 1451         crit_enter();
 1452         if (td->td_flags & TDF_RUNQ) {
 1453             KKASSERT(td->td_gd == mycpu);
 1454             _lwkt_dequeue(td);
 1455             td->td_pri = pri;
 1456             _lwkt_enqueue(td);
 1457         } else {
 1458             td->td_pri = pri;
 1459         }
 1460         crit_exit();
 1461     }
 1462 }
 1463 
 1464 /*
 1465  * Set the initial priority for a thread prior to it being scheduled for
 1466  * the first time.  The thread MUST NOT be scheduled before or during
 1467  * this call.  The thread may be assigned to a cpu other than the current
 1468  * cpu.
 1469  *
 1470  * Typically used after a thread has been created with TDF_STOPPREQ,
 1471  * and before the thread is initially scheduled.
 1472  */
 1473 void
 1474 lwkt_setpri_initial(thread_t td, int pri)
 1475 {
 1476     KKASSERT(pri >= 0);
 1477     KKASSERT((td->td_flags & TDF_RUNQ) == 0);
 1478     td->td_pri = pri;
 1479 }
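
/*
 * [Editor's illustration -- not part of the original source file]
 *
 * Sketch of the "set the priority before the first schedule" pattern
 * described above.  The comment mentions TDF_STOPPREQ; this sketch instead
 * uses the TDF_NOSTART path visible in lwkt_create() later in this file so
 * the new thread is not scheduled until lwkt_setpri_initial() has run.
 * TDPRI_KERN_DAEMON is only an example priority value and the helper name
 * is invented.
 */
static void
example_create_at_priority(void (*func)(void *), void *arg, thread_t *tdp)
{
    lwkt_create(func, arg, tdp, NULL, TDF_NOSTART, -1, "example");
    lwkt_setpri_initial(*tdp, TDPRI_KERN_DAEMON);
    lwkt_schedule(*tdp);
}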
 1480 
 1481 void
 1482 lwkt_setpri_self(int pri)
 1483 {
 1484     thread_t td = curthread;
 1485 
 1486     KKASSERT(pri >= 0 && pri <= TDPRI_MAX);
 1487     crit_enter();
 1488     if (td->td_flags & TDF_RUNQ) {
 1489         _lwkt_dequeue(td);
 1490         td->td_pri = pri;
 1491         _lwkt_enqueue(td);
 1492     } else {
 1493         td->td_pri = pri;
 1494     }
 1495     crit_exit();
 1496 }
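
/*
 * [Editor's illustration -- not part of the original source file]
 *
 * Sketch of the usage implied by the comment above lwkt_setpri(): changing a
 * priority does not switch threads by itself, so a caller that wants the
 * change to take effect immediately yields afterwards.  TDPRI_KERN_USER is
 * only an example priority and the helper name is invented; lwkt_yield() is
 * assumed to be the usual voluntary yield.
 */
static void
example_drop_priority(void)
{
    lwkt_setpri_self(TDPRI_KERN_USER);  /* requeue ourselves at the new pri */
    lwkt_yield();                       /* let higher-priority threads run */
}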
 1497 
 1498 /*
 1499  * hz tick scheduler clock for LWKT threads
 1500  */
 1501 void
 1502 lwkt_schedulerclock(thread_t td)
 1503 {
 1504     globaldata_t gd = td->td_gd;
 1505     thread_t xtd;
 1506 
 1507     if (TAILQ_FIRST(&gd->gd_tdrunq) == td) {
 1508         /*
 1509          * If the current thread is at the head of the runq shift it to the
 1510          * end of any equal-priority threads and request a LWKT reschedule
 1511          * if it moved.
 1512          *
 1513          * Ignore upri in this situation.  There will be only one user
 1514          * thread in user mode; all others will be user threads running
 1515          * in kernel mode, and we have to make sure they get some cpu.
 1516          */
 1517         xtd = TAILQ_NEXT(td, td_threadq);
 1518         if (xtd && xtd->td_pri == td->td_pri) {
 1519             TAILQ_REMOVE(&gd->gd_tdrunq, td, td_threadq);
 1520             while (xtd && xtd->td_pri == td->td_pri)
 1521                 xtd = TAILQ_NEXT(xtd, td_threadq);
 1522             if (xtd)
 1523                 TAILQ_INSERT_BEFORE(xtd, td, td_threadq);
 1524             else
 1525                 TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td, td_threadq);
 1526             need_lwkt_resched();
 1527         }
 1528     } else {
 1529         /*
 1530          * If we scheduled a thread other than the one at the head of the
 1531          * queue, always request a reschedule every tick.
 1532          */
 1533         need_lwkt_resched();
 1534     }
 1535 }
 1536 
 1537 /*
 1538  * Migrate the current thread to the specified cpu. 
 1539  *
 1540  * This is accomplished by descheduling ourselves from the current cpu
 1541  * and setting td_migrate_gd.  The lwkt_switch() code will detect that the
 1542  * 'old' thread wants to migrate after it has been completely switched out
 1543  * and will complete the migration.
 1544  *
 1545  * TDF_MIGRATING prevents scheduling races while the thread is being migrated.
 1546  *
 1547  * We must be sure to release our current process designation (if a user
 1548  * process) before clearing out any tsleepq we are on because the release
 1549  * code may re-add us.
 1550  *
 1551  * We must be sure to remove ourselves from the current cpu's tsleepq
 1552  * before potentially moving to another queue.  The thread can be on
 1553  * a tsleepq due to a left-over tsleep_interlock().
 1554  */
 1555 
 1556 void
 1557 lwkt_setcpu_self(globaldata_t rgd)
 1558 {
 1559     thread_t td = curthread;
 1560 
 1561     if (td->td_gd != rgd) {
 1562         crit_enter_quick(td);
 1563 
 1564         if (td->td_release)
 1565             td->td_release(td);
 1566         if (td->td_flags & TDF_TSLEEPQ)
 1567             tsleep_remove(td);
 1568 
 1569         /*
 1570          * Set TDF_MIGRATING to prevent a spurious reschedule while we are
 1571          * trying to deschedule ourselves and switch away, then deschedule
 1572          * ourself, remove us from tdallq, and set td_migrate_gd.  Finally,
 1573          * call lwkt_switch() to complete the operation.
 1574          */
 1575         td->td_flags |= TDF_MIGRATING;
 1576         lwkt_deschedule_self(td);
 1577         TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq);
 1578         td->td_migrate_gd = rgd;
 1579         lwkt_switch();
 1580 
 1581         /*
 1582          * We are now on the target cpu
 1583          */
 1584         KKASSERT(rgd == mycpu);
 1585         TAILQ_INSERT_TAIL(&rgd->gd_tdallq, td, td_allq);
 1586         crit_exit_quick(td);
 1587     }
 1588 }
 1589 
 1590 void
 1591 lwkt_migratecpu(int cpuid)
 1592 {
 1593         globaldata_t rgd;
 1594 
 1595         rgd = globaldata_find(cpuid);
 1596         lwkt_setcpu_self(rgd);
 1597 }
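
/*
 * [Editor's illustration -- not part of the original source file]
 *
 * Minimal sketch of moving the calling thread to a specific cpu via the
 * migration path above.  The modulo guard against an out-of-range cpuid is
 * example-only; real callers normally already hold a valid cpu id.  The
 * helper name is invented.
 */
static void
example_move_to_cpu(int cpuid)
{
    globaldata_t rgd;

    rgd = globaldata_find(cpuid % ncpus);
    lwkt_setcpu_self(rgd);              /* returns running on rgd */
}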
 1598 
 1599 /*
 1600  * Remote IPI for cpu migration (called while in a critical section so we
 1601  * do not have to enter another one).
 1602  *
 1603  * The thread (td) has already been completely descheduled from the
 1604  * originating cpu and we can simply assert the case.  The thread is
 1605  * assigned to the new cpu and enqueued.
 1606  *
 1607  * The thread will re-add itself to tdallq when it resumes execution.
 1608  */
 1609 static void
 1610 lwkt_setcpu_remote(void *arg)
 1611 {
 1612     thread_t td = arg;
 1613     globaldata_t gd = mycpu;
 1614 
 1615     KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) == 0);
 1616     td->td_gd = gd;
 1617     cpu_mfence();
 1618     td->td_flags &= ~TDF_MIGRATING;
 1619     KKASSERT(td->td_migrate_gd == NULL);
 1620     KKASSERT(td->td_lwp == NULL ||
 1621             (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
 1622     _lwkt_enqueue(td);
 1623 }
 1624 
 1625 struct lwp *
 1626 lwkt_preempted_proc(void)
 1627 {
 1628     thread_t td = curthread;
 1629     while (td->td_preempted)
 1630         td = td->td_preempted;
 1631     return(td->td_lwp);
 1632 }
 1633 
 1634 /*
 1635  * Create a kernel process/thread/whatever.  It shares its address space
 1636  * with proc0 - i.e., kernel only.
 1637  *
 1638  * If the cpu is not specified one will be selected.  In the future
 1639  * specifying a cpu of -1 will enable kernel thread migration between
 1640  * cpus.
 1641  */
 1642 int
 1643 lwkt_create(void (*func)(void *), void *arg, struct thread **tdp,
 1644             thread_t template, int tdflags, int cpu, const char *fmt, ...)
 1645 {
 1646     thread_t td;
 1647     __va_list ap;
 1648 
 1649     td = lwkt_alloc_thread(template, LWKT_THREAD_STACK, cpu,
 1650                            tdflags);
 1651     if (tdp)
 1652         *tdp = td;
 1653     cpu_set_thread_handler(td, lwkt_exit, func, arg);
 1654 
 1655     /*
 1656      * Set up arg0 for 'ps' etc
 1657      */
 1658     __va_start(ap, fmt);
 1659     kvsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
 1660     __va_end(ap);
 1661 
 1662     /*
 1663      * Schedule the thread to run
 1664      */
 1665     if (td->td_flags & TDF_NOSTART)
 1666         td->td_flags &= ~TDF_NOSTART;
 1667     else
 1668         lwkt_schedule(td);
 1669     return 0;
 1670 }
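
/*
 * [Editor's illustration -- not part of the original source file]
 *
 * Sketch of a typical lwkt_create()/lwkt_exit() pairing based on the
 * signature above: the thread function does its work and terminates via
 * lwkt_exit(), which lwkt_create() also installs as the fall-through handler
 * through cpu_set_thread_handler().  The names are invented for the example;
 * NULL selects the default template, 0 passes no special flags, and -1 lets
 * the kernel pick the cpu.
 */
static void
example_thread_main(void *arg)
{
    (void)arg;                          /* unused in this sketch */
    kprintf("example kernel thread running\n");
    lwkt_exit();
}

static thread_t example_td;

static void
example_start_thread(void)
{
    lwkt_create(example_thread_main, NULL, &example_td, NULL,
                0, -1, "example");
}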
 1671 
 1672 /*
 1673  * Destroy an LWKT thread.  Warning!  This function is not called when
 1674  * a process exits; cpu_proc_exit() directly calls cpu_thread_exit() and
 1675  * uses a different reaping mechanism.
 1676  */
 1677 void
 1678 lwkt_exit(void)
 1679 {
 1680     thread_t td = curthread;
 1681     thread_t std;
 1682     globaldata_t gd;
 1683 
 1684     /*
 1685      * Do any cleanup that might block here
 1686      */
 1687     if (td->td_flags & TDF_VERBOSE)
 1688         kprintf("kthread %p %s has exited\n", td, td->td_comm);
 1689     biosched_done(td);
 1690     dsched_exit_thread(td);
 1691 
 1692     /*
 1693      * Get us into a critical section to interlock gd_freetd and loop
 1694      * until we can get it freed.
 1695      *
 1696      * We have to cache the current td in gd_freetd because objcache_put()ing
 1697      * it would rip it out from under us while our thread is still active.
 1698      *
 1699      * We are the current thread so of course our own TDF_RUNNING bit will
 1700      * be set, so unlike the lwp reap code we don't wait for it to clear.
 1701      */
 1702     gd = mycpu;
 1703     crit_enter_quick(td);
 1704     for (;;) {
 1705         if (td->td_refs) {
 1706             tsleep(td, 0, "tdreap", 1);
 1707             continue;
 1708         }
 1709         if ((std = gd->gd_freetd) != NULL) {
 1710             KKASSERT((std->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) == 0);
 1711             gd->gd_freetd = NULL;
 1712             objcache_put(thread_cache, std);
 1713             continue;
 1714         }
 1715         break;
 1716     }
 1717 
 1718     /*
 1719      * Remove thread resources from kernel lists and deschedule us for
 1720      * the last time.  We cannot block after this point or we may end
 1721      * up with a stale td on the tsleepq.
 1722      *
 1723      * None of this may block, the critical section is the only thing
 1724      * protecting tdallq and the only thing preventing new lwkt_hold()
 1725      * thread refs now.
 1726      */
 1727     if (td->td_flags & TDF_TSLEEPQ)
 1728         tsleep_remove(td);
 1729     lwkt_deschedule_self(td);
 1730     lwkt_remove_tdallq(td);
 1731     KKASSERT(td->td_refs == 0);
 1732 
 1733     /*
 1734      * Final cleanup
 1735      */
 1736     KKASSERT(gd->gd_freetd == NULL);
 1737     if (td->td_flags & TDF_ALLOCATED_THREAD)
 1738         gd->gd_freetd = td;
 1739     cpu_thread_exit();
 1740 }
 1741 
 1742 void
 1743 lwkt_remove_tdallq(thread_t td)
 1744 {
 1745     KKASSERT(td->td_gd == mycpu);
 1746     TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq);
 1747 }
 1748 
 1749 /*
 1750  * Code reduction and branch prediction improvements.  Call/return
 1751  * overhead on modern cpus often degenerates into 0 cycles due to
 1752  * the cpu's branch prediction hardware and return pc cache.  We
 1753  * can take advantage of this by not inlining medium-complexity
 1754  * functions and we can also reduce the branch prediction impact
 1755  * by collapsing perfectly predictable branches into a single
 1756  * procedure instead of duplicating it.
 1757  *
 1758  * Is any of this noticeable?  Probably not, so I'll take the
 1759  * smaller code size.
 1760  */
 1761 void
 1762 crit_exit_wrapper(__DEBUG_CRIT_ARG__)
 1763 {
 1764     _crit_exit(mycpu __DEBUG_CRIT_PASS_ARG__);
 1765 }
 1766 
 1767 void
 1768 crit_panic(void)
 1769 {
 1770     thread_t td = curthread;
 1771     int lcrit = td->td_critcount;
 1772 
 1773     td->td_critcount = 0;
 1774     panic("td_critcount is/would-go negative! %p %d", td, lcrit);
 1775     /* NOT REACHED */
 1776 }
 1777 
 1778 /*
 1779  * Called from debugger/panic on cpus which have been stopped.  We must still
 1780  * process the IPIQ while stopped, even if we were stopped while in a critical
 1781  * section (XXX).
 1782  *
 1783  * If we are dumping also try to process any pending interrupts.  This may
 1784  * or may not work depending on the state of the cpu at the point it was
 1785  * stopped.
 1786  */
 1787 void
 1788 lwkt_smp_stopped(void)
 1789 {
 1790     globaldata_t gd = mycpu;
 1791 
 1792     crit_enter_gd(gd);
 1793     if (dumping) {
 1794         lwkt_process_ipiq();
 1795         splz();
 1796     } else {
 1797         lwkt_process_ipiq();
 1798     }
 1799     crit_exit_gd(gd);
 1800 }
