FreeBSD/Linux Kernel Cross Reference
sys/kern/lwkt_token.c


    1 /*
    2  * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
    3  * 
    4  * This code is derived from software contributed to The DragonFly Project
    5  * by Matthew Dillon <dillon@backplane.com>
    6  * 
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in
   15  *    the documentation and/or other materials provided with the
   16  *    distribution.
   17  * 3. Neither the name of The DragonFly Project nor the names of its
   18  *    contributors may be used to endorse or promote products derived
   19  *    from this software without specific, prior written permission.
   20  * 
   21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
   24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
   25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
   27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
   28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
   29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
   30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
   31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   32  * SUCH DAMAGE.
   33  */
   34 
   35 /*
   36  * lwkt_token - Implement soft token locks.
   37  *
   38  * Tokens are locks which serialize a thread only while the thread is
    39  * running.  If the thread blocks, all tokens are released, then reacquired
   40  * when the thread resumes.
   41  *
   42  * This implementation requires no critical sections or spin locks, but
    43  * does use atomic_cmpset_long().
   44  *
   45  * Tokens may be recursively acquired by the same thread.  However the
   46  * caller must be sure to release such tokens in reverse order.
   47  */
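/*
 * Example (sketch, not compiled): typical exclusive token usage.
 * "my_token", "my_list" and struct my_item are hypothetical.  The
 * acquisition may block; if it does, the scheduler transparently
 * releases and later reacquires all of the thread's tokens.
 */
#if 0
static struct lwkt_token my_token = LWKT_TOKEN_INITIALIZER(my_token);

static void
my_list_insert(struct my_item *item)
{
        lwkt_gettoken(&my_token);               /* may block */
        TAILQ_INSERT_TAIL(&my_list, item, it_entry);
        lwkt_reltoken(&my_token);               /* release in reverse order */
}
#endif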
   48 #include <sys/param.h>
   49 #include <sys/systm.h>
   50 #include <sys/kernel.h>
   51 #include <sys/proc.h>
   52 #include <sys/rtprio.h>
   53 #include <sys/queue.h>
   54 #include <sys/sysctl.h>
   55 #include <sys/ktr.h>
   56 #include <sys/kthread.h>
   57 #include <machine/cpu.h>
   58 #include <sys/lock.h>
   59 #include <sys/spinlock.h>
   60 
   61 #include <sys/thread2.h>
   62 #include <sys/spinlock2.h>
   63 #include <sys/mplock2.h>
   64 
   65 #include <vm/vm.h>
   66 #include <vm/vm_param.h>
   67 #include <vm/vm_kern.h>
   68 #include <vm/vm_object.h>
   69 #include <vm/vm_page.h>
   70 #include <vm/vm_map.h>
   71 #include <vm/vm_pager.h>
   72 #include <vm/vm_extern.h>
   73 #include <vm/vm_zone.h>
   74 
   75 #include <machine/stdarg.h>
   76 #include <machine/smp.h>
   77 
   78 #include "opt_ddb.h"
   79 #ifdef DDB
   80 #include <ddb/ddb.h>
   81 #endif
   82 
   83 extern int lwkt_sched_debug;
   84 
   85 #ifndef LWKT_NUM_POOL_TOKENS
   86 #define LWKT_NUM_POOL_TOKENS    4001    /* prime number */
   87 #endif
   88 
   89 static lwkt_token       pool_tokens[LWKT_NUM_POOL_TOKENS];
   90 struct spinlock         tok_debug_spin = SPINLOCK_INITIALIZER(&tok_debug_spin);
   91 
   92 #define TOKEN_STRING    "REF=%p TOK=%p TD=%p"
   93 #define TOKEN_ARGS      lwkt_tokref_t ref, lwkt_token_t tok, struct thread *td
   94 #define CONTENDED_STRING        TOKEN_STRING " (contention started)"
   95 #define UNCONTENDED_STRING      TOKEN_STRING " (contention stopped)"
   96 #if !defined(KTR_TOKENS)
   97 #define KTR_TOKENS      KTR_ALL
   98 #endif
   99 
  100 KTR_INFO_MASTER(tokens);
  101 KTR_INFO(KTR_TOKENS, tokens, fail, 0, TOKEN_STRING, TOKEN_ARGS);
  102 KTR_INFO(KTR_TOKENS, tokens, succ, 1, TOKEN_STRING, TOKEN_ARGS);
  103 #if 0
  104 KTR_INFO(KTR_TOKENS, tokens, release, 2, TOKEN_STRING, TOKEN_ARGS);
  105 KTR_INFO(KTR_TOKENS, tokens, remote, 3, TOKEN_STRING, TOKEN_ARGS);
  106 KTR_INFO(KTR_TOKENS, tokens, reqremote, 4, TOKEN_STRING, TOKEN_ARGS);
  107 KTR_INFO(KTR_TOKENS, tokens, reqfail, 5, TOKEN_STRING, TOKEN_ARGS);
  108 KTR_INFO(KTR_TOKENS, tokens, drain, 6, TOKEN_STRING, TOKEN_ARGS);
  109 KTR_INFO(KTR_TOKENS, tokens, contention_start, 7, CONTENDED_STRING, TOKEN_ARGS);
  110 KTR_INFO(KTR_TOKENS, tokens, contention_stop, 7, UNCONTENDED_STRING, TOKEN_ARGS);
  111 #endif
  112 
  113 #define logtoken(name, ref)                                             \
  114         KTR_LOG(tokens_ ## name, ref, ref->tr_tok, curthread)
  115 
  116 /*
  117  * Global tokens.  These replace the MP lock for major subsystem locking.
   118  * These tokens are initially used to lock both global and individual
  119  * operations.
  120  *
  121  * Once individual structures get their own locks these tokens are used
  122  * only to protect global lists & other variables and to interlock
  123  * allocations and teardowns and such.
  124  *
  125  * The UP initializer causes token acquisition to also acquire the MP lock
  126  * for maximum compatibility.  The feature may be enabled and disabled at
   127  * any time; the MP state is copied to the tokref when the token is acquired
  128  * and will not race against sysctl changes.
  129  */
  130 struct lwkt_token mp_token = LWKT_TOKEN_INITIALIZER(mp_token);
  131 struct lwkt_token pmap_token = LWKT_TOKEN_INITIALIZER(pmap_token);
  132 struct lwkt_token dev_token = LWKT_TOKEN_INITIALIZER(dev_token);
  133 struct lwkt_token vm_token = LWKT_TOKEN_INITIALIZER(vm_token);
  134 struct lwkt_token vmspace_token = LWKT_TOKEN_INITIALIZER(vmspace_token);
  135 struct lwkt_token kvm_token = LWKT_TOKEN_INITIALIZER(kvm_token);
  136 struct lwkt_token sigio_token = LWKT_TOKEN_INITIALIZER(sigio_token);
  137 struct lwkt_token tty_token = LWKT_TOKEN_INITIALIZER(tty_token);
  138 struct lwkt_token vnode_token = LWKT_TOKEN_INITIALIZER(vnode_token);
  139 struct lwkt_token ifnet_token = LWKT_TOKEN_INITIALIZER(ifnet_token);
  140 
  141 static int lwkt_token_spin = 5;
  142 SYSCTL_INT(_lwkt, OID_AUTO, token_spin, CTLFLAG_RW,
  143     &lwkt_token_spin, 0, "Decontention spin loops");
  144 static int lwkt_token_delay = 0;
  145 SYSCTL_INT(_lwkt, OID_AUTO, token_delay, CTLFLAG_RW,
  146     &lwkt_token_delay, 0, "Decontention spin delay in ns");
  147 
  148 /*
   149  * The collision count is bumped every time the LWKT scheduler fails
   150  * to acquire needed tokens, in addition to being bumped on a normal
   151  * lwkt_gettoken() stall.
  152  */
  153 SYSCTL_LONG(_lwkt, OID_AUTO, mp_collisions, CTLFLAG_RW,
  154     &mp_token.t_collisions, 0, "Collision counter of mp_token");
  155 SYSCTL_LONG(_lwkt, OID_AUTO, pmap_collisions, CTLFLAG_RW,
  156     &pmap_token.t_collisions, 0, "Collision counter of pmap_token");
  157 SYSCTL_LONG(_lwkt, OID_AUTO, dev_collisions, CTLFLAG_RW,
  158     &dev_token.t_collisions, 0, "Collision counter of dev_token");
  159 SYSCTL_LONG(_lwkt, OID_AUTO, vm_collisions, CTLFLAG_RW,
  160     &vm_token.t_collisions, 0, "Collision counter of vm_token");
  161 SYSCTL_LONG(_lwkt, OID_AUTO, vmspace_collisions, CTLFLAG_RW,
  162     &vmspace_token.t_collisions, 0, "Collision counter of vmspace_token");
  163 SYSCTL_LONG(_lwkt, OID_AUTO, kvm_collisions, CTLFLAG_RW,
  164     &kvm_token.t_collisions, 0, "Collision counter of kvm_token");
  165 SYSCTL_LONG(_lwkt, OID_AUTO, sigio_collisions, CTLFLAG_RW,
  166     &sigio_token.t_collisions, 0, "Collision counter of sigio_token");
  167 SYSCTL_LONG(_lwkt, OID_AUTO, tty_collisions, CTLFLAG_RW,
  168     &tty_token.t_collisions, 0, "Collision counter of tty_token");
  169 SYSCTL_LONG(_lwkt, OID_AUTO, vnode_collisions, CTLFLAG_RW,
  170     &vnode_token.t_collisions, 0, "Collision counter of vnode_token");
  171 
  172 int tokens_debug_output;
  173 SYSCTL_INT(_lwkt, OID_AUTO, tokens_debug_output, CTLFLAG_RW,
  174     &tokens_debug_output, 0, "Generate stack trace N times");
  175 
  176 
  177 #ifdef DEBUG_LOCKS_LATENCY
  178 
  179 static long tokens_add_latency;
  180 SYSCTL_LONG(_debug, OID_AUTO, tokens_add_latency, CTLFLAG_RW,
  181             &tokens_add_latency, 0,
  182             "Add spinlock latency");
  183 
  184 #endif
  185 
  186 
  187 static int _lwkt_getalltokens_sorted(thread_t td);
  188 
  189 /*
  190  * Acquire the initial mplock
  191  *
  192  * (low level boot only)
  193  */
  194 void
  195 cpu_get_initial_mplock(void)
  196 {
  197         KKASSERT(mp_token.t_ref == NULL);
  198         if (lwkt_trytoken(&mp_token) == FALSE)
  199                 panic("cpu_get_initial_mplock");
  200 }
  201 
  202 /*
  203  * Return a pool token given an address.  Use a prime number to reduce
  204  * overlaps.
  205  */
  206 static __inline
  207 lwkt_token_t
  208 _lwkt_token_pool_lookup(void *ptr)
  209 {
  210         u_int i;
  211 
  212         i = (u_int)(uintptr_t)ptr % LWKT_NUM_POOL_TOKENS;
  213         return(&pool_tokens[i]);
  214 }
  215 
  216 /*
  217  * Initialize a tokref_t prior to making it visible in the thread's
  218  * token array.
  219  */
  220 static __inline
  221 void
  222 _lwkt_tokref_init(lwkt_tokref_t ref, lwkt_token_t tok, thread_t td, long excl)
  223 {
  224         ref->tr_tok = tok;
  225         ref->tr_count = excl;
  226         ref->tr_owner = td;
  227 }
  228 
  229 /*
  230  * Attempt to acquire a shared or exclusive token.  Returns TRUE on success,
  231  * FALSE on failure.
  232  *
  233  * If TOK_EXCLUSIVE is set in mode we are attempting to get an exclusive
  234  * token, otherwise are attempting to get a shared token.
  235  *
  236  * If TOK_EXCLREQ is set in mode this is a blocking operation, otherwise
   237  * it is a non-blocking operation (for both exclusive and shared acquisitions).
  238  */
  239 static __inline
  240 int
  241 _lwkt_trytokref(lwkt_tokref_t ref, thread_t td, long mode)
  242 {
  243         lwkt_token_t tok;
  244         lwkt_tokref_t oref;
  245         long count;
  246 
  247         tok = ref->tr_tok;
  248         KASSERT(((mode & TOK_EXCLREQ) == 0 ||   /* non blocking */
  249                 td->td_gd->gd_intr_nesting_level == 0 ||
  250                 panic_cpu_gd == mycpu),
  251                 ("Attempt to acquire token %p not already "
  252                 "held in hard code section", tok));
  253 
  254         if (mode & TOK_EXCLUSIVE) {
  255                 /*
  256                  * Attempt to get an exclusive token
  257                  */
  258                 for (;;) {
  259                         count = tok->t_count;
  260                         oref = tok->t_ref;      /* can be NULL */
  261                         cpu_ccfence();
  262                         if ((count & ~TOK_EXCLREQ) == 0) {
  263                                 /*
  264                                  * It is possible to get the exclusive bit.
  265                                  * We must clear TOK_EXCLREQ on successful
  266                                  * acquisition.
  267                                  */
  268                                 if (atomic_cmpset_long(&tok->t_count, count,
  269                                                        (count & ~TOK_EXCLREQ) |
  270                                                        TOK_EXCLUSIVE)) {
  271                                         KKASSERT(tok->t_ref == NULL);
  272                                         tok->t_ref = ref;
  273                                         return TRUE;
  274                                 }
  275                                 /* retry */
  276                         } else if ((count & TOK_EXCLUSIVE) &&
  277                                    oref >= &td->td_toks_base &&
  278                                    oref < td->td_toks_stop) {
  279                                 /*
  280                                  * Our thread already holds the exclusive
  281                                  * bit, we treat this tokref as a shared
  282                                  * token (sorta) to make the token release
  283                                  * code easier.
  284                                  *
  285                                  * NOTE: oref cannot race above if it
  286                                  *       happens to be ours, so we're good.
  287                                  *       But we must still have a stable
  288                                  *       variable for both parts of the
  289                                  *       comparison.
  290                                  *
  291                                  * NOTE: Since we already have an exclusive
  292                                  *       lock and don't need to check EXCLREQ
  293                                  *       we can just use an atomic_add here
  294                                  */
  295                                 atomic_add_long(&tok->t_count, TOK_INCR);
  296                                 ref->tr_count &= ~TOK_EXCLUSIVE;
  297                                 return TRUE;
  298                         } else if ((mode & TOK_EXCLREQ) &&
  299                                    (count & TOK_EXCLREQ) == 0) {
  300                                 /*
  301                                  * Unable to get the exclusive bit but being
  302                                  * asked to set the exclusive-request bit.
  303                                  * Since we are going to retry anyway just
  304                                  * set the bit unconditionally.
  305                                  */
  306                                 atomic_set_long(&tok->t_count, TOK_EXCLREQ);
  307                                 return FALSE;
  308                         } else {
  309                                 /*
  310                                  * Unable to get the exclusive bit and not
  311                                  * being asked to set the exclusive-request
  312                                  * (aka lwkt_trytoken()), or EXCLREQ was
  313                                  * already set.
  314                                  */
  315                                 cpu_pause();
  316                                 return FALSE;
  317                         }
  318                         /* retry */
  319                 }
  320         } else {
  321                 /*
  322                  * Attempt to get a shared token.  Note that TOK_EXCLREQ
  323                  * for shared tokens simply means the caller intends to
  324                  * block.  We never actually set the bit in tok->t_count.
  325                  */
  326                 for (;;) {
  327                         count = tok->t_count;
  328                         oref = tok->t_ref;      /* can be NULL */
  329                         cpu_ccfence();
  330                         if ((count & (TOK_EXCLUSIVE/*|TOK_EXCLREQ*/)) == 0) {
  331                                 /* XXX EXCLREQ should work */
  332                                 /*
  333                                  * It is possible to get the token shared.
  334                                  */
  335                                 if (atomic_cmpset_long(&tok->t_count, count,
  336                                                        count + TOK_INCR)) {
  337                                         return TRUE;
  338                                 }
  339                                 /* retry */
  340                         } else if ((count & TOK_EXCLUSIVE) &&
  341                                    oref >= &td->td_toks_base &&
  342                                    oref < td->td_toks_stop) {
  343                                 /*
  344                                  * We own the exclusive bit on the token so
  345                                  * we can in fact also get it shared.
  346                                  */
  347                                 atomic_add_long(&tok->t_count, TOK_INCR);
  348                                 return TRUE;
  349                         } else {
  350                                 /*
  351                                  * We failed to get the token shared
  352                                  */
  353                                 return FALSE;
  354                         }
  355                         /* retry */
  356                 }
  357         }
  358 }
  359 
  360 static __inline
  361 int
  362 _lwkt_trytokref_spin(lwkt_tokref_t ref, thread_t td, long mode)
  363 {
  364         int spin;
  365 
  366         if (_lwkt_trytokref(ref, td, mode)) {
  367 #ifdef DEBUG_LOCKS_LATENCY
  368                 long j;
  369                 for (j = tokens_add_latency; j > 0; --j)
  370                         cpu_ccfence();
  371 #endif
  372                 return TRUE;
  373         }
  374         for (spin = lwkt_token_spin; spin > 0; --spin) {
  375                 if (lwkt_token_delay)
  376                         tsc_delay(lwkt_token_delay);
  377                 else
  378                         cpu_pause();
  379                 if (_lwkt_trytokref(ref, td, mode)) {
  380 #ifdef DEBUG_LOCKS_LATENCY
  381                         long j;
  382                         for (j = tokens_add_latency; j > 0; --j)
  383                                 cpu_ccfence();
  384 #endif
  385                         return TRUE;
  386                 }
  387         }
  388         return FALSE;
  389 }
  390 
  391 /*
  392  * Release a token that we hold.
  393  */
  394 static __inline
  395 void
  396 _lwkt_reltokref(lwkt_tokref_t ref, thread_t td)
  397 {
  398         lwkt_token_t tok;
  399         long count;
  400 
  401         tok = ref->tr_tok;
  402         for (;;) {
  403                 count = tok->t_count;
  404                 cpu_ccfence();
  405                 if (tok->t_ref == ref) {
  406                         /*
  407                          * We are an exclusive holder.  We must clear tr_ref
  408                          * before we clear the TOK_EXCLUSIVE bit.  If we are
  409                          * unable to clear the bit we must restore
  410                          * tok->t_ref.
  411                          */
  412                         KKASSERT(count & TOK_EXCLUSIVE);
  413                         tok->t_ref = NULL;
  414                         if (atomic_cmpset_long(&tok->t_count, count,
  415                                                count & ~TOK_EXCLUSIVE)) {
  416                                 return;
  417                         }
  418                         tok->t_ref = ref;
  419                         /* retry */
  420                 } else {
  421                         /*
  422                          * We are a shared holder
  423                          */
  424                         KKASSERT(count & TOK_COUNTMASK);
  425                         if (atomic_cmpset_long(&tok->t_count, count,
  426                                                count - TOK_INCR)) {
  427                                 return;
  428                         }
  429                         /* retry */
  430                 }
  431                 /* retry */
  432         }
  433 }
  434 
  435 /*
  436  * Obtain all the tokens required by the specified thread on the current
  437  * cpu, return 0 on failure and non-zero on success.  If a failure occurs
  438  * any partially acquired tokens will be released prior to return.
  439  *
  440  * lwkt_getalltokens is called by the LWKT scheduler to re-acquire all
  441  * tokens that the thread had to release when it switched away.
  442  *
  443  * If spinning is non-zero this function acquires the tokens in a particular
   444  * order to deal with potential deadlocks.  We simply use address order
   445  * in that case.
  446  *
  447  * Called from a critical section.
  448  */
  449 int
  450 lwkt_getalltokens(thread_t td, int spinning)
  451 {
  452         lwkt_tokref_t scan;
  453         lwkt_token_t tok;
  454 
  455         if (spinning)
  456                 return(_lwkt_getalltokens_sorted(td));
  457 
  458         /*
  459          * Acquire tokens in forward order, assign or validate tok->t_ref.
  460          */
  461         for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {
  462                 tok = scan->tr_tok;
  463                 for (;;) {
  464                         /*
  465                          * Only try really hard on the last token
  466                          */
  467                         if (scan == td->td_toks_stop - 1) {
  468                             if (_lwkt_trytokref_spin(scan, td, scan->tr_count))
  469                                     break;
  470                         } else {
  471                             if (_lwkt_trytokref(scan, td, scan->tr_count))
  472                                     break;
  473                         }
  474 
  475                         /*
  476                          * Otherwise we failed to acquire all the tokens.
  477                          * Release whatever we did get.
  478                          */
  479                         KASSERT(tok->t_desc,
  480                                 ("token %p is not initialized", tok));
  481                         strncpy(td->td_gd->gd_cnt.v_lock_name,
  482                                 tok->t_desc,
  483                                 sizeof(td->td_gd->gd_cnt.v_lock_name) - 1);
  484 
  485                         if (lwkt_sched_debug > 0) {
  486                                 --lwkt_sched_debug;
  487                                 kprintf("toka %p %s %s\n",
  488                                         tok, tok->t_desc, td->td_comm);
  489                         }
  490                         td->td_wmesg = tok->t_desc;
  491                         ++tok->t_collisions;
  492                         while (--scan >= &td->td_toks_base)
  493                                 _lwkt_reltokref(scan, td);
  494                         return(FALSE);
  495                 }
  496         }
  497         return (TRUE);
  498 }
  499 
  500 /*
  501  * Release all tokens owned by the specified thread on the current cpu.
  502  *
   503  * This code is really simple.  Even in cases where we own all the
   504  * tokens, note that t_ref may not match the scan for recursively held
   505  * tokens held deeper in the stack, or for the case where a previous
   506  * lwkt_getalltokens() call failed.
  507  *
  508  * Tokens are released in reverse order to reduce chasing race failures.
  509  * 
  510  * Called from a critical section.
  511  */
  512 void
  513 lwkt_relalltokens(thread_t td)
  514 {
  515         lwkt_tokref_t scan;
  516 
  517         /*
  518          * Weird order is to try to avoid a panic loop
  519          */
  520         if (td->td_toks_have) {
  521                 scan = td->td_toks_have;
  522                 td->td_toks_have = NULL;
  523         } else {
  524                 scan = td->td_toks_stop;
  525         }
  526         while (--scan >= &td->td_toks_base)
  527                 _lwkt_reltokref(scan, td);
  528 }
  529 
  530 /*
  531  * This is the decontention version of lwkt_getalltokens().  The tokens are
  532  * acquired in address-sorted order to deal with any deadlocks.  Ultimately
  533  * token failures will spin into the scheduler and get here.
  534  *
  535  * Called from critical section
  536  */
  537 static
  538 int
  539 _lwkt_getalltokens_sorted(thread_t td)
  540 {
  541         lwkt_tokref_t sort_array[LWKT_MAXTOKENS];
  542         lwkt_tokref_t scan;
  543         lwkt_token_t tok;
  544         int i;
  545         int j;
  546         int n;
  547 
  548         /*
  549          * Sort the token array.  Yah yah, I know this isn't fun.
  550          *
  551          * NOTE: Recursively acquired tokens are ordered the same as in the
  552          *       td_toks_array so we can always get the earliest one first.
  553          */
  554         i = 0;
  555         scan = &td->td_toks_base;
  556         while (scan < td->td_toks_stop) {
  557                 for (j = 0; j < i; ++j) {
  558                         if (scan->tr_tok < sort_array[j]->tr_tok)
  559                                 break;
  560                 }
  561                 if (j != i) {
  562                         bcopy(sort_array + j, sort_array + j + 1,
  563                               (i - j) * sizeof(lwkt_tokref_t));
  564                 }
  565                 sort_array[j] = scan;
  566                 ++scan;
  567                 ++i;
  568         }
  569         n = i;
  570 
  571         /*
  572          * Acquire tokens in forward order, assign or validate tok->t_ref.
  573          */
  574         for (i = 0; i < n; ++i) {
  575                 scan = sort_array[i];
  576                 tok = scan->tr_tok;
  577                 for (;;) {
  578                         /*
  579                          * Only try really hard on the last token
  580                          */
  581                         if (scan == td->td_toks_stop - 1) {
  582                             if (_lwkt_trytokref_spin(scan, td, scan->tr_count))
  583                                     break;
  584                         } else {
  585                             if (_lwkt_trytokref(scan, td, scan->tr_count))
  586                                     break;
  587                         }
  588 
  589                         /*
  590                          * Otherwise we failed to acquire all the tokens.
  591                          * Release whatever we did get.
  592                          */
  593                         if (lwkt_sched_debug > 0) {
  594                                 --lwkt_sched_debug;
  595                                 kprintf("tokb %p %s %s\n",
  596                                         tok, tok->t_desc, td->td_comm);
  597                         }
  598                         td->td_wmesg = tok->t_desc;
  599                         ++tok->t_collisions;
  600                         while (--i >= 0) {
  601                                 scan = sort_array[i];
  602                                 _lwkt_reltokref(scan, td);
  603                         }
  604                         return(FALSE);
  605                 }
  606         }
  607 
  608         /*
  609          * We were successful, there is no need for another core to signal
  610          * us.
  611          */
  612         return (TRUE);
  613 }
  614 
  615 /*
  616  * Get a serializing token.  This routine can block.
  617  */
  618 void
  619 lwkt_gettoken(lwkt_token_t tok)
  620 {
  621         thread_t td = curthread;
  622         lwkt_tokref_t ref;
  623 
  624         ref = td->td_toks_stop;
  625         KKASSERT(ref < &td->td_toks_end);
  626         ++td->td_toks_stop;
  627         cpu_ccfence();
  628         _lwkt_tokref_init(ref, tok, td, TOK_EXCLUSIVE|TOK_EXCLREQ);
  629 
  630 #ifdef DEBUG_LOCKS
  631         /*
  632          * Taking an exclusive token after holding it shared will
  633          * livelock. Scan for that case and assert.
  634          */
  635         lwkt_tokref_t tk;
  636         int found = 0;
  637         for (tk = &td->td_toks_base; tk < ref; tk++) {
  638                 if (tk->tr_tok != tok)
  639                         continue;
  640                 
  641                 found++;
  642                 if (tk->tr_count & TOK_EXCLUSIVE) 
  643                         goto good;
  644         }
   645         /* If found > 0 here, only shared instances of this token exist */
  646         KASSERT((found == 0), ("Token %p s/x livelock", tok));
  647 good:
  648 #endif
  649 
  650         if (_lwkt_trytokref_spin(ref, td, TOK_EXCLUSIVE|TOK_EXCLREQ))
  651                 return;
  652 
  653         /*
  654          * Give up running if we can't acquire the token right now.
  655          *
  656          * Since the tokref is already active the scheduler now
  657          * takes care of acquisition, so we need only call
  658          * lwkt_switch().
  659          *
  660          * Since we failed this was not a recursive token so upon
  661          * return tr_tok->t_ref should be assigned to this specific
  662          * ref.
  663          */
  664         td->td_wmesg = tok->t_desc;
  665         ++tok->t_collisions;
  666         logtoken(fail, ref);
  667         td->td_toks_have = td->td_toks_stop - 1;
  668 
  669         if (tokens_debug_output > 0) {
  670                 --tokens_debug_output;
  671                 spin_lock(&tok_debug_spin);
  672                 kprintf("Excl Token thread %p %s %s\n",
  673                         td, tok->t_desc, td->td_comm);
  674                 print_backtrace(6);
  675                 kprintf("\n");
  676                 spin_unlock(&tok_debug_spin);
  677         }
  678 
  679         lwkt_switch();
  680         logtoken(succ, ref);
  681         KKASSERT(tok->t_ref == ref);
  682 }
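/*
 * Example (sketch, not compiled): because a thread that blocks loses its
 * tokens until it is rescheduled, token-protected state must be
 * revalidated after any blocking call.  "my_token" and "my_count" are
 * hypothetical.
 */
#if 0
lwkt_gettoken(&my_token);
while (my_count == 0) {
        /* token is released while we sleep and reacquired on wakeup */
        tsleep(&my_count, 0, "mywait", 0);
        /* my_count may have changed while the token was not held */
}
--my_count;
lwkt_reltoken(&my_token);
#endif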
  683 
  684 /*
  685  * Similar to gettoken but we acquire a shared token instead of an exclusive
  686  * token.
  687  */
  688 void
  689 lwkt_gettoken_shared(lwkt_token_t tok)
  690 {
  691         thread_t td = curthread;
  692         lwkt_tokref_t ref;
  693 
  694         ref = td->td_toks_stop;
  695         KKASSERT(ref < &td->td_toks_end);
  696         ++td->td_toks_stop;
  697         cpu_ccfence();
  698         _lwkt_tokref_init(ref, tok, td, TOK_EXCLREQ);
  699 
  700 #ifdef DEBUG_LOCKS
  701         /*
  702          * Taking a pool token in shared mode is a bad idea; other
  703          * addresses deeper in the call stack may hash to the same pool
  704          * token and you may end up with an exclusive-shared livelock.
  705          * Warn in this condition.
  706          */
  707         if ((tok >= &pool_tokens[0]) &&
  708             (tok < &pool_tokens[LWKT_NUM_POOL_TOKENS]))
  709                 kprintf("Warning! Taking pool token %p in shared mode\n", tok);
  710 #endif
  711 
  712 
  713         if (_lwkt_trytokref_spin(ref, td, TOK_EXCLREQ))
  714                 return;
  715 
  716         /*
  717          * Give up running if we can't acquire the token right now.
  718          *
  719          * Since the tokref is already active the scheduler now
  720          * takes care of acquisition, so we need only call
  721          * lwkt_switch().
  722          *
  723          * Since we failed this was not a recursive token so upon
  724          * return tr_tok->t_ref should be assigned to this specific
  725          * ref.
  726          */
  727         td->td_wmesg = tok->t_desc;
  728         ++tok->t_collisions;
  729         logtoken(fail, ref);
  730         td->td_toks_have = td->td_toks_stop - 1;
  731 
  732         if (tokens_debug_output > 0) {
  733                 --tokens_debug_output;
  734                 spin_lock(&tok_debug_spin);
  735                 kprintf("Shar Token thread %p %s %s\n",
  736                         td, tok->t_desc, td->td_comm);
  737                 print_backtrace(6);
  738                 kprintf("\n");
  739                 spin_unlock(&tok_debug_spin);
  740         }
  741 
  742         lwkt_switch();
  743         logtoken(succ, ref);
  744 }
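/*
 * Example (sketch, not compiled): read-mostly paths can take the token
 * shared, allowing concurrent readers, while writers use lwkt_gettoken()
 * for exclusive access.  "my_token", "my_list" and "item" are
 * hypothetical.
 */
#if 0
lwkt_gettoken_shared(&my_token);        /* runs concurrently with readers */
TAILQ_FOREACH(item, &my_list, it_entry) {
        /* read-only inspection of item */
}
lwkt_reltoken(&my_token);
#endif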
  745 
  746 /*
  747  * Attempt to acquire a token, return TRUE on success, FALSE on failure.
  748  *
   749  * We set up the tokref in case we actually get the token (if we switch later
  750  * it becomes mandatory so we set TOK_EXCLREQ), but we call trytokref without
  751  * TOK_EXCLREQ in case we fail.
  752  */
  753 int
  754 lwkt_trytoken(lwkt_token_t tok)
  755 {
  756         thread_t td = curthread;
  757         lwkt_tokref_t ref;
  758 
  759         ref = td->td_toks_stop;
  760         KKASSERT(ref < &td->td_toks_end);
  761         ++td->td_toks_stop;
  762         cpu_ccfence();
  763         _lwkt_tokref_init(ref, tok, td, TOK_EXCLUSIVE|TOK_EXCLREQ);
  764 
  765         if (_lwkt_trytokref(ref, td, TOK_EXCLUSIVE))
  766                 return TRUE;
  767 
  768         /*
  769          * Failed, unpend the request
  770          */
  771         cpu_ccfence();
  772         --td->td_toks_stop;
  773         ++tok->t_collisions;
  774         return FALSE;
  775 }
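/*
 * Example (sketch, not compiled): opportunistic acquisition for paths
 * that must not block.  "my_token" is hypothetical.
 */
#if 0
if (lwkt_trytoken(&my_token)) {
        /* ... protected, non-blocking work ... */
        lwkt_reltoken(&my_token);
} else {
        /* contended; defer the work or fall back to a blocking path */
}
#endif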
  776 
  777 
  778 void
  779 lwkt_gettoken_hard(lwkt_token_t tok)
  780 {
  781         lwkt_gettoken(tok);
  782         crit_enter_hard();
  783 }
  784 
  785 lwkt_token_t
  786 lwkt_getpooltoken(void *ptr)
  787 {
  788         lwkt_token_t tok;
  789 
  790         tok = _lwkt_token_pool_lookup(ptr);
  791         lwkt_gettoken(tok);
  792         return (tok);
  793 }
  794 
  795 /*
  796  * Release a serializing token.
  797  *
  798  * WARNING!  All tokens must be released in reverse order.  This will be
  799  *           asserted.
  800  */
  801 void
  802 lwkt_reltoken(lwkt_token_t tok)
  803 {
  804         thread_t td = curthread;
  805         lwkt_tokref_t ref;
  806 
  807         /*
  808          * Remove ref from thread token list and assert that it matches
  809          * the token passed in.  Tokens must be released in reverse order.
  810          */
  811         ref = td->td_toks_stop - 1;
  812         KKASSERT(ref >= &td->td_toks_base && ref->tr_tok == tok);
  813         _lwkt_reltokref(ref, td);
  814         cpu_sfence();
  815         td->td_toks_stop = ref;
  816 }
  817 
  818 void
  819 lwkt_reltoken_hard(lwkt_token_t tok)
  820 {
  821         lwkt_reltoken(tok);
  822         crit_exit_hard();
  823 }
  824 
  825 /*
  826  * It is faster for users of lwkt_getpooltoken() to use the returned
  827  * token and just call lwkt_reltoken(), but for convenience we provide
   829  * this function, which looks the token up based on the address.
  829  */
  830 void
  831 lwkt_relpooltoken(void *ptr)
  832 {
  833         lwkt_token_t tok = _lwkt_token_pool_lookup(ptr);
  834         lwkt_reltoken(tok);
  835 }
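/*
 * Example (sketch, not compiled): pool tokens serialize on an address
 * without embedding a token in the structure.  Holding on to the
 * returned token avoids a second hash lookup on release; "obj" is
 * hypothetical.
 */
#if 0
lwkt_token_t tok;

tok = lwkt_getpooltoken(obj);   /* hashes obj's address to a pool token */
/* ... operate on *obj ... */
lwkt_reltoken(tok);             /* faster than lwkt_relpooltoken(obj) */
#endif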
  836 
  837 /*
  838  * Return a count of the number of token refs the thread has to the
  839  * specified token, whether it currently owns the token or not.
  840  */
  841 int
  842 lwkt_cnttoken(lwkt_token_t tok, thread_t td)
  843 {
  844         lwkt_tokref_t scan;
  845         int count = 0;
  846 
  847         for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {
  848                 if (scan->tr_tok == tok)
  849                         ++count;
  850         }
  851         return(count);
  852 }
  853 
  854 /*
  855  * Pool tokens are used to provide a type-stable serializing token
  856  * pointer that does not race against disappearing data structures.
  857  *
   858  * This routine is called in early boot just after we set up the BSP's
  859  * globaldata structure.
  860  */
  861 void
  862 lwkt_token_pool_init(void)
  863 {
  864         int i;
  865 
  866         for (i = 0; i < LWKT_NUM_POOL_TOKENS; ++i)
  867                 lwkt_token_init(&pool_tokens[i], "pool");
  868 }
  869 
  870 lwkt_token_t
  871 lwkt_token_pool_lookup(void *ptr)
  872 {
  873         return (_lwkt_token_pool_lookup(ptr));
  874 }
  875 
  876 /*
  877  * Initialize a token.  
  878  */
  879 void
  880 lwkt_token_init(lwkt_token_t tok, const char *desc)
  881 {
  882         tok->t_count = 0;
  883         tok->t_ref = NULL;
  884         tok->t_collisions = 0;
  885         tok->t_desc = desc;
  886 }
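/*
 * Example (sketch, not compiled): a token embedded in a dynamically
 * allocated structure is initialized with lwkt_token_init(); statically
 * allocated tokens can use LWKT_TOKEN_INITIALIZER instead.  struct
 * my_softc and the malloc type are hypothetical.
 */
#if 0
struct my_softc *sc;

sc = kmalloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
lwkt_token_init(&sc->sc_token, "mysc");
#endif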
  887 
  888 void
  889 lwkt_token_uninit(lwkt_token_t tok)
  890 {
  891         /* empty */
  892 }
  893 
  894 /*
  895  * Exchange the two most recent tokens on the tokref stack.  This allows
  896  * you to release a token out of order.
  897  *
  898  * We have to be careful about the case where the top two tokens are
  899  * the same token.  In this case tok->t_ref will point to the deeper
  900  * ref and must remain pointing to the deeper ref.  If we were to swap
  901  * it the first release would clear the token even though a second
  902  * ref is still present.
  903  *
  904  * Only exclusively held tokens contain a reference to the tokref which
  905  * has to be flipped along with the swap.
  906  */
  907 void
  908 lwkt_token_swap(void)
  909 {
  910         lwkt_tokref_t ref1, ref2;
  911         lwkt_token_t tok1, tok2;
  912         long count1, count2;
  913         thread_t td = curthread;
  914 
  915         crit_enter();
  916 
  917         ref1 = td->td_toks_stop - 1;
  918         ref2 = td->td_toks_stop - 2;
  919         KKASSERT(ref1 >= &td->td_toks_base);
  920         KKASSERT(ref2 >= &td->td_toks_base);
  921 
  922         tok1 = ref1->tr_tok;
  923         tok2 = ref2->tr_tok;
  924         count1 = ref1->tr_count;
  925         count2 = ref2->tr_count;
  926 
  927         if (tok1 != tok2) {
  928                 ref1->tr_tok = tok2;
  929                 ref1->tr_count = count2;
  930                 ref2->tr_tok = tok1;
  931                 ref2->tr_count = count1;
  932                 if (tok1->t_ref == ref1)
  933                         tok1->t_ref = ref2;
  934                 if (tok2->t_ref == ref2)
  935                         tok2->t_ref = ref1;
  936         }
  937 
  938         crit_exit();
  939 }
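/*
 * Example (sketch, not compiled): lwkt_token_swap() permits releasing
 * the second-most-recently acquired token first.  "tok_a" and "tok_b"
 * are hypothetical.
 */
#if 0
lwkt_gettoken(&tok_a);
lwkt_gettoken(&tok_b);
/* we want to keep tok_b but are done with tok_a */
lwkt_token_swap();              /* tok_a is now on top of the tokref stack */
lwkt_reltoken(&tok_a);
/* ... continue with tok_b held ... */
lwkt_reltoken(&tok_b);
#endif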
  940 
  941 #ifdef DDB
  942 DB_SHOW_COMMAND(tokens, db_tok_all)
  943 {
  944         struct lwkt_token *tok, **ptr;
  945         struct lwkt_token *toklist[16] = {
  946                 &mp_token,
  947                 &pmap_token,
  948                 &dev_token,
  949                 &vm_token,
  950                 &vmspace_token,
  951                 &kvm_token,
  952                 &sigio_token,
  953                 &tty_token,
  954                 &vnode_token,
  955                 NULL
  956         };
  957 
  958         ptr = toklist;
  959         for (tok = *ptr; tok; tok = *(++ptr)) {
   960                 db_printf("tok=%p tr_owner=%p t_collisions=%ld t_desc=%s\n", tok,
  961                     (tok->t_ref ? tok->t_ref->tr_owner : NULL),
  962                     tok->t_collisions, tok->t_desc);
  963         }
  964 }
  965 #endif /* DDB */
