
FreeBSD/Linux Kernel Cross Reference
sys/sys/thread2.h


/*
 * SYS/THREAD2.H
 *
 * Implements inline procedure support for the LWKT subsystem.
 *
 * Generally speaking these routines only operate on threads associated
 * with the current cpu.  For example, a higher priority thread pending
 * on a different cpu will not be immediately scheduled by a yield() on
 * this cpu.
 */

#ifndef _SYS_THREAD2_H_
#define _SYS_THREAD2_H_

#ifndef _KERNEL

#error "This file should not be included by userland programs."

#else

/*
 * Userland will have its own globaldata which it includes prior to this.
 */
#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/cpufunc.h>

/*
 * Is a token held either by the specified thread or held shared?
 *
 * We can't inexpensively validate the thread for a shared token
 * without iterating td->td_toks, so this isn't a perfect test.
 */
static __inline int
_lwkt_token_held_any(lwkt_token_t tok, thread_t td)
{
        long count = tok->t_count;

        cpu_ccfence();
        if (tok->t_ref >= &td->td_toks_base && tok->t_ref < td->td_toks_stop)
                return TRUE;
        if ((count & TOK_EXCLUSIVE) == 0 &&
            (count & ~(TOK_EXCLUSIVE|TOK_EXCLREQ))) {
                return TRUE;
        }
        return FALSE;
}

/*
 * Is a token held exclusively by the specified thread?
 */
static __inline int
_lwkt_token_held_excl(lwkt_token_t tok, thread_t td)
{
        return ((tok->t_ref >= &td->td_toks_base &&
                 tok->t_ref < td->td_toks_stop));
}
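
/*
 * Example (illustrative sketch, not from the original header): these
 * helpers are the kind of thing a subsystem might use to sanity-check
 * token ownership before touching protected state.  KKASSERT and
 * curthread are standard kernel facilities; my_tok and
 * my_subsys_check are hypothetical.
 */
#if 0
static __inline void
my_subsys_check(lwkt_token_t my_tok)
{
        thread_t td = curthread;

        /* any hold (shared or exclusive) suffices for readers */
        KKASSERT(_lwkt_token_held_any(my_tok, td));

        /* writers must hold the token exclusively */
        KKASSERT(_lwkt_token_held_excl(my_tok, td));
}
#endif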

/*
 * Critical section debugging
 */
#ifdef DEBUG_CRIT_SECTIONS
#define __DEBUG_CRIT_ARG__              const char *id
#define __DEBUG_CRIT_ADD_ARG__          , const char *id
#define __DEBUG_CRIT_PASS_ARG__         , id
#define __DEBUG_CRIT_ENTER(td)          _debug_crit_enter((td), id)
#define __DEBUG_CRIT_EXIT(td)           _debug_crit_exit((td), id)
#define crit_enter()                    _crit_enter(mycpu, __func__)
#define crit_enter_id(id)               _crit_enter(mycpu, id)
#define crit_enter_gd(curgd)            _crit_enter((curgd), __func__)
#define crit_enter_quick(curtd)         _crit_enter_quick((curtd), __func__)
#define crit_enter_hard()               _crit_enter_hard(mycpu, __func__)
#define crit_enter_hard_gd(curgd)       _crit_enter_hard((curgd), __func__)
#define crit_exit()                     _crit_exit(mycpu, __func__)
#define crit_exit_id(id)                _crit_exit(mycpu, id)
#define crit_exit_gd(curgd)             _crit_exit((curgd), __func__)
#define crit_exit_quick(curtd)          _crit_exit_quick((curtd), __func__)
#define crit_exit_hard()                _crit_exit_hard(mycpu, __func__)
#define crit_exit_hard_gd(curgd)        _crit_exit_hard((curgd), __func__)
#define crit_exit_noyield(curtd)        _crit_exit_noyield((curtd), __func__)
#else
#define __DEBUG_CRIT_ARG__              void
#define __DEBUG_CRIT_ADD_ARG__
#define __DEBUG_CRIT_PASS_ARG__
#define __DEBUG_CRIT_ENTER(td)
#define __DEBUG_CRIT_EXIT(td)
#define crit_enter()                    _crit_enter(mycpu)
#define crit_enter_id(id)               _crit_enter(mycpu)
#define crit_enter_gd(curgd)            _crit_enter((curgd))
#define crit_enter_quick(curtd)         _crit_enter_quick((curtd))
#define crit_enter_hard()               _crit_enter_hard(mycpu)
#define crit_enter_hard_gd(curgd)       _crit_enter_hard((curgd))
#define crit_exit()                     crit_exit_wrapper()
#define crit_exit_id(id)                _crit_exit(mycpu)
#define crit_exit_gd(curgd)             _crit_exit((curgd))
#define crit_exit_quick(curtd)          _crit_exit_quick((curtd))
#define crit_exit_hard()                _crit_exit_hard(mycpu)
#define crit_exit_hard_gd(curgd)        _crit_exit_hard((curgd))
#define crit_exit_noyield(curtd)        _crit_exit_noyield((curtd))
#endif

extern void crit_exit_wrapper(__DEBUG_CRIT_ARG__);

/*
 * Track crit_enter()/crit_exit() pairs and warn on mismatches.
 */
#ifdef DEBUG_CRIT_SECTIONS

static __inline void
_debug_crit_enter(thread_t td, const char *id)
{
    int wi = td->td_crit_debug_index;

    td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK] = id;
    ++td->td_crit_debug_index;
}

static __inline void
_debug_crit_exit(thread_t td, const char *id)
{
    const char *gid;
    int wi;

    wi = td->td_crit_debug_index - 1;
    if ((gid = td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK]) != id) {
        if (td->td_in_crit_report == 0) {
            td->td_in_crit_report = 1;
            kprintf("crit_exit(%s) expected id %s\n", id, gid);
            td->td_in_crit_report = 0;
        }
    }
    --td->td_crit_debug_index;
}

#endif
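
/*
 * Example (illustrative sketch, not from the original header): with
 * DEBUG_CRIT_SECTIONS enabled, a mismatched enter/exit identifier is
 * reported via kprintf as shown in _debug_crit_exit() above.  The
 * identifiers below are hypothetical.
 */
#if 0
crit_enter_id("netisr");
/* ... critical work ... */
crit_exit_id("netsir");     /* typo: logs "crit_exit(netsir) expected id netisr" */
#endif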

/*
 * Critical sections prevent preemption, but allow explicit blocking
 * and thread switching.  Any interrupt occurring while in a critical
 * section is made pending and returns immediately.  Interrupts are not
 * physically disabled.
 *
 * Hard critical sections prevent preemption and disallow any blocking
 * or thread switching, and in addition will assert on any blockable
 * operation (acquiring a token not already held, lockmgr, mutex ops, or
 * splz).  Spinlocks can still be used in hard sections.
 *
 * All critical section routines only operate on the current thread.
 * Passed gd or td arguments are simply optimizations when mycpu or
 * curthread is already available to the caller.
 */

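/*
 * Example (illustrative sketch, not from the original header):
 * critical sections nest via td_critcount, so every crit_enter() must
 * be balanced by a crit_exit(); pending work is only picked up when
 * the outermost section exits.
 */
#if 0
crit_enter();               /* td_critcount becomes 1 */
crit_enter();               /* nested, count == 2 */
/* ... touch per-cpu data; no preemption can occur here ... */
crit_exit();                /* count back to 1, still protected */
crit_exit();                /* count 0; lwkt_maybe_splz() may run */
#endif
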
/*
 * crit_enter
 */
static __inline void
_crit_enter_quick(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
    ++td->td_critcount;
    __DEBUG_CRIT_ENTER(td);
    cpu_ccfence();
}

static __inline void
_crit_enter(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
    _crit_enter_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

static __inline void
_crit_enter_hard(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
    _crit_enter_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
    ++gd->gd_intr_nesting_level;
}

/*
 * crit_exit*()
 *
 * NOTE: We conditionalize only on gd_reqflags, a test which is
 *       virtually never true regardless of crit_count, which keeps
 *       the common path optimal.  We don't also check crit_count
 *       because that just bloats the inline and does not improve
 *       performance.
 *
 * NOTE: This can produce a considerable amount of code despite the
 *       relatively few lines of source, so the non-debug case typically
 *       just wraps it in a real function, crit_exit_wrapper().
 */
static __inline void
_crit_exit_noyield(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
    __DEBUG_CRIT_EXIT(td);
    --td->td_critcount;
#ifdef INVARIANTS
    if (__predict_false(td->td_critcount < 0))
        crit_panic();
#endif
    cpu_ccfence();      /* prevent compiler reordering */
}

static __inline void
_crit_exit_quick(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
    _crit_exit_noyield(td __DEBUG_CRIT_PASS_ARG__);
    if (__predict_false(td->td_gd->gd_reqflags & RQF_IDLECHECK_MASK))
        lwkt_maybe_splz(td);
}

static __inline void
_crit_exit(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
    _crit_exit_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

static __inline void
_crit_exit_hard(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
    --gd->gd_intr_nesting_level;
    _crit_exit_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

static __inline int
crit_test(thread_t td)
{
    return(td->td_critcount);
}

/*
 * Return whether any threads are runnable.
 */
static __inline int
lwkt_runnable(void)
{
    return (TAILQ_FIRST(&mycpu->gd_tdrunq) != NULL);
}

static __inline int
lwkt_getpri(thread_t td)
{
    return(td->td_pri);
}

static __inline int
lwkt_getpri_self(void)
{
    return(lwkt_getpri(curthread));
}

/*
 * Reduce our priority in preparation for a return to userland.  If
 * our passive release function was still in place, our priority was
 * never raised and does not need to be reduced.
 *
 * See also lwkt_passive_release() and platform/blah/trap.c
 */
static __inline void
lwkt_passive_recover(thread_t td)
{
#ifndef NO_LWKT_SPLIT_USERPRI
    if (td->td_release == NULL)
        lwkt_setpri_self(TDPRI_USER_NORM);
    td->td_release = NULL;
#endif
}

/*
 * cpusync support
 */
static __inline void
lwkt_cpusync_init(lwkt_cpusync_t cs, cpumask_t mask,
                  cpusync_func_t func, void *data)
{
        cs->cs_mask = mask;
        /* cs->cs_mack = 0; handled by _interlock */
        cs->cs_func = func;
        cs->cs_data = data;
}
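
/*
 * Example (illustrative sketch, not from the original header): a
 * cpusync is initialized and then run through the interlock/
 * deinterlock pair (the _interlock note above implies this sequence,
 * but the exact calls are an assumption here).  my_sync_func and
 * my_sync_all are hypothetical.
 */
#if 0
static void
my_sync_func(void *data)
{
        /* runs on each cpu in the mask */
}

static void
my_sync_all(void *data)
{
        struct lwkt_cpusync cs;

        lwkt_cpusync_init(&cs, smp_active_mask, my_sync_func, data);
        lwkt_cpusync_interlock(&cs);
        lwkt_cpusync_deinterlock(&cs);
}
#endif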

/*
 * IPIQ messaging wrappers.  IPIQ remote functions are passed three
 * arguments: a void * pointer, an integer, and a pointer to the trap
 * frame (or NULL if the trap frame is not known).  However, we wish to
 * provide opaque interfaces for simpler callbacks... the basic IPI
 * messaging function as used by the kernel takes a single argument.
 */
static __inline int
lwkt_send_ipiq(globaldata_t target, ipifunc1_t func, void *arg)
{
    return(lwkt_send_ipiq3(target, (ipifunc3_t)func, arg, 0));
}
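
/*
 * Example (illustrative sketch, not from the original header): a
 * caller defines a single-argument callback matching ipifunc1_t and
 * targets another cpu's globaldata; the wrapper pads the remaining
 * arguments for the three-argument backend.  my_remote_func,
 * my_kick_cpu, and the use of globaldata_find() are hypothetical.
 */
#if 0
static void
my_remote_func(void *arg)
{
        /* executes on the target cpu */
}

static void
my_kick_cpu(int cpuid, void *arg)
{
        lwkt_send_ipiq(globaldata_find(cpuid), my_remote_func, arg);
}
#endif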

static __inline int
lwkt_send_ipiq2(globaldata_t target, ipifunc2_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_mask(cpumask_t mask, ipifunc1_t func, void *arg)
{
    return(lwkt_send_ipiq3_mask(mask, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_mask(cpumask_t mask, ipifunc2_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3_mask(mask, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_nowait(globaldata_t target, ipifunc1_t func, void *arg)
{
    return(lwkt_send_ipiq3_nowait(target, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_nowait(globaldata_t target, ipifunc2_t func,
                       void *arg1, int arg2)
{
    return(lwkt_send_ipiq3_nowait(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_passive(globaldata_t target, ipifunc1_t func, void *arg)
{
    return(lwkt_send_ipiq3_passive(target, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_passive(globaldata_t target, ipifunc2_t func,
                        void *arg1, int arg2)
{
    return(lwkt_send_ipiq3_passive(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_bycpu(int dcpu, ipifunc1_t func, void *arg)
{
    return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_bycpu(int dcpu, ipifunc2_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg1, arg2));
}

#endif  /* _KERNEL */
#endif  /* _SYS_THREAD2_H_ */
