FreeBSD/Linux Kernel Cross Reference
sys/sched_policy/mk_ts.c


/*
 * Mach Operating System
 * Copyright (c) 1987-1993 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * HISTORY
 * $Log:        mk_ts.c,v $
 * Revision 2.2  93/11/17  18:37:33  dbg
 *      Break up thread lock.
 *      [93/05/26            dbg]
 *
 *      Add per-policy scheduling parameters to thread and run queue.
 *      [93/05/10            dbg]
 *
 *      Moved common operations to kern/run_queues.c.
 *      [93/04/10            dbg]
 *
 *      Converted to per-policy run queue structure.
 *      [93/04/06            dbg]
 *
 *      Always enable fixed-priority threads.
 *      [93/03/27            dbg]
 *
 *      Merged back into microkernel mainline.  Simplified idle-processor
 *      logic for single CPU.
 *
 *      Added policy_init()
 *      [92/06/01       savage]
 *      Copied from kern/sched_prim.c
 *      [92/02/01       savage]
 *
 */

#include <mach_host.h>
#include <mach_io_binding.h>
#include <stat_time.h>

#include <mach/boolean.h>

#include <kern/macro_help.h>
#include <kern/ast.h>
#include <kern/kalloc.h>
#include <kern/kern_io.h>
#include <kern/processor.h>
#include <kern/run_queues.h>
#include <kern/sched.h>                 /* sched_tick */
#include <kern/sched_policy.h>
#include <kern/thread.h>

#include <sched_policy/standard.h>

#include <machine/machspl.h>

/*
 *      Timesharing policy.
 */

/*
 *      Uses 32 run queues, with a low marker.
 */
#define MAX_PRIORITY    31              /* priorities 0..31 */

#define NRQS            (MAX_PRIORITY+1)
struct ts_run_queue {
        struct run_queue rq;            /* common structure */
        queue_head_t    ts_queue[NRQS];
        int             ts_low;         /* hint */
        int             ts_max_priority;
                                        /* limits for policy */
};
typedef struct ts_run_queue *   ts_run_queue_t;

#define ts_runq(rq)     ((struct ts_run_queue *)(rq))
#define ts_count        rq.rq_count
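
/*
 *      Illustrative, user-space sketch of the "array of queues plus
 *      low marker" technique used by this policy; it is not part of
 *      the kernel build.  The demo_* names are hypothetical, the
 *      lists are simplified to singly linked LIFO chains (the kernel
 *      enqueues at the tail of a doubly linked queue_head_t for
 *      round-robin fairness), and an assertion stands in for the
 *      panic in ts_thread_dequeue.
 */
#if 0
#include <assert.h>
#include <stddef.h>

#define DEMO_NRQS       32

struct demo_node {
        struct demo_node *next;
};

struct demo_rq {
        struct demo_node *queue[DEMO_NRQS];     /* index = priority */
        int             low;                    /* lowest maybe-nonempty */
        int             count;
};

static void
demo_enqueue(struct demo_rq *rq, struct demo_node *n, int pri)
{
        n->next = rq->queue[pri];
        rq->queue[pri] = n;
        /* Pull the hint down so the next scan starts early enough. */
        if (pri < rq->low || rq->count == 0)
                rq->low = pri;
        rq->count++;
}

static struct demo_node *
demo_dequeue(struct demo_rq *rq)
{
        int     i;

        assert(rq->count > 0);
        /* Scan upward from the hint; first non-empty queue wins. */
        for (i = rq->low; i < DEMO_NRQS; i++) {
                if (rq->queue[i] != NULL) {
                        struct demo_node *n = rq->queue[i];

                        rq->queue[i] = n->next;
                        rq->count--;
                        rq->low = i;    /* nothing is queued below i */
                        return n;
                }
        }
        assert(0);      /* unreachable if count is kept consistent */
        return NULL;
}
#endif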

/*
 *      Per-thread scheduling fields:
 */
struct ts_sched_data {
        int     max_priority;           /* maximum priority */
        int     base_priority;          /* base priority */
        int     sched_priority;         /* current priority */
};

#define ts_sched(th)    ((struct ts_sched_data *)&(th)->sched_data)

extern struct sched_policy      ts_sched_policy;        /* forward */

/*
 *      Choose thread.
 */
thread_t
ts_thread_dequeue(
        run_queue_t     runq)
{
        ts_run_queue_t  rq = ts_runq(runq);
        int             low;
        queue_t         q;
        queue_entry_t   elt;
        processor_t     processor;

        assert(rq->ts_count > 0);

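        /*
         *      Lower numbers are stronger priorities in Mach, so the
         *      scan starts at the low hint and returns the head of
         *      the first non-empty queue.
         */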
        for (low = rq->ts_low, q = &rq->ts_queue[low];
              low < NRQS;
              low++, q++)
        {
            if (!queue_empty(q)) {
                dequeue_head_macro(q, elt);
                rq->ts_count--;
                rq->ts_low = low;

                processor = current_processor();
                processor->quantum = processor->processor_set->set_quantum;
                processor->first_quantum = TRUE;

                return (thread_t) elt;
            }
        }
        panic("ts_thread_dequeue");
}

/*
 *      Put a thread onto a run queue.
 *      Return whether it can preempt the currently
 *      running thread.
 */
boolean_t ts_thread_enqueue(
        run_queue_t     runq,
        thread_t        th,
        boolean_t       may_preempt)
{
        register ts_run_queue_t rq;
        register unsigned int   whichq;
        register queue_t        q;

        whichq = ts_sched(th)->sched_priority;
        if (whichq >= NRQS) {
            printf("ts_thread_enqueue: pri too high (%u)\n", whichq);
            whichq = NRQS - 1;
        }

        rq = ts_runq(runq);

        q = &rq->ts_queue[whichq];
        enqueue_tail_macro(q, (queue_entry_t) th);

        if (whichq < rq->ts_low || rq->ts_count == 0)
            rq->ts_low = whichq;        /* minimize */
        rq->ts_count++;

        if (!may_preempt)
            return FALSE;

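        /*
         *      Lower number means stronger priority: preemption is
         *      indicated only if the new thread`s priority number is
         *      no greater than the running thread`s.
         */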
        return ts_sched(th)->sched_priority
             <= ts_sched(current_thread())->sched_priority;
}

void
ts_thread_remqueue(
        run_queue_t     runq,
        thread_t        thread)
{
        ts_run_queue_t  rq = ts_runq(runq);

        remqueue(&rq->ts_queue[0], (queue_entry_t) thread);
        rq->ts_count--;
}

boolean_t ts_csw_needed(
        run_queue_t     runq,
        thread_t        thread)
{
        ts_run_queue_t  rq = ts_runq(runq);
        queue_t         q;
#if     NCPUS > 1
        processor_set_t cur_pset = current_processor_set();
#endif

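        /*
         *      A freshly dispatched thread keeps the processor
         *      through its first quantum; no switch is signalled
         *      until that quantum expires.
         */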
        if (current_processor()->first_quantum)
            return FALSE;

        /*
         *      Check whether low hint is accurate.
         */
        q = rq->ts_queue + *(volatile int *)&rq->ts_low;
        if (queue_empty(q)) {
            register int i;

#if     NCPUS > 1
            /* we know that 'rq' belongs to the current processor set */
            simple_lock(&cur_pset->runq.lock);
#endif
            q = rq->ts_queue + rq->ts_low;
            if (rq->ts_count > 0) {
                for (i = rq->ts_low; i < NRQS; i++) {
                    if (!queue_empty(q))
                        break;
                    q++;
                }
                rq->ts_low = i;
            }
#if     NCPUS > 1
            simple_unlock(&cur_pset->runq.lock);
#endif
        }
        return rq->ts_low <= ts_sched(thread)->sched_priority;
}

/*
 *      Set scheduling limit value for processor set.
 */
kern_return_t
ts_runq_set_limit(
        run_queue_t     runq,
        policy_param_t  limit,
        natural_t       count)
{
        int     max_priority;

        if (count == 0) {
            /*
             *  Use default value for max priority.
             */
            max_priority = TS_BASEPRI_USER;
        }
        else {
            if (count < POLICY_PARAM_TIMESHARE_COUNT)
                return KERN_INVALID_ARGUMENT;

            max_priority = ((struct policy_param_timeshare *)limit)->priority;
            if (max_priority < 0 || max_priority > MAX_PRIORITY)
                return KERN_INVALID_ARGUMENT;
        }

        ts_runq(runq)->ts_max_priority = max_priority;
        return KERN_SUCCESS;
}

/*
 *      Get scheduling limit value for processor set.
 */
kern_return_t
ts_runq_get_limit(
        run_queue_t     runq,
        policy_param_t  limit,
        natural_t       *count)
{
        if (*count < POLICY_PARAM_TIMESHARE_COUNT)
            return KERN_INVALID_ARGUMENT;

        ((struct policy_param_timeshare *)limit)->priority
                = ts_runq(runq)->ts_max_priority;
        *count = POLICY_PARAM_TIMESHARE_COUNT;
        return KERN_SUCCESS;
}

kern_return_t
ts_thread_set_limit(
        thread_t        thread,
        policy_param_t  limit,
        natural_t       count)
{
        int     max_priority;

        if (count < POLICY_PARAM_TIMESHARE_COUNT)
            return KERN_INVALID_ARGUMENT;

        max_priority = ((struct policy_param_timeshare *)limit)->priority;
        if (max_priority < 0 || max_priority > MAX_PRIORITY)
            return KERN_INVALID_ARGUMENT;

        ts_sched(thread)->max_priority = max_priority;
        return KERN_SUCCESS;
}

/*
 *      Set the scheduling parameters for a thread.  If they
 *      are not supplied, the thread`s current parameters
 *      are used if 'new_policy' is FALSE; otherwise, per-
 *      policy defaults are used.  If 'new_policy' is TRUE,
 *      limit values are taken from the thread`s processor
 *      set; otherwise, the thread`s current limit values
 *      are used.  If 'check_limits' is TRUE, returns an
 *      error if parameter values violate the limits; otherwise,
 *      the limits are silently enforced.
 */
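/*
 *      Summary of the cases handled below (smaller numbers are
 *      stronger priorities):
 *
 *      new_policy      parameter source        limit source
 *      ----------      ----------------        ------------
 *      TRUE            TS_BASEPRI_USER         processor set
 *      FALSE           thread`s current        thread`s current
 *
 *      Explicitly supplied parameters (count != 0) override either
 *      source.  With 'check_limits' TRUE, a base priority stronger
 *      than the limit yields KERN_FAILURE; with it FALSE, the base
 *      priority is silently weakened to the limit.
 */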
kern_return_t
ts_thread_set_param(
        thread_t        thread,
        policy_param_t  param,
        natural_t       count,
        boolean_t       new_policy,
        boolean_t       check_limits)
{
        int     base_priority;
        int     max_priority;
        int     pset_max_priority;

        pset_max_priority =
            ts_runq(thread->processor_set->runq.runqs[ts_sched_policy.rank])
                ->ts_max_priority;

        if (new_policy) {
            /*
             *  Thread is not already running this policy.
             *  Use default values for parameters and limit.
             */
            base_priority = TS_BASEPRI_USER;
            max_priority  = pset_max_priority;
        }
        else {
            /*
             *  Thread is already running policy.  Use
             *  thread`s current values.
             */
            base_priority = ts_sched(thread)->base_priority;
            max_priority = ts_sched(thread)->max_priority;
        }

        if (count != 0) {
            /*
             *  Parameters supplied: use them for scheduling parameters.
             */
            assert(count >= POLICY_PARAM_TIMESHARE_COUNT);
            base_priority =
                ((struct policy_param_timeshare *)param)->priority;
            if (base_priority < 0 || base_priority > MAX_PRIORITY)
                return KERN_INVALID_ARGUMENT;
        }

        if (check_limits) {
            /*
             *  Error if parameters violate limits.
             */
            if (base_priority < max_priority)
                return KERN_FAILURE;
        }
        else {
            /*
             *  Validate current (or default)
             *  parameters against limits.
             */
            if (max_priority < pset_max_priority)
                max_priority = pset_max_priority;
            if (base_priority < max_priority)
                base_priority = max_priority;
        }

        ts_sched(thread)->base_priority = base_priority;
        ts_sched(thread)->max_priority  = max_priority;

        return KERN_SUCCESS;
}

kern_return_t
ts_thread_get_param(
        thread_t        thread,
        policy_param_t  param,  /* data and limits */
        natural_t       *count)
{
        struct policy_info_timeshare    *pi;

        if (*count < POLICY_INFO_TIMESHARE_COUNT)
            return KERN_INVALID_ARGUMENT;

        pi = (struct policy_info_timeshare *)param;

        pi->base_priority = ts_sched(thread)->base_priority;
        pi->cur_priority  = ts_sched(thread)->sched_priority;
        pi->max_priority  = ts_sched(thread)->max_priority;
        *count = POLICY_INFO_TIMESHARE_COUNT;

        return KERN_SUCCESS;
}

/*
 *      Set the default scheduling parameters for a task,
 *      to be used for newly created threads.
 */
kern_return_t
ts_task_set_param(
        task_t          task,
        policy_param_t  param,
        natural_t       count)
{
        int             priority;

        if (count == 0) {
            /*
             *  Use default value for priority.
             */
            priority = TS_BASEPRI_USER;
        }
        else {
            if (count < POLICY_PARAM_TIMESHARE_COUNT)
                return KERN_INVALID_ARGUMENT;

            priority = ((struct policy_param_timeshare *)param)->priority;
            if (priority < 0 || priority > MAX_PRIORITY)
                return KERN_INVALID_ARGUMENT;
        }

        ((struct policy_param_timeshare *)&task->sched_data)
                ->priority = priority;
        task->sched_data_count = POLICY_PARAM_TIMESHARE_COUNT;

        return KERN_SUCCESS;
}

/*
 *      Get the default scheduling parameters for a task.
 */
kern_return_t
ts_task_get_param(
        task_t          task,
        policy_param_t  param,
        natural_t       *count)
{
        if (*count < POLICY_PARAM_TIMESHARE_COUNT)
            return KERN_INVALID_ARGUMENT;

        ((struct policy_param_timeshare *)param)->priority =
                ((struct policy_param_timeshare *)&task->sched_data)
                        ->priority;
        *count = POLICY_PARAM_TIMESHARE_COUNT;

        return KERN_SUCCESS;
}

run_queue_t
ts_run_queue_alloc(void)
{
        ts_run_queue_t  rq;
        int             i;

        rq = (ts_run_queue_t) kalloc(sizeof(struct ts_run_queue));

        run_queue_init(&rq->rq, &ts_sched_policy);

        for (i = 0; i < NRQS; i++)
            queue_init(&rq->ts_queue[i]);
        rq->ts_low = NRQS;

        return &rq->rq;
}

void
ts_run_queue_free(
        run_queue_t     runq)
{
        kfree((vm_offset_t) runq, sizeof(struct ts_run_queue));
}

#if     MACH_KDB
#include <ddb/db_output.h>
void
ts_thread_db_print(
        thread_t        thread)
{
        db_printf("TS  %3d", ts_sched(thread)->sched_priority);
}
#endif

/*
 *      Statically allocated policy structure.
 */
void ts_clock_sched(
        thread_t        thread,
        boolean_t       end_quantum);   /* forward */
void ts_update_priority(
        thread_t        thread);        /* forward */

struct sched_policy     ts_sched_policy = {
    {
        /* sched_ops */
        ts_thread_dequeue,
        ts_thread_enqueue,
        ts_thread_remqueue,

        ts_csw_needed,
        ts_clock_sched,
        ts_update_priority,

        ts_run_queue_alloc,
        ts_run_queue_free,

        ts_runq_set_limit,
        ts_runq_get_limit,
        ts_thread_set_limit,
        ts_thread_set_param,
        ts_thread_get_param,
        ts_task_set_param,
        ts_task_get_param,

#if     MACH_KDB
        ts_thread_db_print
#endif
    },
        POLICY_TIMESHARE,
        "ts_timeshare"
};

/*
 ****************************************************************
 *
 *      Priority aging routines for Mach timesharing priority.
 *
 ****************************************************************
 */

#if     STAT_TIME

/*
 * Statistical timing uses microseconds as timer units.
 * 18 bit shift yields priorities.  PRI_SHIFT_2 isn`t needed.
 *
 * pri = usecs >> 18 == usecs / 2**18 ~= usecs / 250000;
 * i.e. 1 priority level = 1/4 second
 */
#define PRI_SHIFT       18

#else   /* STAT_TIME */

/*
 * Otherwise machine provides shifts based on time units it uses.
 */
#include <machine/sched_param.h>

#endif  /* STAT_TIME */

/*
 *      thread_timer_delta macro takes care of both thread timers.
 */

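/*
 *      Each TIMER_DELTA invocation accumulates into 'delta' the time
 *      a timer has accrued since its *_save snapshot, advancing the
 *      snapshot as it goes.  cpu_delta therefore collects raw CPU
 *      time, while sched_delta collects the same time weighted by
 *      the processor set`s sched_load factor.
 */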
#define thread_timer_delta(thread)                              \
MACRO_BEGIN                                                     \
        register unsigned       delta;                          \
                                                                \
        delta = 0;                                              \
        TIMER_DELTA((thread)->system_timer,                     \
                (thread)->system_timer_save, delta);            \
        TIMER_DELTA((thread)->user_timer,                       \
                (thread)->user_timer_save, delta);              \
        (thread)->cpu_delta += delta;                           \
        (thread)->sched_delta += delta *                        \
                        (thread)->processor_set->sched_load;    \
MACRO_END

/*
 *      Shift structures for holding update shifts.  Actual computation
 *      is  usage = (usage >> shift1) +/- (usage >> abs(shift2))  where the
 *      +/- is determined by the sign of shift2.
 */
struct shift {
        int     shift1;
        int     shift2;
};

typedef struct shift    *shift_t, shift_data_t;

/*
 *      USAGE_THRESHOLD is the amount by which usage must change to
 *      cause a priority shift that moves a thread between run queues.
 *      It has an additional fudge factor (* 4) to slow down context
 *      switches.
 *
 *      PRI_USAGE is the priority shift resulting from an amount
 *      of CPU usage.
 *
 *      PRI_SHIFT and PRI_SHIFT_2 convert from usage to priorities.
 *      SCHED_SHIFT converts for the scaling of the sched_usage field
 *      by SCHED_SCALE.  This scaling comes from the multiplication
 *      by sched_load in thread_timer_delta.  sched_load is calculated
 *      as a scaled overload factor in compute_mach_factor.
 */

#define USAGE_THRESHOLD_FACTOR  4

#ifdef  PRI_SHIFT_2
#if     PRI_SHIFT_2 > 0
#define PRI_USAGE(usage) \
        (   ((usage) >> (SCHED_SHIFT + PRI_SHIFT)) \
          + ((usage) >> (SCHED_SHIFT + PRI_SHIFT_2)) \
        )
#define USAGE_THRESHOLD \
        ( (((1 << PRI_SHIFT) + (1 << PRI_SHIFT_2)) << SCHED_SHIFT) \
          * USAGE_THRESHOLD_FACTOR)

#else   /* PRI_SHIFT_2 > 0 */
#define PRI_USAGE(usage) \
        (   ((usage) >> (SCHED_SHIFT + PRI_SHIFT)) \
          - ((usage) >> (SCHED_SHIFT - PRI_SHIFT_2)) \
        )
#define USAGE_THRESHOLD \
        ( (((1 << PRI_SHIFT) - (1 << -(PRI_SHIFT_2))) << SCHED_SHIFT) \
          * USAGE_THRESHOLD_FACTOR)

#endif  /* PRI_SHIFT_2 > 0 */

#else   /* PRI_SHIFT_2 */
#define PRI_USAGE(usage) \
        (   (usage) >> (SCHED_SHIFT + PRI_SHIFT) \
        )
#define USAGE_THRESHOLD \
        ( (1 << (PRI_SHIFT + SCHED_SHIFT)) \
          * USAGE_THRESHOLD_FACTOR)

#endif  /* PRI_SHIFT_2 */
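
/*
 *      Worked example, under illustrative assumptions: with STAT_TIME
 *      set, PRI_SHIFT is 18 and PRI_SHIFT_2 is not defined; assume
 *      SCHED_SHIFT == 7 (the real value comes from kern/sched.h).
 *      Then:
 *
 *              PRI_USAGE(usage) = usage >> (7 + 18) = usage >> 25
 *              USAGE_THRESHOLD  = (1 << 25) * 4     = 1 << 27
 *
 *      sched_usage counts microseconds multiplied by sched_load
 *      (itself scaled by SCHED_SCALE == 1 << SCHED_SHIFT), so on an
 *      unloaded system one priority level corresponds to about 2**18
 *      microseconds -- the "1/4 second" noted above -- and the
 *      threshold amounts to roughly one second of CPU time.
 */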

/*
 *      compute_sched_priority:
 *
 *      Calculate new priority for thread based on its base priority
 *      plus accumulated usage.  PRI_SHIFT and PRI_SHIFT_2 convert
 *      from usage to priorities.  SCHED_SHIFT converts for the scaling
 *      of the sched_usage field by SCHED_SCALE.  This scaling comes
 *      from the multiplication by sched_load (thread_timer_delta)
 *      in sched.h.  sched_load is calculated as a scaled overload
 *      factor in compute_mach_factor (mach_factor.c).
 */

#define compute_sched_priority(th)                                      \
    MACRO_BEGIN                                                         \
        int     pri;                                                    \
        pri = ts_sched(th)->base_priority                               \
            + PRI_USAGE((th)->sched_usage);                             \
        if (pri > MAX_PRIORITY)                                         \
            pri = MAX_PRIORITY;                                         \
        ts_sched(th)->sched_priority = pri;                             \
    MACRO_END

/*
 *      ts_clock_sched:
 *
 *      Recalculate the quantum and priority for a timesharing
 *      thread.  The priority is recalculated:
 *
 *      1.  Once per second
 *      2.  At the end of the current quantum
 *      3.  When the current usage is above USAGE_THRESHOLD
 */

void ts_clock_sched(
        thread_t        thread,
        boolean_t       end_quantum)
{
        spl_t   s;

        /*
         * Check for thread priority update.
         */
        s = splsched();
        thread_sched_lock(thread);

        if (thread->sched_stamp != sched_tick)          /* 1/second */
            ts_update_priority(thread);
        else {
            thread_timer_delta(thread);
            if (thread->sched_delta >= USAGE_THRESHOLD ||
                end_quantum)
            {
                thread->sched_usage += thread->sched_delta;
                thread->sched_delta = 0;
                compute_sched_priority(thread);
            }
        }
        thread_sched_unlock(thread);
        splx(s);

        /*
         * Check for context switch.
         */
        ast_check(thread, end_quantum);
}

/*
 *      Define shifts for simulating (5/8)**n
 */

shift_data_t    wait_shift[32] = {
        {1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7},
        {5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13},
        {11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18},
        {16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27}};
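
/*
 *      Illustrative, user-space check of the approximation; not part
 *      of the kernel build.  It compares the two-shift result against
 *      the exact usage * (5/8)**n, using the same sign convention as
 *      ts_update_priority below.  The demo_* names are hypothetical.
 */
#if 0
#include <math.h>
#include <stdio.h>

struct demo_shift {
        int     shift1;
        int     shift2;
};

static const struct demo_shift demo_wait_shift[32] = {
        {1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7},
        {5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13},
        {11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18},
        {16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27}};

int
main(void)
{
        unsigned int    usage = 1u << 26;       /* sample scaled usage */
        int             n;

        for (n = 1; n <= 10; n++) {
                const struct demo_shift *sp = &demo_wait_shift[n];
                unsigned int approx;

                if (sp->shift2 > 0)
                        approx = (usage >> sp->shift1)
                               + (usage >> sp->shift2);
                else
                        approx = (usage >> sp->shift1)
                               - (usage >> -(sp->shift2));

                printf("n=%2d  shifts=%10u  exact=%10.0f\n",
                       n, approx, usage * pow(5.0/8.0, n));
        }
        return 0;
}
#endif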

/*
 *      update_priority
 *
 *      Cause the priority computation of a thread that has been
 *      sleeping or suspended to "catch up" with the system.  Thread
 *      *MUST* be locked by caller.  If thread is running, then this
 *      can only be called by the thread on itself.
 */
void ts_update_priority(
        register thread_t       thread)
{
        register unsigned int   ticks;
        register shift_t        shiftp;

        ticks = sched_tick - thread->sched_stamp;

        assert(ticks != 0);

        /*
         *      If asleep for more than 30 seconds forget all
         *      cpu_usage, else catch up on missed aging.
         *      5/8 ** n is approximated by the two shifts
         *      in the wait_shift array.
         */
        thread->sched_stamp += ticks;
        thread_timer_delta(thread);
        if (ticks > 30) {
            thread->cpu_usage = 0;
            thread->sched_usage = 0;
        }
        else {
            thread->cpu_usage += thread->cpu_delta;
            thread->sched_usage += thread->sched_delta;
            shiftp = &wait_shift[ticks];
            if (shiftp->shift2 > 0) {
                thread->cpu_usage =
                        (thread->cpu_usage >> shiftp->shift1) +
                        (thread->cpu_usage >> shiftp->shift2);
                thread->sched_usage =
                        (thread->sched_usage >> shiftp->shift1) +
                        (thread->sched_usage >> shiftp->shift2);
            }
            else {
                thread->cpu_usage =
                        (thread->cpu_usage >> shiftp->shift1) -
                        (thread->cpu_usage >> -(shiftp->shift2));
                thread->sched_usage =
                        (thread->sched_usage >> shiftp->shift1) -
                        (thread->sched_usage >> -(shiftp->shift2));
            }
        }
        thread->cpu_delta = 0;
        thread->sched_delta = 0;

        /*
         *      Recompute priority.
         */
        compute_sched_priority(thread);
}

/*
 *      do_thread_scan: scan for stuck threads.  A thread is stuck if
 *      it is runnable but its priority is so low that it has not
 *      run for several seconds.  Its priority should be higher, but
 *      won't be until it runs and calls update_priority.  The scanner
 *      finds these threads and does the updates.
 *
 *      Scanner runs in two passes.  Pass one squirrels likely
 *      thread ids away in an array, and removes them from the run queue.
 *      Pass two does the priority updates.  This is necessary because
 *      the run queue lock is required for the candidate scan, but
 *      cannot be held during updates [set_pri will deadlock].
 *
 *      Array length should be enough so that restart isn't necessary,
 *      but restart logic is included.  Does not scan processor runqs.
 *
 *      Only scans timesharing run queues.
 */

boolean_t       do_thread_scan_debug = FALSE;

#define MAX_STUCK_THREADS       16

thread_t        stuck_threads[MAX_STUCK_THREADS];
int             stuck_count = 0;

/*
 *      do_runq_scan is the guts of pass 1.  It scans a runq for
 *      stuck threads.  A boolean is returned indicating whether
 *      it ran out of space.
 */

boolean_t
do_runq_scan(
        run_queue_head_t        runq)
{
        ts_run_queue_t          rq;
        spl_t                   s;
        register queue_t        q;
        register thread_t       thread;
        register int            count;

        /*
         *      Find the timeshare run queue.
         */
        rq = ts_runq(runq->runqs[ts_sched_policy.rank]);
        if (rq == 0)
            return FALSE;

        s = splsched();
        simple_lock(&runq->lock);
        if ((count = rq->ts_count) > 0) {
            q = &rq->ts_queue[rq->ts_low];
            while (count > 0) {
                thread = (thread_t) queue_first(q);
                while (!queue_end(q, (queue_entry_t) thread)) {
                    /*
                     *  Get the next thread now, since we may
                     *  remove this thread from the run queue.
                     */
                    thread_t next = (thread_t) queue_next(&thread->links);

                    if ((thread->state & TH_SCHED_STATE) == TH_RUN &&
                        sched_tick - thread->sched_stamp > 1)
                    {
                        /*
                         *      Stuck, save its id for later.
                         */
                        if (stuck_count == MAX_STUCK_THREADS) {
                            /*
                             *  !@#$% No more room.
                             */
                            simple_unlock(&runq->lock);
                            splx(s);
                            return TRUE;
                        }

                        /*
                         *      We can`t take the thread_lock here,
                         *      since we already have the runq lock.
                         *      So we can`t grab a reference to the
                         *      thread.  However, a thread that is
                         *      in RUN state cannot be deallocated
                         *      until it stops running.  If it isn`t
                         *      on the runq, then thread_halt cannot
                         *      see it.  So we remove the thread
                         *      from the runq to make it safe.
                         */
                        remqueue(q, (queue_entry_t) thread);
                        rq->ts_count--;
                        runq->count--;
                        thread->runq = RUN_QUEUE_HEAD_NULL;

                        stuck_threads[stuck_count++] = thread;

                        if (do_thread_scan_debug)
                            printf("do_runq_scan: adding thread %#x\n",
                                   (vm_offset_t) thread);

                    }
                    count--;
                    thread = next;
                }
                q++;
            }
        }
        simple_unlock(&runq->lock);
        splx(s);

        return FALSE;
}

void ts_thread_scan(void)
{
        spl_t                   s;
        register boolean_t      restart_needed = FALSE;
        register thread_t       thread;
#if     MACH_HOST
        register processor_set_t        pset;
#endif  /* MACH_HOST */

        do {
#if     MACH_HOST
            simple_lock(&all_psets_lock);
            queue_iterate(&all_psets, pset, processor_set_t, all_psets) {
                restart_needed = do_runq_scan(&pset->runq);
                if (restart_needed)
                        break;
            }
            simple_unlock(&all_psets_lock);
#else   /* MACH_HOST */
            restart_needed = do_runq_scan(&default_pset.runq);
#endif  /* MACH_HOST */

#if     MACH_IO_BINDING
            if (!restart_needed)
                restart_needed = do_runq_scan(&master_processor->runq);
#endif

            /*
             *  Ok, we now have a collection of candidates -- fix them.
             */

            while (stuck_count > 0) {
                thread = stuck_threads[--stuck_count];
                stuck_threads[stuck_count] = THREAD_NULL;
                s = splsched();
                thread_sched_lock(thread);
                if ((thread->state & TH_SCHED_STATE) == TH_RUN) {
                    /*
                     *  Call thread_setrun to update the thread`s
                     *  priority and put it back on the run queue.
                     */
                    thread_setrun(thread, TRUE);
                }
                thread_sched_unlock(thread);
                splx(s);
            }
        } while (restart_needed);
}
