FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_clocksource.c


/*-
 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.0/sys/kern/kern_clocksource.c 223426 2011-06-22 16:40:45Z jkim $");

/*
 * Common routines to manage event timer hardware.
 */

#include "opt_device_polling.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/timeet.h>
#include <sys/timetc.h>

#include <machine/atomic.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/smp.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
cyclic_clock_func_t     cyclic_clock_func = NULL;
#endif

int                     cpu_can_deep_sleep = 0; /* C3 state is available. */
int                     cpu_disable_deep_sleep = 0; /* Timer dies in C3. */

static void             setuptimer(void);
static void             loadtimer(struct bintime *now, int first);
static int              doconfigtimer(void);
static void             configtimer(int start);
static int              round_freq(struct eventtimer *et, int freq);

static void             getnextcpuevent(struct bintime *event, int idle);
static void             getnextevent(struct bintime *event);
static int              handleevents(struct bintime *now, int fake);
#ifdef SMP
static void             cpu_new_callout(int cpu, int ticks);
#endif

static struct mtx       et_hw_mtx;

#define ET_HW_LOCK(state)                                               \
        {                                                               \
                if (timer->et_flags & ET_FLAGS_PERCPU)                  \
                        mtx_lock_spin(&(state)->et_hw_mtx);             \
                else                                                    \
                        mtx_lock_spin(&et_hw_mtx);                      \
        }

#define ET_HW_UNLOCK(state)                                             \
        {                                                               \
                if (timer->et_flags & ET_FLAGS_PERCPU)                  \
                        mtx_unlock_spin(&(state)->et_hw_mtx);           \
                else                                                    \
                        mtx_unlock_spin(&et_hw_mtx);                    \
        }
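
/*
 * The lock macros above pick the locking granularity to match the
 * hardware: with a per-CPU event timer (ET_FLAGS_PERCPU) each CPU
 * takes only its own pcpu_state spin mutex, so CPUs never contend,
 * while a single global timer makes all CPUs serialize on et_hw_mtx.
 */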

static struct eventtimer *timer = NULL;
static struct bintime   timerperiod;    /* Timer period for periodic mode. */
static struct bintime   hardperiod;     /* hardclock() events period. */
static struct bintime   statperiod;     /* statclock() events period. */
static struct bintime   profperiod;     /* profclock() events period. */
static struct bintime   nexttick;       /* Next global timer tick time. */
static u_int            busy = 0;       /* Reconfiguration is in progress. */
static int              profiling = 0;  /* Profiling events enabled. */

static char             timername[32];  /* Wanted timer. */
TUNABLE_STR("kern.eventtimer.timer", timername, sizeof(timername));

static int              singlemul = 0;  /* Multiplier for periodic mode. */
TUNABLE_INT("kern.eventtimer.singlemul", &singlemul);
SYSCTL_INT(_kern_eventtimer, OID_AUTO, singlemul, CTLFLAG_RW, &singlemul,
    0, "Multiplier for periodic mode");

static u_int            idletick = 0;   /* Idle mode allowed. */
TUNABLE_INT("kern.eventtimer.idletick", &idletick);
SYSCTL_UINT(_kern_eventtimer, OID_AUTO, idletick, CTLFLAG_RW, &idletick,
    0, "Run periodic events when idle");

static int              periodic = 0;   /* Periodic or one-shot mode. */
static int              want_periodic = 0; /* What mode to prefer. */
TUNABLE_INT("kern.eventtimer.periodic", &want_periodic);
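
/*
 * All of the knobs above are boot-time loader tunables; they are also
 * exposed read-write via sysctl (the timer name and the periodic flag
 * through the SYSCTL_PROC handlers at the end of this file).  As an
 * illustrative example only (valid device names depend on the
 * hardware), a /boot/loader.conf might contain:
 *
 *      kern.eventtimer.timer="HPET"
 *      kern.eventtimer.periodic="0"
 */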

struct pcpu_state {
        struct mtx      et_hw_mtx;      /* Per-CPU timer mutex. */
        u_int           action;         /* Reconfiguration requests. */
        u_int           handle;         /* Immediate handle requests. */
        struct bintime  now;            /* Last tick time. */
        struct bintime  nextevent;      /* Next scheduled event on this CPU. */
        struct bintime  nexttick;       /* Next timer tick time. */
        struct bintime  nexthard;       /* Next hardclock() event. */
        struct bintime  nextstat;       /* Next statclock() event. */
        struct bintime  nextprof;       /* Next profclock() event. */
#ifdef KDTRACE_HOOKS
        struct bintime  nextcyc;        /* Next OpenSolaris cyclics event. */
#endif
        int             ipi;            /* This CPU needs IPI. */
        int             idle;           /* This CPU is in idle mode. */
};

static DPCPU_DEFINE(struct pcpu_state, timerstate);

#define FREQ2BT(freq, bt)                                               \
{                                                                       \
        (bt)->sec = 0;                                                  \
        (bt)->frac = ((uint64_t)0x8000000000000000 / (freq)) << 1;      \
}
#define BT2FREQ(bt)                                                     \
        (((uint64_t)0x8000000000000000 + ((bt)->frac >> 2)) /           \
            ((bt)->frac >> 1))
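
/*
 * A bintime fraction is a 64-bit binary fraction of a second, so
 * 1/freq second is 2^64 / freq.  FREQ2BT() computes that value as
 * (2^63 / freq) << 1 to keep the constant within 64 bits, and
 * BT2FREQ() inverts it, rounding to the nearest integer.  For
 * example, freq = 1000 yields frac = 0x4189374bc6a7ee, which is one
 * millisecond expressed as a binary fraction.
 */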

/*
 * Timer broadcast IPI handler.
 */
int
hardclockintr(void)
{
        struct bintime now;
        struct pcpu_state *state;
        int done;

        if (doconfigtimer() || busy)
                return (FILTER_HANDLED);
        state = DPCPU_PTR(timerstate);
        now = state->now;
        CTR4(KTR_SPARE2, "ipi  at %d:    now  %d.%08x%08x",
            curcpu, now.sec, (unsigned int)(now.frac >> 32),
                             (unsigned int)(now.frac & 0xffffffff));
        done = handleevents(&now, 0);
        return (done ? FILTER_HANDLED : FILTER_STRAY);
}

/*
 * Handle all events for the specified time on this CPU.
 */
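/*
 * The 'fake' argument distinguishes the callers seen in this file:
 * 0 is a real timer interrupt; 1 (from cpu_activeclock()) replays
 * missed events without a trap frame, so profclock() and the cyclic
 * callback are skipped; 2 (from cpu_initclocks_ap()) only recomputes
 * the next event time, running no handlers and touching no hardware.
 */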
static int
handleevents(struct bintime *now, int fake)
{
        struct bintime t;
        struct trapframe *frame;
        struct pcpu_state *state;
        uintfptr_t pc;
        int usermode;
        int done, runs;

        CTR4(KTR_SPARE2, "handle at %d:  now  %d.%08x%08x",
            curcpu, now->sec, (unsigned int)(now->frac >> 32),
                     (unsigned int)(now->frac & 0xffffffff));
        done = 0;
        if (fake) {
                frame = NULL;
                usermode = 0;
                pc = 0;
        } else {
                frame = curthread->td_intr_frame;
                usermode = TRAPF_USERMODE(frame);
                pc = TRAPF_PC(frame);
        }

        runs = 0;
        state = DPCPU_PTR(timerstate);

        while (bintime_cmp(now, &state->nexthard, >=)) {
                bintime_add(&state->nexthard, &hardperiod);
                runs++;
        }
        if (runs && fake < 2) {
                hardclock_anycpu(runs, usermode);
                done = 1;
        }
        while (bintime_cmp(now, &state->nextstat, >=)) {
                if (fake < 2)
                        statclock(usermode);
                bintime_add(&state->nextstat, &statperiod);
                done = 1;
        }
        if (profiling) {
                while (bintime_cmp(now, &state->nextprof, >=)) {
                        if (!fake)
                                profclock(usermode, pc);
                        bintime_add(&state->nextprof, &profperiod);
                        done = 1;
                }
        } else
                state->nextprof = state->nextstat;

#ifdef KDTRACE_HOOKS
        if (fake == 0 && cyclic_clock_func != NULL &&
            state->nextcyc.sec != -1 &&
            bintime_cmp(now, &state->nextcyc, >=)) {
                state->nextcyc.sec = -1;
                (*cyclic_clock_func)(frame);
        }
#endif

        getnextcpuevent(&t, 0);
        if (fake == 2) {
                state->nextevent = t;
                return (done);
        }
        ET_HW_LOCK(state);
        if (!busy) {
                state->idle = 0;
                state->nextevent = t;
                loadtimer(now, 0);
        }
        ET_HW_UNLOCK(state);
        return (done);
}

/*
 * Compute the binuptime of the next event on the current CPU.
 */
static void
getnextcpuevent(struct bintime *event, int idle)
{
        struct bintime tmp;
        struct pcpu_state *state;
        int skip;

        state = DPCPU_PTR(timerstate);
        *event = state->nexthard;
        if (idle) { /* If the CPU is idle, ask the callouts how long we may sleep. */
                skip = 4;
                if (curcpu == CPU_FIRST() && tc_min_ticktock_freq > skip)
                        skip = tc_min_ticktock_freq;
                skip = callout_tickstofirst(hz / skip) - 1;
                CTR2(KTR_SPARE2, "skip   at %d: %d", curcpu, skip);
                tmp = hardperiod;
                bintime_mul(&tmp, skip);
                bintime_add(event, &tmp);
        } else { /* If the CPU is active, handle all types of events. */
                if (bintime_cmp(event, &state->nextstat, >))
                        *event = state->nextstat;
                if (profiling && bintime_cmp(event, &state->nextprof, >))
                        *event = state->nextprof;
        }
#ifdef KDTRACE_HOOKS
        if (state->nextcyc.sec != -1 && bintime_cmp(event, &state->nextcyc, >))
                *event = state->nextcyc;
#endif
}

/*
 * Compute the binuptime of the earliest next event across all CPUs.
 */
static void
getnextevent(struct bintime *event)
{
        struct pcpu_state *state;
#ifdef SMP
        int     cpu;
#endif
        int     c;

        state = DPCPU_PTR(timerstate);
        *event = state->nextevent;
        c = curcpu;
#ifdef SMP
        if ((timer->et_flags & ET_FLAGS_PERCPU) == 0) {
                CPU_FOREACH(cpu) {
                        if (curcpu == cpu)
                                continue;
                        state = DPCPU_ID_PTR(cpu, timerstate);
                        if (bintime_cmp(event, &state->nextevent, >)) {
                                *event = state->nextevent;
                                c = cpu;
                        }
                }
        }
#endif
        CTR5(KTR_SPARE2, "next at %d:    next %d.%08x%08x by %d",
            curcpu, event->sec, (unsigned int)(event->frac >> 32),
                             (unsigned int)(event->frac & 0xffffffff), c);
}

/* Hardware timer callback function. */
static void
timercb(struct eventtimer *et, void *arg)
{
        struct bintime now;
        struct bintime *next;
        struct pcpu_state *state;
#ifdef SMP
        int cpu, bcast;
#endif

        /* Do not touch anything if somebody is reconfiguring timers. */
        if (busy)
                return;
        /* Update present and next tick times. */
        state = DPCPU_PTR(timerstate);
        if (et->et_flags & ET_FLAGS_PERCPU) {
                next = &state->nexttick;
        } else
                next = &nexttick;
        if (periodic) {
                now = *next;    /* Ex-next tick time becomes present time. */
                bintime_add(next, &timerperiod); /* Next tick in 1 period. */
        } else {
                binuptime(&now);        /* Get present time from hardware. */
                next->sec = -1;         /* Next tick is not scheduled yet. */
        }
        state->now = now;
        CTR4(KTR_SPARE2, "intr at %d:    now  %d.%08x%08x",
            curcpu, now.sec, (unsigned int)(now.frac >> 32),
                             (unsigned int)(now.frac & 0xffffffff));

#ifdef SMP
        /* Prepare broadcasting to other CPUs for non-per-CPU timers. */
        bcast = 0;
        if ((et->et_flags & ET_FLAGS_PERCPU) == 0 && smp_started) {
                CPU_FOREACH(cpu) {
                        state = DPCPU_ID_PTR(cpu, timerstate);
                        ET_HW_LOCK(state);
                        state->now = now;
                        if (bintime_cmp(&now, &state->nextevent, >=)) {
                                state->nextevent.sec++;
                                if (curcpu != cpu) {
                                        state->ipi = 1;
                                        bcast = 1;
                                }
                        }
                        ET_HW_UNLOCK(state);
                }
        }
#endif

        /* Handle events for this time on this CPU. */
        handleevents(&now, 0);

#ifdef SMP
        /* Broadcast interrupt to other CPUs for non-per-CPU timers. */
        if (bcast) {
                CPU_FOREACH(cpu) {
                        if (curcpu == cpu)
                                continue;
                        state = DPCPU_ID_PTR(cpu, timerstate);
                        if (state->ipi) {
                                state->ipi = 0;
                                ipi_cpu(cpu, IPI_HARDCLOCK);
                        }
                }
        }
#endif
}

/*
 * Load new value into hardware timer.
 */
static void
loadtimer(struct bintime *now, int start)
{
        struct pcpu_state *state;
        struct bintime new;
        struct bintime *next;
        uint64_t tmp;
        int eq;

        if (timer->et_flags & ET_FLAGS_PERCPU) {
                state = DPCPU_PTR(timerstate);
                next = &state->nexttick;
        } else
                next = &nexttick;
        if (periodic) {
                if (start) {
                        /*
                         * Try to start all periodic timers aligned
                         * to period to make events synchronous.
                         */
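                        /*
                         * Phase math sketch: dropping the low 28 bits
                         * of frac makes one second equal 2^36 units,
                         * so (sec << 36) + (frac >> 28) is the uptime
                         * in those units and the modulo below fits in
                         * 64 bits.  'tmp' becomes the phase already
                         * elapsed within the current period (scaled
                         * back by << 28), and the first shot is set
                         * to the remainder of the period.
                         */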
                        tmp = ((uint64_t)now->sec << 36) + (now->frac >> 28);
                        tmp = (tmp % (timerperiod.frac >> 28)) << 28;
                        new.sec = 0;
                        new.frac = timerperiod.frac - tmp;
                        if (new.frac < tmp)     /* Left less than passed. */
                                bintime_add(&new, &timerperiod);
                        CTR5(KTR_SPARE2, "load p at %d:   now %d.%08x first in %d.%08x",
                            curcpu, now->sec, (unsigned int)(now->frac >> 32),
                            new.sec, (unsigned int)(new.frac >> 32));
                        *next = new;
                        bintime_add(next, now);
                        et_start(timer, &new, &timerperiod);
                }
        } else {
                getnextevent(&new);
                eq = bintime_cmp(&new, next, ==);
                CTR5(KTR_SPARE2, "load at %d:    next %d.%08x%08x eq %d",
                    curcpu, new.sec, (unsigned int)(new.frac >> 32),
                             (unsigned int)(new.frac & 0xffffffff),
                             eq);
                if (!eq) {
                        *next = new;
                        bintime_sub(&new, now);
                        et_start(timer, &new, NULL);
                }
        }
}

/*
 * Prepare event timer parameters after configuration changes.
 */
static void
setuptimer(void)
{
        int freq;

        if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
                periodic = 0;
        else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
                periodic = 1;
        singlemul = MIN(MAX(singlemul, 1), 20);
        freq = hz * singlemul;
        while (freq < (profiling ? profhz : stathz))
                freq += hz;
        freq = round_freq(timer, freq);
        FREQ2BT(freq, &timerperiod);
}

/*
 * Reconfigure the specified per-CPU timer on another CPU.  Called from
 * the IPI handler.
 */
static int
doconfigtimer(void)
{
        struct bintime now;
        struct pcpu_state *state;

        state = DPCPU_PTR(timerstate);
        switch (atomic_load_acq_int(&state->action)) {
        case 1:         /* Start this CPU's timer. */
                binuptime(&now);
                ET_HW_LOCK(state);
                loadtimer(&now, 1);
                ET_HW_UNLOCK(state);
                state->handle = 0;
                atomic_store_rel_int(&state->action, 0);
                return (1);
        case 2:         /* Stop this CPU's timer. */
                ET_HW_LOCK(state);
                et_stop(timer);
                ET_HW_UNLOCK(state);
                state->handle = 0;
                atomic_store_rel_int(&state->action, 0);
                return (1);
        }
        if (atomic_readandclear_int(&state->handle) && !busy) {
                binuptime(&now);
                handleevents(&now, 0);
                return (1);
        }
        return (0);
}

/*
 * Reconfigure the specified timer.
 * For per-CPU timers, use an IPI to make the other CPUs reconfigure.
 */
static void
configtimer(int start)
{
        struct bintime now, next;
        struct pcpu_state *state;
        int cpu;

        if (start) {
                setuptimer();
                binuptime(&now);
        }
        critical_enter();
        ET_HW_LOCK(DPCPU_PTR(timerstate));
        if (start) {
                /* Initialize time machine parameters. */
                next = now;
                bintime_add(&next, &timerperiod);
                if (periodic)
                        nexttick = next;
                else
                        nexttick.sec = -1;
                CPU_FOREACH(cpu) {
                        state = DPCPU_ID_PTR(cpu, timerstate);
                        state->now = now;
                        state->nextevent = next;
                        if (periodic)
                                state->nexttick = next;
                        else
                                state->nexttick.sec = -1;
                        state->nexthard = next;
                        state->nextstat = next;
                        state->nextprof = next;
                        hardclock_sync(cpu);
                }
                busy = 0;
                /* Start global timer or per-CPU timer of this CPU. */
                loadtimer(&now, 1);
        } else {
                busy = 1;
                /* Stop global timer or per-CPU timer of this CPU. */
                et_stop(timer);
        }
        ET_HW_UNLOCK(DPCPU_PTR(timerstate));
#ifdef SMP
        /* If the timer is global or there are no other CPUs yet, we are done. */
        if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || !smp_started) {
                critical_exit();
                return;
        }
        /* Set reconfigure flags for other CPUs. */
        CPU_FOREACH(cpu) {
                state = DPCPU_ID_PTR(cpu, timerstate);
                atomic_store_rel_int(&state->action,
                    (cpu == curcpu) ? 0 : (start ? 1 : 2));
        }
        /* Broadcast reconfigure IPI. */
        ipi_all_but_self(IPI_HARDCLOCK);
        /* Wait for reconfiguration to complete. */
restart:
        cpu_spinwait();
        CPU_FOREACH(cpu) {
                if (cpu == curcpu)
                        continue;
                state = DPCPU_ID_PTR(cpu, timerstate);
                if (atomic_load_acq_int(&state->action))
                        goto restart;
        }
#endif
        critical_exit();
}

/*
 * Calculate nearest frequency supported by hardware timer.
 */
static int
round_freq(struct eventtimer *et, int freq)
{
        uint64_t div;

        if (et->et_frequency != 0) {
                div = lmax((et->et_frequency + freq / 2) / freq, 1);
                if (et->et_flags & ET_FLAGS_POW2DIV)
                        div = 1 << (flsl(div + div / 2) - 1);
                freq = (et->et_frequency + div / 2) / div;
        }
        if (et->et_min_period.sec > 0)
                freq = 0;
        else if (et->et_min_period.frac != 0)
                freq = min(freq, BT2FREQ(&et->et_min_period));
        if (et->et_max_period.sec == 0 && et->et_max_period.frac != 0)
                freq = max(freq, BT2FREQ(&et->et_max_period));
        return (freq);
}
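
/*
 * Worked example (illustrative numbers): with et_frequency = 1193182
 * (the classic i8254 input clock) and a requested freq of 1000, the
 * divisor is (1193182 + 500) / 1000 = 1193 and the result is
 * (1193182 + 596) / 1193 = 1000, the nearest rate that divisor can
 * really produce.  With ET_FLAGS_POW2DIV the divisor is first rounded
 * to a nearby power of two, and the min/max period bounds then clamp
 * the result.
 */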

/*
 * Configure and start event timers (BSP part).
 */
void
cpu_initclocks_bsp(void)
{
        struct pcpu_state *state;
        int base, div, cpu;

        mtx_init(&et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
        CPU_FOREACH(cpu) {
                state = DPCPU_ID_PTR(cpu, timerstate);
                mtx_init(&state->et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
#ifdef KDTRACE_HOOKS
                state->nextcyc.sec = -1;
#endif
        }
#ifdef SMP
        callout_new_inserted = cpu_new_callout;
#endif
        periodic = want_periodic;
        /* Grab the requested timer or the best one present. */
        if (timername[0])
                timer = et_find(timername, 0, 0);
        if (timer == NULL && periodic) {
                timer = et_find(NULL,
                    ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
        }
        if (timer == NULL) {
                timer = et_find(NULL,
                    ET_FLAGS_ONESHOT, ET_FLAGS_ONESHOT);
        }
        if (timer == NULL && !periodic) {
                timer = et_find(NULL,
                    ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
        }
        if (timer == NULL)
                panic("No usable event timer found!");
        et_init(timer, timercb, NULL, NULL);

        /* Adapt to timer capabilities. */
        if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
                periodic = 0;
        else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
                periodic = 1;
        if (timer->et_flags & ET_FLAGS_C3STOP)
                cpu_disable_deep_sleep++;

        /*
         * We honor the requested 'hz' value.
         * We want to run stathz in the neighborhood of 128 Hz.
         * We would like profhz to run as often as possible.
         */
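        /*
         * Worked example (illustrative): the default hz = 1000 is
         * neither >= 1500 nor a multiple of 128, but is >= 750, so
         * singlemul becomes 2 and periodic mode programs the timer at
         * about hz * 2 = 2000 interrupts per second; hardclock() then
         * fires on every second tick, while statclock()/profclock()
         * run from whichever ticks their next event times fall on.
         */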
        if (singlemul <= 0 || singlemul > 20) {
                if (hz >= 1500 || (hz % 128) == 0)
                        singlemul = 1;
                else if (hz >= 750)
                        singlemul = 2;
                else
                        singlemul = 4;
        }
        if (periodic) {
                base = round_freq(timer, hz * singlemul);
                singlemul = max((base + hz / 2) / hz, 1);
                hz = (base + singlemul / 2) / singlemul;
                if (base <= 128)
                        stathz = base;
                else {
                        div = base / 128;
                        if (div >= singlemul && (div % singlemul) == 0)
                                div++;
                        stathz = base / div;
                }
                profhz = stathz;
                while ((profhz + stathz) <= 128 * 64)
                        profhz += stathz;
                profhz = round_freq(timer, profhz);
        } else {
                hz = round_freq(timer, hz);
                stathz = round_freq(timer, 127);
                profhz = round_freq(timer, stathz * 64);
        }
        tick = 1000000 / hz;
        FREQ2BT(hz, &hardperiod);
        FREQ2BT(stathz, &statperiod);
        FREQ2BT(profhz, &profperiod);
        ET_LOCK();
        configtimer(1);
        ET_UNLOCK();
}

/*
 * Start per-CPU event timers on APs.
 */
void
cpu_initclocks_ap(void)
{
        struct bintime now;
        struct pcpu_state *state;

        state = DPCPU_PTR(timerstate);
        binuptime(&now);
        ET_HW_LOCK(state);
        if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 && periodic) {
                state->now = nexttick;
                bintime_sub(&state->now, &timerperiod);
        } else
                state->now = now;
        hardclock_sync(curcpu);
        handleevents(&state->now, 2);
        if (timer->et_flags & ET_FLAGS_PERCPU)
                loadtimer(&now, 1);
        ET_HW_UNLOCK(state);
}

/*
 * Switch to profiling clock rates.
 */
void
cpu_startprofclock(void)
{

        ET_LOCK();
        if (periodic) {
                configtimer(0);
                profiling = 1;
                configtimer(1);
        } else
                profiling = 1;
        ET_UNLOCK();
}

/*
 * Switch to regular clock rates.
 */
void
cpu_stopprofclock(void)
{

        ET_LOCK();
        if (periodic) {
                configtimer(0);
                profiling = 0;
                configtimer(1);
        } else
                profiling = 0;
        ET_UNLOCK();
}

/*
 * Switch to idle mode (all ticks handled).
 */
void
cpu_idleclock(void)
{
        struct bintime now, t;
        struct pcpu_state *state;

        if (idletick || busy ||
            (periodic && (timer->et_flags & ET_FLAGS_PERCPU))
#ifdef DEVICE_POLLING
            || curcpu == CPU_FIRST()
#endif
            )
                return;
        state = DPCPU_PTR(timerstate);
        if (periodic)
                now = state->now;
        else
                binuptime(&now);
        CTR4(KTR_SPARE2, "idle at %d:    now  %d.%08x%08x",
            curcpu, now.sec, (unsigned int)(now.frac >> 32),
                             (unsigned int)(now.frac & 0xffffffff));
        getnextcpuevent(&t, 1);
        ET_HW_LOCK(state);
        state->idle = 1;
        state->nextevent = t;
        if (!periodic)
                loadtimer(&now, 0);
        ET_HW_UNLOCK(state);
}

/*
 * Switch to active mode (skip empty ticks).
 */
void
cpu_activeclock(void)
{
        struct bintime now;
        struct pcpu_state *state;
        struct thread *td;

        state = DPCPU_PTR(timerstate);
        if (state->idle == 0 || busy)
                return;
        if (periodic)
                now = state->now;
        else
                binuptime(&now);
        CTR4(KTR_SPARE2, "active at %d:  now  %d.%08x%08x",
            curcpu, now.sec, (unsigned int)(now.frac >> 32),
                             (unsigned int)(now.frac & 0xffffffff));
        spinlock_enter();
        td = curthread;
        td->td_intr_nesting_level++;
        handleevents(&now, 1);
        td->td_intr_nesting_level--;
        spinlock_exit();
}

#ifdef KDTRACE_HOOKS
void
clocksource_cyc_set(const struct bintime *t)
{
        struct bintime now;
        struct pcpu_state *state;

        state = DPCPU_PTR(timerstate);
        if (periodic)
                now = state->now;
        else
                binuptime(&now);

        CTR4(KTR_SPARE2, "set_cyc at %d:  now  %d.%08x%08x",
            curcpu, now.sec, (unsigned int)(now.frac >> 32),
                             (unsigned int)(now.frac & 0xffffffff));
        CTR4(KTR_SPARE2, "set_cyc at %d:  t  %d.%08x%08x",
            curcpu, t->sec, (unsigned int)(t->frac >> 32),
                             (unsigned int)(t->frac & 0xffffffff));

        ET_HW_LOCK(state);
        if (bintime_cmp(t, &state->nextcyc, ==)) {
                ET_HW_UNLOCK(state);
                return;
        }
        state->nextcyc = *t;
        if (bintime_cmp(&state->nextcyc, &state->nextevent, >=)) {
                ET_HW_UNLOCK(state);
                return;
        }
        state->nextevent = state->nextcyc;
        if (!periodic)
                loadtimer(&now, 0);
        ET_HW_UNLOCK(state);
}
#endif

#ifdef SMP
static void
cpu_new_callout(int cpu, int ticks)
{
        struct bintime tmp;
        struct pcpu_state *state;

        CTR3(KTR_SPARE2, "new co at %d:    on %d in %d",
            curcpu, cpu, ticks);
        state = DPCPU_ID_PTR(cpu, timerstate);
        ET_HW_LOCK(state);
        if (state->idle == 0 || busy) {
                ET_HW_UNLOCK(state);
                return;
        }
        /*
         * If the timer is periodic, just update the next event time for
         * the target CPU.  If the timer is global, there is a chance it
         * is already programmed.
         */
        if (periodic || (timer->et_flags & ET_FLAGS_PERCPU) == 0) {
                state->nextevent = state->nexthard;
                tmp = hardperiod;
                bintime_mul(&tmp, ticks - 1);
                bintime_add(&state->nextevent, &tmp);
                if (periodic ||
                    bintime_cmp(&state->nextevent, &nexttick, >=)) {
                        ET_HW_UNLOCK(state);
                        return;
                }
        }
        /*
         * Otherwise we have to wake that CPU up, as we can't get the
         * present bintime to reprogram the global timer from here.  If
         * the timer is per-CPU, we by definition can't do it from here.
         */
        ET_HW_UNLOCK(state);
        if (timer->et_flags & ET_FLAGS_PERCPU) {
                state->handle = 1;
                ipi_cpu(cpu, IPI_HARDCLOCK);
        } else {
                if (!cpu_idle_wakeup(cpu))
                        ipi_cpu(cpu, IPI_AST);
        }
}
#endif

/*
 * Report or change the active event timer hardware.
 */
static int
sysctl_kern_eventtimer_timer(SYSCTL_HANDLER_ARGS)
{
        char buf[32];
        struct eventtimer *et;
        int error;

        ET_LOCK();
        et = timer;
        snprintf(buf, sizeof(buf), "%s", et->et_name);
        ET_UNLOCK();
        error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
        ET_LOCK();
        et = timer;
        if (error != 0 || req->newptr == NULL ||
            strcasecmp(buf, et->et_name) == 0) {
                ET_UNLOCK();
                return (error);
        }
        et = et_find(buf, 0, 0);
        if (et == NULL) {
                ET_UNLOCK();
                return (ENOENT);
        }
        configtimer(0);
        et_free(timer);
        if (et->et_flags & ET_FLAGS_C3STOP)
                cpu_disable_deep_sleep++;
        if (timer->et_flags & ET_FLAGS_C3STOP)
                cpu_disable_deep_sleep--;
        periodic = want_periodic;
        timer = et;
        et_init(timer, timercb, NULL, NULL);
        configtimer(1);
        ET_UNLOCK();
        return (error);
}
SYSCTL_PROC(_kern_eventtimer, OID_AUTO, timer,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_eventtimer_timer, "A", "Chosen event timer");

/*
 * Report or change the active event timer periodicity.
 */
static int
sysctl_kern_eventtimer_periodic(SYSCTL_HANDLER_ARGS)
{
        int error, val;

        val = periodic;
        error = sysctl_handle_int(oidp, &val, 0, req);
        if (error != 0 || req->newptr == NULL)
                return (error);
        ET_LOCK();
        configtimer(0);
        periodic = want_periodic = val;
        configtimer(1);
        ET_UNLOCK();
        return (error);
}
SYSCTL_PROC(_kern_eventtimer, OID_AUTO, periodic,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_eventtimer_periodic, "I", "Enable event timer periodic mode");
