FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_clocksource.c

/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010-2013 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Common routines to manage event timer hardware.
 */

#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/timeet.h>
#include <sys/timetc.h>

#include <machine/atomic.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/smp.h>

int                     cpu_disable_c2_sleep = 0; /* Timer dies in C2. */
int                     cpu_disable_c3_sleep = 0; /* Timer dies in C3. */

static void             setuptimer(void);
static void             loadtimer(sbintime_t now, int first);
static int              doconfigtimer(void);
static void             configtimer(int start);
static int              round_freq(struct eventtimer *et, int freq);

static sbintime_t       getnextcpuevent(int idle);
static sbintime_t       getnextevent(void);
static int              handleevents(sbintime_t now, int fake);

static struct mtx       et_hw_mtx;

#define ET_HW_LOCK(state)                                               \
        {                                                               \
                if (timer->et_flags & ET_FLAGS_PERCPU)                  \
                        mtx_lock_spin(&(state)->et_hw_mtx);             \
                else                                                    \
                        mtx_lock_spin(&et_hw_mtx);                      \
        }

#define ET_HW_UNLOCK(state)                                             \
        {                                                               \
                if (timer->et_flags & ET_FLAGS_PERCPU)                  \
                        mtx_unlock_spin(&(state)->et_hw_mtx);           \
                else                                                    \
                        mtx_unlock_spin(&et_hw_mtx);                    \
        }
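
/*
 * The macros above choose the lock to match the timer topology: with a
 * per-CPU event timer, accesses are mostly confined to each CPU's own
 * state, so every pcpu_state carries a private spin mutex and CPUs do
 * not contend on a single lock; a global timer is shared hardware, so
 * all CPUs serialize on the global et_hw_mtx instead.  Spin mutexes are
 * used because both paths run from interrupt filter context.
 */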

static struct eventtimer *timer = NULL;
static sbintime_t       timerperiod;    /* Timer period for periodic mode. */
static sbintime_t       statperiod;     /* statclock() events period. */
static sbintime_t       profperiod;     /* profclock() events period. */
static sbintime_t       nexttick;       /* Next global timer tick time. */
static u_int            busy = 1;       /* Reconfiguration is in progress. */
static int              profiling;      /* Profiling events enabled. */

static char             timername[32];  /* Wanted timer. */
TUNABLE_STR("kern.eventtimer.timer", timername, sizeof(timername));

static int              singlemul;      /* Multiplier for periodic mode. */
SYSCTL_INT(_kern_eventtimer, OID_AUTO, singlemul, CTLFLAG_RWTUN, &singlemul,
    0, "Multiplier for periodic mode");

static u_int            idletick;       /* Run periodic events when idle. */
SYSCTL_UINT(_kern_eventtimer, OID_AUTO, idletick, CTLFLAG_RWTUN, &idletick,
    0, "Run periodic events when idle");

static int              periodic;       /* Periodic or one-shot mode. */
static int              want_periodic;  /* What mode to prefer. */
TUNABLE_INT("kern.eventtimer.periodic", &want_periodic);

struct pcpu_state {
        struct mtx      et_hw_mtx;      /* Per-CPU timer mutex. */
        u_int           action;         /* Reconfiguration requests. */
        u_int           handle;         /* Immediate handle requests. */
        sbintime_t      now;            /* Last tick time. */
        sbintime_t      nextevent;      /* Next scheduled event on this CPU. */
        sbintime_t      nexttick;       /* Next timer tick time. */
        sbintime_t      nexthard;       /* Next hardclock() event. */
        sbintime_t      nextstat;       /* Next statclock() event. */
        sbintime_t      nextprof;       /* Next profclock() event. */
        sbintime_t      nextcall;       /* Next callout event. */
        sbintime_t      nextcallopt;    /* Next optional callout event. */
        int             ipi;            /* This CPU needs IPI. */
        int             idle;           /* This CPU is in idle mode. */
};

DPCPU_DEFINE_STATIC(struct pcpu_state, timerstate);
DPCPU_DEFINE(sbintime_t, hardclocktime);

/*
 * Timer broadcast IPI handler.
 */
int
hardclockintr(void)
{
        sbintime_t now;
        struct pcpu_state *state;
        int done;

        if (doconfigtimer() || busy)
                return (FILTER_HANDLED);
        state = DPCPU_PTR(timerstate);
        now = state->now;
        CTR3(KTR_SPARE2, "ipi  at %d:    now  %d.%08x",
            curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
        done = handleevents(now, 0);
        return (done ? FILTER_HANDLED : FILTER_STRAY);
}

/*
 * Handle all events for the specified time on this CPU.
 */
static int
handleevents(sbintime_t now, int fake)
{
        sbintime_t t, *hct;
        struct trapframe *frame;
        struct pcpu_state *state;
        int usermode;
        int done, runs;

        CTR3(KTR_SPARE2, "handle at %d:  now  %d.%08x",
            curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
        done = 0;
        if (fake) {
                frame = NULL;
                usermode = 0;
        } else {
                frame = curthread->td_intr_frame;
                usermode = TRAPF_USERMODE(frame);
        }

        state = DPCPU_PTR(timerstate);

        runs = 0;
        while (now >= state->nexthard) {
                state->nexthard += tick_sbt;
                runs++;
        }
        if (runs) {
                hct = DPCPU_PTR(hardclocktime);
                *hct = state->nexthard - tick_sbt;
                if (fake < 2) {
                        hardclock(runs, usermode);
                        done = 1;
                }
        }
        runs = 0;
        while (now >= state->nextstat) {
                state->nextstat += statperiod;
                runs++;
        }
        if (runs && fake < 2) {
                statclock(runs, usermode);
                done = 1;
        }
        if (profiling) {
                runs = 0;
                while (now >= state->nextprof) {
                        state->nextprof += profperiod;
                        runs++;
                }
                if (runs && !fake) {
                        profclock(runs, usermode, TRAPF_PC(frame));
                        done = 1;
                }
        } else
                state->nextprof = state->nextstat;
        if (now >= state->nextcallopt || now >= state->nextcall) {
                state->nextcall = state->nextcallopt = SBT_MAX;
                callout_process(now);
        }

        t = getnextcpuevent(0);
        ET_HW_LOCK(state);
        if (!busy) {
                state->idle = 0;
                state->nextevent = t;
                loadtimer(now, (fake == 2) &&
                    (timer->et_flags & ET_FLAGS_PERCPU));
        }
        ET_HW_UNLOCK(state);
        return (done);
}
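
/*
 * A note on handleevents()'s "fake" argument, summarizing its callers:
 * 0 means a real timer interrupt, so all clock hooks may run; 1 is used
 * by cpu_activeclock() to catch up on ticks skipped while idle, where
 * hardclock()/statclock() run but profclock() is skipped because there
 * is no trap frame; 2 is used by cpu_initclocks_ap() merely to prime
 * the per-CPU event times and, for per-CPU timers, start the hardware,
 * without invoking the hardclock()/statclock()/profclock() hooks.
 */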

/*
 * Compute the binuptime of the next event on the current CPU.
 */
static sbintime_t
getnextcpuevent(int idle)
{
        sbintime_t event;
        struct pcpu_state *state;
        u_int hardfreq;

        state = DPCPU_PTR(timerstate);
        /* Handle hardclock() events, skipping some if CPU is idle. */
        event = state->nexthard;
        if (idle) {
                hardfreq = (u_int)hz / 2;
                if (tc_min_ticktock_freq > 2
#ifdef SMP
                    && curcpu == CPU_FIRST()
#endif
                    )
                        hardfreq = hz / tc_min_ticktock_freq;
                if (hardfreq > 1)
                        event += tick_sbt * (hardfreq - 1);
        }
        /* Handle callout events. */
        if (event > state->nextcall)
                event = state->nextcall;
        if (!idle) { /* If CPU is active, handle other types of events. */
                if (event > state->nextstat)
                        event = state->nextstat;
                if (profiling && event > state->nextprof)
                        event = state->nextprof;
        }
        return (event);
}
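
/*
 * Worked example of the idle skip above, assuming hz = 1000: an idle
 * CPU gets hardfreq = 1000 / 2 = 500, so its next hardclock() event is
 * pushed out by 499 ticks and it wakes for hardclock() only about twice
 * a second.  On CPU_FIRST(), if the timecounter needs servicing more
 * often (say tc_min_ticktock_freq = 4), hardfreq becomes 1000 / 4 = 250
 * so the timecounter cannot wrap unnoticed.  The callout comparison
 * above still caps the result, so pending callouts are never delayed
 * by the skip.
 */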

/*
 * Compute the binuptime of the next event across all CPUs.
 */
static sbintime_t
getnextevent(void)
{
        struct pcpu_state *state;
        sbintime_t event;
#ifdef SMP
        int     cpu;
#endif
#ifdef KTR
        int     c;

        c = -1;
#endif
        state = DPCPU_PTR(timerstate);
        event = state->nextevent;
#ifdef SMP
        if ((timer->et_flags & ET_FLAGS_PERCPU) == 0) {
                CPU_FOREACH(cpu) {
                        state = DPCPU_ID_PTR(cpu, timerstate);
                        if (event > state->nextevent) {
                                event = state->nextevent;
#ifdef KTR
                                c = cpu;
#endif
                        }
                }
        }
#endif
        CTR4(KTR_SPARE2, "next at %d:    next %d.%08x by %d",
            curcpu, (int)(event >> 32), (u_int)(event & 0xffffffff), c);
        return (event);
}

/* Hardware timer callback function. */
static void
timercb(struct eventtimer *et, void *arg)
{
        sbintime_t now;
        sbintime_t *next;
        struct pcpu_state *state;
#ifdef SMP
        int cpu, bcast;
#endif

        /* Do not touch anything if somebody is reconfiguring timers. */
        if (busy)
                return;
        /* Update present and next tick times. */
        state = DPCPU_PTR(timerstate);
        if (et->et_flags & ET_FLAGS_PERCPU) {
                next = &state->nexttick;
        } else
                next = &nexttick;
        now = sbinuptime();
        if (periodic)
                *next = now + timerperiod;
        else
                *next = -1;     /* Next tick is not scheduled yet. */
        state->now = now;
        CTR3(KTR_SPARE2, "intr at %d:    now  %d.%08x",
            curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));

#ifdef SMP
#ifdef EARLY_AP_STARTUP
        MPASS(mp_ncpus == 1 || smp_started);
#endif
        /* Prepare broadcasting to other CPUs for non-per-CPU timers. */
        bcast = 0;
#ifdef EARLY_AP_STARTUP
        if ((et->et_flags & ET_FLAGS_PERCPU) == 0) {
#else
        if ((et->et_flags & ET_FLAGS_PERCPU) == 0 && smp_started) {
#endif
                CPU_FOREACH(cpu) {
                        state = DPCPU_ID_PTR(cpu, timerstate);
                        ET_HW_LOCK(state);
                        state->now = now;
                        if (now >= state->nextevent) {
                                state->nextevent += SBT_1S;
                                if (curcpu != cpu) {
                                        state->ipi = 1;
                                        bcast = 1;
                                }
                        }
                        ET_HW_UNLOCK(state);
                }
        }
#endif

        /* Handle events for this time on this CPU. */
        handleevents(now, 0);

#ifdef SMP
        /* Broadcast interrupt to other CPUs for non-per-CPU timers. */
        if (bcast) {
                CPU_FOREACH(cpu) {
                        if (curcpu == cpu)
                                continue;
                        state = DPCPU_ID_PTR(cpu, timerstate);
                        if (state->ipi) {
                                state->ipi = 0;
                                ipi_cpu(cpu, IPI_HARDCLOCK);
                        }
                }
        }
#endif
}

/*
 * Load a new value into the hardware timer.
 */
static void
loadtimer(sbintime_t now, int start)
{
        struct pcpu_state *state;
        sbintime_t new;
        sbintime_t *next;
        uint64_t tmp;
        int eq;

        if (timer->et_flags & ET_FLAGS_PERCPU) {
                state = DPCPU_PTR(timerstate);
                next = &state->nexttick;
        } else
                next = &nexttick;
        if (periodic) {
                if (start) {
                        /*
                         * Try to start all periodic timers aligned
                         * to the period boundary to make events synchronous.
                         */
                        tmp = now % timerperiod;
                        new = timerperiod - tmp;
                        if (new < tmp)          /* Less left than passed. */
                                new += timerperiod;
                        CTR5(KTR_SPARE2, "load p at %d:   now %d.%08x first in %d.%08x",
                            curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff),
                            (int)(new >> 32), (u_int)(new & 0xffffffff));
                        *next = new + now;
                        et_start(timer, new, timerperiod);
                }
        } else {
                new = getnextevent();
                eq = (new == *next);
                CTR4(KTR_SPARE2, "load at %d:    next %d.%08x eq %d",
                    curcpu, (int)(new >> 32), (u_int)(new & 0xffffffff), eq);
                if (!eq) {
                        *next = new;
                        et_start(timer, new - now, 0);
                }
        }
}
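
/*
 * Worked example of the alignment above, assuming timerperiod = 1 ms:
 * if "now" falls 0.7 ms into the current period, then tmp = 0.7 ms and
 * new = 0.3 ms; since new < tmp, the start is pushed a full period out
 * and the timer first fires in 1.3 ms, on a period boundary.  Aligning
 * every CPU's periodic timer to the same boundary grid keeps their
 * ticks synchronous instead of drifting at arbitrary offsets.
 */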

/*
 * Prepare event timer parameters after configuration changes.
 */
static void
setuptimer(void)
{
        int freq;

        if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
                periodic = 0;
        else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
                periodic = 1;
        singlemul = MIN(MAX(singlemul, 1), 20);
        freq = hz * singlemul;
        while (freq < (profiling ? profhz : stathz))
                freq += hz;
        freq = round_freq(timer, freq);
        timerperiod = SBT_1S / freq;
}

/*
 * Reconfigure this CPU's timer at the request of another CPU.
 * Called from the IPI handler.
 */
static int
doconfigtimer(void)
{
        sbintime_t now;
        struct pcpu_state *state;

        state = DPCPU_PTR(timerstate);
        switch (atomic_load_acq_int(&state->action)) {
        case 1:
                now = sbinuptime();
                ET_HW_LOCK(state);
                loadtimer(now, 1);
                ET_HW_UNLOCK(state);
                state->handle = 0;
                atomic_store_rel_int(&state->action, 0);
                return (1);
        case 2:
                ET_HW_LOCK(state);
                et_stop(timer);
                ET_HW_UNLOCK(state);
                state->handle = 0;
                atomic_store_rel_int(&state->action, 0);
                return (1);
        }
        if (atomic_readandclear_int(&state->handle) && !busy) {
                now = sbinuptime();
                handleevents(now, 0);
                return (1);
        }
        return (0);
}
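
/*
 * The action field implements a small request protocol between
 * configtimer() below and the handler above: 1 asks the target CPU to
 * (re)start its per-CPU timer, 2 asks it to stop it, and 0 acknowledges
 * completion.  The acquire-load/release-store pairing makes the
 * requester's writes to the timer parameters visible before the target
 * acts on them, and lets the requester spin until action returns to 0.
 * The separate handle flag covers the simpler case where a CPU just
 * needs to process its events immediately.
 */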

/*
 * Reconfigure the specified timer.
 * For per-CPU timers, use an IPI to make the other CPUs reconfigure theirs.
 */
static void
configtimer(int start)
{
        sbintime_t now, next;
        struct pcpu_state *state;
        int cpu;

        if (start) {
                setuptimer();
                now = sbinuptime();
        } else
                now = 0;
        critical_enter();
        ET_HW_LOCK(DPCPU_PTR(timerstate));
        if (start) {
                /* Initialize time machine parameters. */
                next = now + timerperiod;
                if (periodic)
                        nexttick = next;
                else
                        nexttick = -1;
#ifdef EARLY_AP_STARTUP
                MPASS(mp_ncpus == 1 || smp_started);
#endif
                CPU_FOREACH(cpu) {
                        state = DPCPU_ID_PTR(cpu, timerstate);
                        state->now = now;
#ifndef EARLY_AP_STARTUP
                        if (!smp_started && cpu != CPU_FIRST())
                                state->nextevent = SBT_MAX;
                        else
#endif
                                state->nextevent = next;
                        if (periodic)
                                state->nexttick = next;
                        else
                                state->nexttick = -1;
                        state->nexthard = next;
                        state->nextstat = next;
                        state->nextprof = next;
                        state->nextcall = next;
                        state->nextcallopt = next;
                        hardclock_sync(cpu);
                }
                busy = 0;
                /* Start global timer or per-CPU timer of this CPU. */
                loadtimer(now, 1);
        } else {
                busy = 1;
                /* Stop global timer or per-CPU timer of this CPU. */
                et_stop(timer);
        }
        ET_HW_UNLOCK(DPCPU_PTR(timerstate));
#ifdef SMP
#ifdef EARLY_AP_STARTUP
        /* If the timer is global, we are done. */
        if ((timer->et_flags & ET_FLAGS_PERCPU) == 0) {
#else
        /* If the timer is global or there are no other CPUs yet, we are done. */
        if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || !smp_started) {
#endif
                critical_exit();
                return;
        }
        /* Set reconfigure flags for other CPUs. */
        CPU_FOREACH(cpu) {
                state = DPCPU_ID_PTR(cpu, timerstate);
                atomic_store_rel_int(&state->action,
                    (cpu == curcpu) ? 0 : (start ? 1 : 2));
        }
        /* Broadcast reconfigure IPI. */
        ipi_all_but_self(IPI_HARDCLOCK);
        /* Wait for the reconfiguration to complete. */
restart:
        cpu_spinwait();
        CPU_FOREACH(cpu) {
                if (cpu == curcpu)
                        continue;
                state = DPCPU_ID_PTR(cpu, timerstate);
                if (atomic_load_acq_int(&state->action))
                        goto restart;
        }
#endif
        critical_exit();
}

/*
 * Calculate the nearest frequency supported by the hardware timer.
 */
static int
round_freq(struct eventtimer *et, int freq)
{
        uint64_t div;

        if (et->et_frequency != 0) {
                div = lmax((et->et_frequency + freq / 2) / freq, 1);
                if (et->et_flags & ET_FLAGS_POW2DIV)
                        div = 1 << (flsl(div + div / 2) - 1);
                freq = (et->et_frequency + div / 2) / div;
        }
        if (et->et_min_period > SBT_1S)
                panic("Event timer \"%s\" doesn't support sub-second periods!",
                    et->et_name);
        else if (et->et_min_period != 0)
                freq = min(freq, SBT2FREQ(et->et_min_period));
        if (et->et_max_period < SBT_1S && et->et_max_period != 0)
                freq = max(freq, SBT2FREQ(et->et_max_period));
        return (freq);
}
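
/*
 * Worked example (i8254-style numbers): with et_frequency = 1193182 Hz
 * and a requested freq of 1000 Hz, div = (1193182 + 500) / 1000 = 1193
 * and the function returns (1193182 + 596) / 1193 = 1000, the nearest
 * integer rate an integer divisor can produce.  A timer with
 * ET_FLAGS_POW2DIV additionally rounds the divisor to a power of two,
 * so the achievable frequencies are coarser.
 */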

/*
 * Configure and start event timers (BSP part).
 */
void
cpu_initclocks_bsp(void)
{
        struct pcpu_state *state;
        int base, div, cpu;

        mtx_init(&et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
        CPU_FOREACH(cpu) {
                state = DPCPU_ID_PTR(cpu, timerstate);
                mtx_init(&state->et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
                state->nextcall = SBT_MAX;
                state->nextcallopt = SBT_MAX;
        }
        periodic = want_periodic;
        /* Grab the requested timer or the best one present. */
        if (timername[0])
                timer = et_find(timername, 0, 0);
        if (timer == NULL && periodic) {
                timer = et_find(NULL,
                    ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
        }
        if (timer == NULL) {
                timer = et_find(NULL,
                    ET_FLAGS_ONESHOT, ET_FLAGS_ONESHOT);
        }
        if (timer == NULL && !periodic) {
                timer = et_find(NULL,
                    ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
        }
        if (timer == NULL)
                panic("No usable event timer found!");
        et_init(timer, timercb, NULL, NULL);

        /* Adapt to timer capabilities. */
        if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
                periodic = 0;
        else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
                periodic = 1;
        if (timer->et_flags & ET_FLAGS_C3STOP)
                cpu_disable_c3_sleep++;

        /*
         * We honor the requested 'hz' value.
         * We want to run stathz in the neighborhood of 128 Hz.
         * We would like profhz to run as often as possible.
         */
        if (singlemul <= 0 || singlemul > 20) {
                if (hz >= 1500 || (hz % 128) == 0)
                        singlemul = 1;
                else if (hz >= 750)
                        singlemul = 2;
                else
                        singlemul = 4;
        }
        if (periodic) {
                base = round_freq(timer, hz * singlemul);
                singlemul = max((base + hz / 2) / hz, 1);
                hz = (base + singlemul / 2) / singlemul;
                if (base <= 128)
                        stathz = base;
                else {
                        div = base / 128;
                        if (div >= singlemul && (div % singlemul) == 0)
                                div++;
                        stathz = base / div;
                }
                profhz = stathz;
                while ((profhz + stathz) <= 128 * 64)
                        profhz += stathz;
                profhz = round_freq(timer, profhz);
        } else {
                hz = round_freq(timer, hz);
                stathz = round_freq(timer, 127);
                profhz = round_freq(timer, stathz * 64);
        }
        tick = 1000000 / hz;
        tick_sbt = SBT_1S / hz;
        tick_bt = sbttobt(tick_sbt);
        statperiod = SBT_1S / stathz;
        profperiod = SBT_1S / profhz;
        ET_LOCK();
        configtimer(1);
        ET_UNLOCK();
}
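
/*
 * Worked example of the periodic-mode arithmetic above, assuming
 * hz = 1000 and a timer that round_freq() leaves at base = 2000 Hz
 * (singlemul = 2): div = 2000 / 128 = 15, which is not a multiple of
 * singlemul, so stathz = 2000 / 15 = 133.  statclock() then fires on
 * every 15th timer tick while hardclock() fires on every 2nd, so the
 * two never stay phase-locked tick after tick; that is the point of
 * the (div % singlemul) test, which bumps div when statclock would
 * otherwise always coincide with a hardclock tick.
 */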

/*
 * Start per-CPU event timers on APs.
 */
void
cpu_initclocks_ap(void)
{
        sbintime_t now;
        struct pcpu_state *state;
        struct thread *td;

        state = DPCPU_PTR(timerstate);
        now = sbinuptime();
        ET_HW_LOCK(state);
        state->now = now;
        hardclock_sync(curcpu);
        spinlock_enter();
        ET_HW_UNLOCK(state);
        td = curthread;
        td->td_intr_nesting_level++;
        handleevents(state->now, 2);
        td->td_intr_nesting_level--;
        spinlock_exit();
}

void
suspendclock(void)
{
        ET_LOCK();
        configtimer(0);
        ET_UNLOCK();
}

void
resumeclock(void)
{
        ET_LOCK();
        configtimer(1);
        ET_UNLOCK();
}

/*
 * Switch to profiling clock rates.
 */
void
cpu_startprofclock(void)
{

        ET_LOCK();
        if (profiling == 0) {
                if (periodic) {
                        configtimer(0);
                        profiling = 1;
                        configtimer(1);
                } else
                        profiling = 1;
        } else
                profiling++;
        ET_UNLOCK();
}

/*
 * Switch to regular clock rates.
 */
void
cpu_stopprofclock(void)
{

        ET_LOCK();
        if (profiling == 1) {
                if (periodic) {
                        configtimer(0);
                        profiling = 0;
                        configtimer(1);
                } else
                        profiling = 0;
        } else
                profiling--;
        ET_UNLOCK();
}

/*
 * Switch to idle mode (all ticks handled).
 */
sbintime_t
cpu_idleclock(void)
{
        sbintime_t now, t;
        struct pcpu_state *state;

        if (idletick || busy ||
            (periodic && (timer->et_flags & ET_FLAGS_PERCPU))
#ifdef DEVICE_POLLING
            || curcpu == CPU_FIRST()
#endif
            )
                return (-1);
        state = DPCPU_PTR(timerstate);
        if (periodic)
                now = state->now;
        else
                now = sbinuptime();
        CTR3(KTR_SPARE2, "idle at %d:    now  %d.%08x",
            curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
        t = getnextcpuevent(1);
        ET_HW_LOCK(state);
        state->idle = 1;
        state->nextevent = t;
        if (!periodic)
                loadtimer(now, 0);
        ET_HW_UNLOCK(state);
        return (MAX(t - now, 0));
}
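
/*
 * The return value of cpu_idleclock() is -1 when tick skipping is not
 * engaged (the timer keeps firing normally, so the caller gains little
 * from a deep sleep), and otherwise the time until the next scheduled
 * event on this CPU.  A CPU that entered idle mode this way is expected
 * to call cpu_activeclock() below when it wakes, so the hardclock()
 * ticks skipped while sleeping are accounted for.
 */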

/*
 * Switch to active mode (skip empty ticks).
 */
void
cpu_activeclock(void)
{
        sbintime_t now;
        struct pcpu_state *state;
        struct thread *td;

        state = DPCPU_PTR(timerstate);
        if (state->idle == 0 || busy)
                return;
        if (periodic)
                now = state->now;
        else
                now = sbinuptime();
        CTR3(KTR_SPARE2, "active at %d:  now  %d.%08x",
            curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
        spinlock_enter();
        td = curthread;
        td->td_intr_nesting_level++;
        handleevents(now, 1);
        td->td_intr_nesting_level--;
        spinlock_exit();
}

/*
 * Change the frequency of the given timer.  This changes et->et_frequency and
 * if et is the active timer it reconfigures the timer on all CPUs.  This is
 * intended to be a private interface for the use of et_change_frequency() only.
 */
void
cpu_et_frequency(struct eventtimer *et, uint64_t newfreq)
{

        ET_LOCK();
        if (et == timer) {
                configtimer(0);
                et->et_frequency = newfreq;
                configtimer(1);
        } else
                et->et_frequency = newfreq;
        ET_UNLOCK();
}

void
cpu_new_callout(int cpu, sbintime_t bt, sbintime_t bt_opt)
{
        struct pcpu_state *state;

        /* Do not touch anything if somebody is reconfiguring timers. */
        if (busy)
                return;
        CTR6(KTR_SPARE2, "new co at %d:    on %d at %d.%08x - %d.%08x",
            curcpu, cpu, (int)(bt_opt >> 32), (u_int)(bt_opt & 0xffffffff),
            (int)(bt >> 32), (u_int)(bt & 0xffffffff));

        KASSERT(!CPU_ABSENT(cpu), ("Absent CPU %d", cpu));
        state = DPCPU_ID_PTR(cpu, timerstate);
        ET_HW_LOCK(state);

        /*
         * If there is a callout time already set earlier, do nothing.
         * This check may appear redundant because we already check in
         * callout_process(), but this double check guarantees we're safe
         * with respect to races between interrupt execution and
         * scheduling.
         */
        state->nextcallopt = bt_opt;
        if (bt >= state->nextcall)
                goto done;
        state->nextcall = bt;
        /* If there is some other event set earlier, do nothing. */
        if (bt >= state->nextevent)
                goto done;
        state->nextevent = bt;
        /* If the timer is periodic, there is nothing to reprogram. */
        if (periodic)
                goto done;
        /* If the timer is global or owned by the current CPU, reprogram it. */
        if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || cpu == curcpu) {
                loadtimer(sbinuptime(), 0);
done:
                ET_HW_UNLOCK(state);
                return;
        }
        /* Otherwise make the other CPU reprogram it. */
        state->handle = 1;
        ET_HW_UNLOCK(state);
#ifdef SMP
        ipi_cpu(cpu, IPI_HARDCLOCK);
#endif
}
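
/*
 * On the two deadlines above: bt is the time by which the callout must
 * be handled, while bt_opt marks the start of its acceptable window, so
 * the event may be processed any time from bt_opt on and nearby
 * callouts can be batched into a single interrupt.  Note that only bt
 * can pull nextcall/nextevent earlier; nextcallopt is simply
 * overwritten, since callout_process() recomputes both on its next run.
 */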

/*
 * Report or change the active event timer hardware.
 */
static int
sysctl_kern_eventtimer_timer(SYSCTL_HANDLER_ARGS)
{
        char buf[32];
        struct eventtimer *et;
        int error;

        ET_LOCK();
        et = timer;
        snprintf(buf, sizeof(buf), "%s", et->et_name);
        ET_UNLOCK();
        error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
        ET_LOCK();
        et = timer;
        if (error != 0 || req->newptr == NULL ||
            strcasecmp(buf, et->et_name) == 0) {
                ET_UNLOCK();
                return (error);
        }
        et = et_find(buf, 0, 0);
        if (et == NULL) {
                ET_UNLOCK();
                return (ENOENT);
        }
        configtimer(0);
        et_free(timer);
        if (et->et_flags & ET_FLAGS_C3STOP)
                cpu_disable_c3_sleep++;
        if (timer->et_flags & ET_FLAGS_C3STOP)
                cpu_disable_c3_sleep--;
        periodic = want_periodic;
        timer = et;
        et_init(timer, timercb, NULL, NULL);
        configtimer(1);
        ET_UNLOCK();
        return (error);
}
SYSCTL_PROC(_kern_eventtimer, OID_AUTO, timer,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_eventtimer_timer, "A", "Chosen event timer");

/*
 * Report or change the active event timer periodicity.
 */
static int
sysctl_kern_eventtimer_periodic(SYSCTL_HANDLER_ARGS)
{
        int error, val;

        val = periodic;
        error = sysctl_handle_int(oidp, &val, 0, req);
        if (error != 0 || req->newptr == NULL)
                return (error);
        ET_LOCK();
        configtimer(0);
        periodic = want_periodic = val;
        configtimer(1);
        ET_UNLOCK();
        return (error);
}
SYSCTL_PROC(_kern_eventtimer, OID_AUTO, periodic,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_eventtimer_periodic, "I", "Enable event timer periodic mode");
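
/*
 * Usage note: the two handlers above back the kern.eventtimer.timer and
 * kern.eventtimer.periodic sysctls, so e.g. "sysctl
 * kern.eventtimer.timer=HPET" (assuming an HPET is present) retires the
 * current timer via configtimer(0)/et_free() and brings the new one up
 * via et_init()/configtimer(1) at runtime.
 */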

#include "opt_ddb.h"

#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(clocksource, db_show_clocksource)
{
        struct pcpu_state *st;
        int c;

        CPU_FOREACH(c) {
                st = DPCPU_ID_PTR(c, timerstate);
                db_printf(
                    "CPU %2d: action %d handle %d  ipi %d idle %d\n"
                    "        now %#jx nevent %#jx (%jd)\n"
                    "        ntick %#jx (%jd) nhard %#jx (%jd)\n"
                    "        nstat %#jx (%jd) nprof %#jx (%jd)\n"
                    "        ncall %#jx (%jd) ncallopt %#jx (%jd)\n",
                    c, st->action, st->handle, st->ipi, st->idle,
                    (uintmax_t)st->now,
                    (uintmax_t)st->nextevent,
                    (uintmax_t)(st->nextevent - st->now) / tick_sbt,
                    (uintmax_t)st->nexttick,
                    (uintmax_t)(st->nexttick - st->now) / tick_sbt,
                    (uintmax_t)st->nexthard,
                    (uintmax_t)(st->nexthard - st->now) / tick_sbt,
                    (uintmax_t)st->nextstat,
                    (uintmax_t)(st->nextstat - st->now) / tick_sbt,
                    (uintmax_t)st->nextprof,
                    (uintmax_t)(st->nextprof - st->now) / tick_sbt,
                    (uintmax_t)st->nextcall,
                    (uintmax_t)(st->nextcall - st->now) / tick_sbt,
                    (uintmax_t)st->nextcallopt,
                    (uintmax_t)(st->nextcallopt - st->now) / tick_sbt);
        }
}

#endif
