FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_timeout.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-3-Clause
    3  *
    4  * Copyright (c) 1982, 1986, 1991, 1993
    5  *      The Regents of the University of California.  All rights reserved.
    6  * (c) UNIX System Laboratories, Inc.
    7  * All or some portions of this file are derived from material licensed
    8  * to the University of California by American Telephone and Telegraph
    9  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
   10  * the permission of UNIX System Laboratories, Inc.
   11  *
   12  * Redistribution and use in source and binary forms, with or without
   13  * modification, are permitted provided that the following conditions
   14  * are met:
   15  * 1. Redistributions of source code must retain the above copyright
   16  *    notice, this list of conditions and the following disclaimer.
   17  * 2. Redistributions in binary form must reproduce the above copyright
   18  *    notice, this list of conditions and the following disclaimer in the
   19  *    documentation and/or other materials provided with the distribution.
   20  * 3. Neither the name of the University nor the names of its contributors
   21  *    may be used to endorse or promote products derived from this software
   22  *    without specific prior written permission.
   23  *
   24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   34  * SUCH DAMAGE.
   35  *
   36  *      From: @(#)kern_clock.c  8.5 (Berkeley) 1/21/94
   37  */
   38 
   39 #include <sys/cdefs.h>
   40 __FBSDID("$FreeBSD: stable/12/sys/kern/kern_timeout.c 355610 2019-12-11 15:15:21Z mav $");
   41 
   42 #include "opt_callout_profiling.h"
   43 #include "opt_ddb.h"
   44 #if defined(__arm__)
   45 #include "opt_timer.h"
   46 #endif
   47 #include "opt_rss.h"
   48 
   49 #include <sys/param.h>
   50 #include <sys/systm.h>
   51 #include <sys/bus.h>
   52 #include <sys/callout.h>
   53 #include <sys/domainset.h>
   54 #include <sys/file.h>
   55 #include <sys/interrupt.h>
   56 #include <sys/kernel.h>
   57 #include <sys/ktr.h>
   58 #include <sys/lock.h>
   59 #include <sys/malloc.h>
   60 #include <sys/mutex.h>
   61 #include <sys/proc.h>
   62 #include <sys/sdt.h>
   63 #include <sys/sleepqueue.h>
   64 #include <sys/sysctl.h>
   65 #include <sys/smp.h>
   66 
   67 #ifdef DDB
   68 #include <ddb/ddb.h>
   69 #include <machine/_inttypes.h>
   70 #endif
   71 
   72 #ifdef SMP
   73 #include <machine/cpu.h>
   74 #endif
   75 
   76 #ifndef NO_EVENTTIMERS
   77 DPCPU_DECLARE(sbintime_t, hardclocktime);
   78 #endif
   79 
   80 SDT_PROVIDER_DEFINE(callout_execute);
   81 SDT_PROBE_DEFINE1(callout_execute, , , callout__start, "struct callout *");
   82 SDT_PROBE_DEFINE1(callout_execute, , , callout__end, "struct callout *");
   83 
   84 #ifdef CALLOUT_PROFILING
   85 static int avg_depth;
   86 SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
   87     "Average number of items examined per softclock call. Units = 1/1000");
   88 static int avg_gcalls;
   89 SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
   90     "Average number of Giant callouts made per softclock call. Units = 1/1000");
   91 static int avg_lockcalls;
   92 SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
   93     "Average number of lock callouts made per softclock call. Units = 1/1000");
   94 static int avg_mpcalls;
   95 SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
   96     "Average number of MP callouts made per softclock call. Units = 1/1000");
   97 static int avg_depth_dir;
   98 SYSCTL_INT(_debug, OID_AUTO, to_avg_depth_dir, CTLFLAG_RD, &avg_depth_dir, 0,
   99     "Average number of direct callouts examined per callout_process call. "
  100     "Units = 1/1000");
  101 static int avg_lockcalls_dir;
  102 SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls_dir, CTLFLAG_RD,
  103     &avg_lockcalls_dir, 0, "Average number of lock direct callouts made per "
  104     "callout_process call. Units = 1/1000");
  105 static int avg_mpcalls_dir;
  106 SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls_dir, CTLFLAG_RD, &avg_mpcalls_dir,
  107     0, "Average number of MP direct callouts made per callout_process call. "
  108     "Units = 1/1000");
  109 #endif
  110 
  111 static int ncallout;
  112 SYSCTL_INT(_kern, OID_AUTO, ncallout, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &ncallout, 0,
  113     "Number of entries in callwheel and size of timeout() preallocation");
  114 
  115 #ifdef  RSS
  116 static int pin_default_swi = 1;
  117 static int pin_pcpu_swi = 1;
  118 #else
  119 static int pin_default_swi = 0;
  120 static int pin_pcpu_swi = 0;
  121 #endif
  122 
  123 SYSCTL_INT(_kern, OID_AUTO, pin_default_swi, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pin_default_swi,
  124     0, "Pin the default (non-per-cpu) swi (shared with PCPU 0 swi)");
  125 SYSCTL_INT(_kern, OID_AUTO, pin_pcpu_swi, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pin_pcpu_swi,
   126     0, "Pin the per-CPU swis (except PCPU 0, which is also the default)");
  127 
  128 /*
  129  * TODO:
  130  *      allocate more timeout table slots when table overflows.
  131  */
  132 static u_int __read_mostly callwheelsize;
  133 static u_int __read_mostly callwheelmask;
  134 
  135 /*
   136  * The callout cpu exec entities represent the information necessary
   137  * for describing the state of callouts currently running on the CPU
   138  * and the information needed for migrating callouts to a new callout
   139  * cpu.  In particular, the first entry of the cc_exec_entity array
   140  * holds the state for a callout running in SWI thread context, while
   141  * the second one holds the state for a callout running directly from
   142  * hardware interrupt context.  The cached information is very important
   143  * for deferring migration when the migrating callout is already running.
  144  */
  145 struct cc_exec {
  146         struct callout          *cc_curr;
  147         void                    (*cc_drain)(void *);
  148 #ifdef SMP
  149         void                    (*ce_migration_func)(void *);
  150         void                    *ce_migration_arg;
  151         int                     ce_migration_cpu;
  152         sbintime_t              ce_migration_time;
  153         sbintime_t              ce_migration_prec;
  154 #endif
  155         bool                    cc_cancel;
  156         bool                    cc_waiting;
  157 };
  158 
  159 /*
  160  * There is one struct callout_cpu per cpu, holding all relevant
  161  * state for the callout processing thread on the individual CPU.
  162  */
  163 struct callout_cpu {
  164         struct mtx_padalign     cc_lock;
  165         struct cc_exec          cc_exec_entity[2];
  166         struct callout          *cc_next;
  167         struct callout          *cc_callout;
  168         struct callout_list     *cc_callwheel;
  169         struct callout_tailq    cc_expireq;
  170         struct callout_slist    cc_callfree;
  171         sbintime_t              cc_firstevent;
  172         sbintime_t              cc_lastscan;
  173         void                    *cc_cookie;
  174         u_int                   cc_bucket;
  175         u_int                   cc_inited;
  176         char                    cc_ktr_event_name[20];
  177 };
  178 
  179 #define callout_migrating(c)    ((c)->c_iflags & CALLOUT_DFRMIGRATION)
  180 
  181 #define cc_exec_curr(cc, dir)           cc->cc_exec_entity[dir].cc_curr
  182 #define cc_exec_drain(cc, dir)          cc->cc_exec_entity[dir].cc_drain
  183 #define cc_exec_next(cc)                cc->cc_next
  184 #define cc_exec_cancel(cc, dir)         cc->cc_exec_entity[dir].cc_cancel
  185 #define cc_exec_waiting(cc, dir)        cc->cc_exec_entity[dir].cc_waiting
  186 #ifdef SMP
  187 #define cc_migration_func(cc, dir)      cc->cc_exec_entity[dir].ce_migration_func
  188 #define cc_migration_arg(cc, dir)       cc->cc_exec_entity[dir].ce_migration_arg
  189 #define cc_migration_cpu(cc, dir)       cc->cc_exec_entity[dir].ce_migration_cpu
  190 #define cc_migration_time(cc, dir)      cc->cc_exec_entity[dir].ce_migration_time
  191 #define cc_migration_prec(cc, dir)      cc->cc_exec_entity[dir].ce_migration_prec
  192 
  193 struct callout_cpu cc_cpu[MAXCPU];
  194 #define CPUBLOCK        MAXCPU
  195 #define CC_CPU(cpu)     (&cc_cpu[(cpu)])
  196 #define CC_SELF()       CC_CPU(PCPU_GET(cpuid))
  197 #else
  198 struct callout_cpu cc_cpu;
  199 #define CC_CPU(cpu)     &cc_cpu
  200 #define CC_SELF()       &cc_cpu
  201 #endif
  202 #define CC_LOCK(cc)     mtx_lock_spin(&(cc)->cc_lock)
  203 #define CC_UNLOCK(cc)   mtx_unlock_spin(&(cc)->cc_lock)
  204 #define CC_LOCK_ASSERT(cc)      mtx_assert(&(cc)->cc_lock, MA_OWNED)
  205 
  206 static int __read_mostly timeout_cpu;
  207 
  208 static void     callout_cpu_init(struct callout_cpu *cc, int cpu);
  209 static void     softclock_call_cc(struct callout *c, struct callout_cpu *cc,
  210 #ifdef CALLOUT_PROFILING
  211                     int *mpcalls, int *lockcalls, int *gcalls,
  212 #endif
  213                     int direct);
  214 
  215 static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");
  216 
  217 /**
  218  * Locked by cc_lock:
  219  *   cc_curr         - If a callout is in progress, it is cc_curr.
  220  *                     If cc_curr is non-NULL, threads waiting in
  221  *                     callout_drain() will be woken up as soon as the
  222  *                     relevant callout completes.
   223  *   cc_cancel       - Setting this to true with both c_lock and cc_lock
   224  *                     held guarantees that the current callout will not
   225  *                     run.  The softclock_call_cc() code sets it to false
   226  *                     before it drops cc_lock to acquire c_lock, and it
   227  *                     calls the handler only if cc_cancel is still false
   228  *                     after c_lock is successfully acquired.
   229  *   cc_waiting      - If a thread is waiting in callout_drain(), then
   230  *                     cc_waiting is true.  Set only when
   231  *                     cc_curr is non-NULL.
  232  */
  233 
  234 /*
  235  * Resets the execution entity tied to a specific callout cpu.
  236  */
  237 static void
  238 cc_cce_cleanup(struct callout_cpu *cc, int direct)
  239 {
  240 
  241         cc_exec_curr(cc, direct) = NULL;
  242         cc_exec_cancel(cc, direct) = false;
  243         cc_exec_waiting(cc, direct) = false;
  244 #ifdef SMP
  245         cc_migration_cpu(cc, direct) = CPUBLOCK;
  246         cc_migration_time(cc, direct) = 0;
  247         cc_migration_prec(cc, direct) = 0;
  248         cc_migration_func(cc, direct) = NULL;
  249         cc_migration_arg(cc, direct) = NULL;
  250 #endif
  251 }
  252 
  253 /*
  254  * Checks if migration is requested by a specific callout cpu.
  255  */
  256 static int
  257 cc_cce_migrating(struct callout_cpu *cc, int direct)
  258 {
  259 
  260 #ifdef SMP
  261         return (cc_migration_cpu(cc, direct) != CPUBLOCK);
  262 #else
  263         return (0);
  264 #endif
  265 }
  266 
  267 /*
  268  * Kernel low level callwheel initialization
  269  * called on the BSP during kernel startup.
  270  */
  271 static void
  272 callout_callwheel_init(void *dummy)
  273 {
  274         struct callout_cpu *cc;
  275 
  276         /*
  277          * Calculate the size of the callout wheel and the preallocated
  278          * timeout() structures.
   279          * XXX: Clip ncallout to what the previous maxusers-based formula
   280          * yielded at its maximum of 384.  This is still huge, but acceptable.
  281          */
  282         memset(CC_CPU(curcpu), 0, sizeof(cc_cpu));
  283         ncallout = imin(16 + maxproc + maxfiles, 18508);
  284         TUNABLE_INT_FETCH("kern.ncallout", &ncallout);
  285 
  286         /*
   287          * Calculate the callout wheel size; it should be the next power
   288          * of two higher than 'ncallout'.
  289          */
  290         callwheelsize = 1 << fls(ncallout);
  291         callwheelmask = callwheelsize - 1;
  292 
  293         /*
  294          * Fetch whether we're pinning the swi's or not.
  295          */
  296         TUNABLE_INT_FETCH("kern.pin_default_swi", &pin_default_swi);
  297         TUNABLE_INT_FETCH("kern.pin_pcpu_swi", &pin_pcpu_swi);
  298 
  299         /*
  300          * Only BSP handles timeout(9) and receives a preallocation.
  301          *
  302          * XXX: Once all timeout(9) consumers are converted this can
  303          * be removed.
  304          */
  305         timeout_cpu = PCPU_GET(cpuid);
  306         cc = CC_CPU(timeout_cpu);
  307         cc->cc_callout = malloc(ncallout * sizeof(struct callout),
  308             M_CALLOUT, M_WAITOK);
  309         callout_cpu_init(cc, timeout_cpu);
  310 }
  311 SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);
  312 
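/*
 * Editor's note: a minimal sketch, not part of the original file, of the
 * sizing arithmetic performed by callout_callwheel_init() above.  The
 * maxproc and maxfiles values used here are hypothetical.
 */
static __unused u_int
example_callwheel_sizing(void)
{
        int nc;

        /* With, say, maxproc = 1044 and maxfiles = 1024: */
        nc = imin(16 + 1044 + 1024, 18508);     /* nc == 2084 */
        /* Next power of two above nc: 1 << fls(2084) == 4096. */
        return ((1 << fls(nc)) - 1);            /* matching mask == 4095 */
}
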
  313 /*
  314  * Initialize the per-cpu callout structures.
  315  */
  316 static void
  317 callout_cpu_init(struct callout_cpu *cc, int cpu)
  318 {
  319         struct callout *c;
  320         int i;
  321 
  322         mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
  323         SLIST_INIT(&cc->cc_callfree);
  324         cc->cc_inited = 1;
  325         cc->cc_callwheel = malloc_domainset(sizeof(struct callout_list) *
  326             callwheelsize, M_CALLOUT,
  327             DOMAINSET_PREF(pcpu_find(cpu)->pc_domain), M_WAITOK);
  328         for (i = 0; i < callwheelsize; i++)
  329                 LIST_INIT(&cc->cc_callwheel[i]);
  330         TAILQ_INIT(&cc->cc_expireq);
  331         cc->cc_firstevent = SBT_MAX;
  332         for (i = 0; i < 2; i++)
  333                 cc_cce_cleanup(cc, i);
  334         snprintf(cc->cc_ktr_event_name, sizeof(cc->cc_ktr_event_name),
  335             "callwheel cpu %d", cpu);
  336         if (cc->cc_callout == NULL)     /* Only BSP handles timeout(9) */
  337                 return;
  338         for (i = 0; i < ncallout; i++) {
  339                 c = &cc->cc_callout[i];
  340                 callout_init(c, 0);
  341                 c->c_iflags = CALLOUT_LOCAL_ALLOC;
  342                 SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
  343         }
  344 }
  345 
  346 #ifdef SMP
  347 /*
  348  * Switches the cpu tied to a specific callout.
   349  * The function expects the current callout cpu to be locked on entry and
   350  * returns with the new callout cpu locked.
  351  */
  352 static struct callout_cpu *
  353 callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
  354 {
  355         struct callout_cpu *new_cc;
  356 
  357         MPASS(c != NULL && cc != NULL);
  358         CC_LOCK_ASSERT(cc);
  359 
  360         /*
  361          * Avoid interrupts and preemption firing after the callout cpu
   362          * is blocked in order to avoid deadlocks, as an interrupting
   363          * thread may try to acquire the callout cpu lock.
  364          */
  365         c->c_cpu = CPUBLOCK;
  366         spinlock_enter();
  367         CC_UNLOCK(cc);
  368         new_cc = CC_CPU(new_cpu);
  369         CC_LOCK(new_cc);
  370         spinlock_exit();
  371         c->c_cpu = new_cpu;
  372         return (new_cc);
  373 }
  374 #endif
  375 
  376 /*
  377  * Start standard softclock thread.
  378  */
  379 static void
  380 start_softclock(void *dummy)
  381 {
  382         struct callout_cpu *cc;
  383         char name[MAXCOMLEN];
  384 #ifdef SMP
  385         int cpu;
  386         struct intr_event *ie;
  387 #endif
  388 
  389         cc = CC_CPU(timeout_cpu);
  390         snprintf(name, sizeof(name), "clock (%d)", timeout_cpu);
  391         if (swi_add(&clk_intr_event, name, softclock, cc, SWI_CLOCK,
  392             INTR_MPSAFE, &cc->cc_cookie))
  393                 panic("died while creating standard software ithreads");
  394         if (pin_default_swi &&
  395             (intr_event_bind(clk_intr_event, timeout_cpu) != 0)) {
  396                 printf("%s: timeout clock couldn't be pinned to cpu %d\n",
  397                     __func__,
  398                     timeout_cpu);
  399         }
  400 
  401 #ifdef SMP
  402         CPU_FOREACH(cpu) {
  403                 if (cpu == timeout_cpu)
  404                         continue;
  405                 cc = CC_CPU(cpu);
  406                 cc->cc_callout = NULL;  /* Only BSP handles timeout(9). */
  407                 callout_cpu_init(cc, cpu);
  408                 snprintf(name, sizeof(name), "clock (%d)", cpu);
  409                 ie = NULL;
  410                 if (swi_add(&ie, name, softclock, cc, SWI_CLOCK,
  411                     INTR_MPSAFE, &cc->cc_cookie))
  412                         panic("died while creating standard software ithreads");
  413                 if (pin_pcpu_swi && (intr_event_bind(ie, cpu) != 0)) {
  414                         printf("%s: per-cpu clock couldn't be pinned to "
  415                             "cpu %d\n",
  416                             __func__,
  417                             cpu);
  418                 }
  419         }
  420 #endif
  421 }
  422 SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);
  423 
  424 #define CC_HASH_SHIFT   8
  425 
  426 static inline u_int
  427 callout_hash(sbintime_t sbt)
  428 {
  429 
  430         return (sbt >> (32 - CC_HASH_SHIFT));
  431 }
  432 
  433 static inline u_int
  434 callout_get_bucket(sbintime_t sbt)
  435 {
  436 
  437         return (callout_hash(sbt) & callwheelmask);
  438 }
  439 
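/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * With CC_HASH_SHIFT == 8, callout_hash() above discards the low
 * 32 - 8 = 24 bits of the 32.32 fixed-point sbintime_t, so one hash step
 * (one callwheel bucket) spans SBT_1S / 256, roughly 3.9 ms of time.
 */
static __unused sbintime_t
example_bucket_width(void)
{

        return ((sbintime_t)1 << (32 - CC_HASH_SHIFT)); /* == SBT_1S / 256 */
}
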
  440 void
  441 callout_process(sbintime_t now)
  442 {
  443         struct callout *tmp, *tmpn;
  444         struct callout_cpu *cc;
  445         struct callout_list *sc;
  446         sbintime_t first, last, max, tmp_max;
  447         uint32_t lookahead;
  448         u_int firstb, lastb, nowb;
  449 #ifdef CALLOUT_PROFILING
  450         int depth_dir = 0, mpcalls_dir = 0, lockcalls_dir = 0;
  451 #endif
  452 
  453         cc = CC_SELF();
  454         mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
  455 
  456         /* Compute the buckets of the last scan and present times. */
  457         firstb = callout_hash(cc->cc_lastscan);
  458         cc->cc_lastscan = now;
  459         nowb = callout_hash(now);
  460 
  461         /* Compute the last bucket and minimum time of the bucket after it. */
  462         if (nowb == firstb)
  463                 lookahead = (SBT_1S / 16);
  464         else if (nowb - firstb == 1)
  465                 lookahead = (SBT_1S / 8);
  466         else
  467                 lookahead = (SBT_1S / 2);
  468         first = last = now;
  469         first += (lookahead / 2);
  470         last += lookahead;
  471         last &= (0xffffffffffffffffLLU << (32 - CC_HASH_SHIFT));
  472         lastb = callout_hash(last) - 1;
  473         max = last;
  474 
  475         /*
  476          * Check if we wrapped around the entire wheel from the last scan.
   477          * In that case, we need to scan the entire wheel for pending callouts.
  478          */
  479         if (lastb - firstb >= callwheelsize) {
  480                 lastb = firstb + callwheelsize - 1;
  481                 if (nowb - firstb >= callwheelsize)
  482                         nowb = lastb;
  483         }
  484 
  485         /* Iterate callwheel from firstb to nowb and then up to lastb. */
  486         do {
  487                 sc = &cc->cc_callwheel[firstb & callwheelmask];
  488                 tmp = LIST_FIRST(sc);
  489                 while (tmp != NULL) {
   490                         /* Run the callout if its time has already passed. */
  491                         if (tmp->c_time <= now) {
  492                                 /*
  493                                  * Consumer told us the callout may be run
  494                                  * directly from hardware interrupt context.
  495                                  */
  496                                 if (tmp->c_iflags & CALLOUT_DIRECT) {
  497 #ifdef CALLOUT_PROFILING
  498                                         ++depth_dir;
  499 #endif
  500                                         cc_exec_next(cc) =
  501                                             LIST_NEXT(tmp, c_links.le);
  502                                         cc->cc_bucket = firstb & callwheelmask;
  503                                         LIST_REMOVE(tmp, c_links.le);
  504                                         softclock_call_cc(tmp, cc,
  505 #ifdef CALLOUT_PROFILING
  506                                             &mpcalls_dir, &lockcalls_dir, NULL,
  507 #endif
  508                                             1);
  509                                         tmp = cc_exec_next(cc);
  510                                         cc_exec_next(cc) = NULL;
  511                                 } else {
  512                                         tmpn = LIST_NEXT(tmp, c_links.le);
  513                                         LIST_REMOVE(tmp, c_links.le);
  514                                         TAILQ_INSERT_TAIL(&cc->cc_expireq,
  515                                             tmp, c_links.tqe);
  516                                         tmp->c_iflags |= CALLOUT_PROCESSED;
  517                                         tmp = tmpn;
  518                                 }
  519                                 continue;
  520                         }
  521                         /* Skip events from distant future. */
  522                         if (tmp->c_time >= max)
  523                                 goto next;
  524                         /*
   525                          * The event's minimal time is bigger than the
   526                          * present maximal time, so it cannot be aggregated.
  527                          */
  528                         if (tmp->c_time > last) {
  529                                 lastb = nowb;
  530                                 goto next;
  531                         }
  532                         /* Update first and last time, respecting this event. */
  533                         if (tmp->c_time < first)
  534                                 first = tmp->c_time;
  535                         tmp_max = tmp->c_time + tmp->c_precision;
  536                         if (tmp_max < last)
  537                                 last = tmp_max;
  538 next:
  539                         tmp = LIST_NEXT(tmp, c_links.le);
  540                 }
  541                 /* Proceed with the next bucket. */
  542                 firstb++;
  543                 /*
   544                  * Stop if we have looked past the present time and found
   545                  * some event that we cannot execute yet.
   546                  * Stop if we have looked far enough into the future.
  547                  */
  548         } while (((int)(firstb - lastb)) <= 0);
  549         cc->cc_firstevent = last;
  550 #ifndef NO_EVENTTIMERS
  551         cpu_new_callout(curcpu, last, first);
  552 #endif
  553 #ifdef CALLOUT_PROFILING
  554         avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8;
  555         avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8;
  556         avg_lockcalls_dir += (lockcalls_dir * 1000 - avg_lockcalls_dir) >> 8;
  557 #endif
  558         mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
  559         /*
  560          * swi_sched acquires the thread lock, so we don't want to call it
  561          * with cc_lock held; incorrect locking order.
  562          */
  563         if (!TAILQ_EMPTY(&cc->cc_expireq))
  564                 swi_sched(cc->cc_cookie, 0);
  565 }
  566 
  567 static struct callout_cpu *
  568 callout_lock(struct callout *c)
  569 {
  570         struct callout_cpu *cc;
  571         int cpu;
  572 
  573         for (;;) {
  574                 cpu = c->c_cpu;
  575 #ifdef SMP
  576                 if (cpu == CPUBLOCK) {
  577                         while (c->c_cpu == CPUBLOCK)
  578                                 cpu_spinwait();
  579                         continue;
  580                 }
  581 #endif
  582                 cc = CC_CPU(cpu);
  583                 CC_LOCK(cc);
  584                 if (cpu == c->c_cpu)
  585                         break;
  586                 CC_UNLOCK(cc);
  587         }
  588         return (cc);
  589 }
  590 
  591 static void
  592 callout_cc_add(struct callout *c, struct callout_cpu *cc,
  593     sbintime_t sbt, sbintime_t precision, void (*func)(void *),
  594     void *arg, int cpu, int flags)
  595 {
  596         int bucket;
  597 
  598         CC_LOCK_ASSERT(cc);
  599         if (sbt < cc->cc_lastscan)
  600                 sbt = cc->cc_lastscan;
  601         c->c_arg = arg;
  602         c->c_iflags |= CALLOUT_PENDING;
  603         c->c_iflags &= ~CALLOUT_PROCESSED;
  604         c->c_flags |= CALLOUT_ACTIVE;
  605         if (flags & C_DIRECT_EXEC)
  606                 c->c_iflags |= CALLOUT_DIRECT;
  607         c->c_func = func;
  608         c->c_time = sbt;
  609         c->c_precision = precision;
  610         bucket = callout_get_bucket(c->c_time);
  611         CTR3(KTR_CALLOUT, "precision set for %p: %d.%08x",
  612             c, (int)(c->c_precision >> 32),
  613             (u_int)(c->c_precision & 0xffffffff));
  614         LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le);
  615         if (cc->cc_bucket == bucket)
  616                 cc_exec_next(cc) = c;
  617 #ifndef NO_EVENTTIMERS
  618         /*
   619          * Inform the eventtimers(4) subsystem that a new callout has
   620          * been inserted, but only if really required.
  621          */
  622         if (SBT_MAX - c->c_time < c->c_precision)
  623                 c->c_precision = SBT_MAX - c->c_time;
  624         sbt = c->c_time + c->c_precision;
  625         if (sbt < cc->cc_firstevent) {
  626                 cc->cc_firstevent = sbt;
  627                 cpu_new_callout(cpu, sbt, c->c_time);
  628         }
  629 #endif
  630 }
  631 
  632 static void
  633 callout_cc_del(struct callout *c, struct callout_cpu *cc)
  634 {
  635 
  636         if ((c->c_iflags & CALLOUT_LOCAL_ALLOC) == 0)
  637                 return;
  638         c->c_func = NULL;
  639         SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
  640 }
  641 
  642 static void
  643 softclock_call_cc(struct callout *c, struct callout_cpu *cc,
  644 #ifdef CALLOUT_PROFILING
  645     int *mpcalls, int *lockcalls, int *gcalls,
  646 #endif
  647     int direct)
  648 {
  649         struct rm_priotracker tracker;
  650         void (*c_func)(void *);
  651         void *c_arg;
  652         struct lock_class *class;
  653         struct lock_object *c_lock;
  654         uintptr_t lock_status;
  655         int c_iflags;
  656 #ifdef SMP
  657         struct callout_cpu *new_cc;
  658         void (*new_func)(void *);
  659         void *new_arg;
  660         int flags, new_cpu;
  661         sbintime_t new_prec, new_time;
  662 #endif
  663 #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING) 
  664         sbintime_t sbt1, sbt2;
  665         struct timespec ts2;
  666         static sbintime_t maxdt = 2 * SBT_1MS;  /* 2 msec */
  667         static timeout_t *lastfunc;
  668 #endif
  669 
  670         KASSERT((c->c_iflags & CALLOUT_PENDING) == CALLOUT_PENDING,
  671             ("softclock_call_cc: pend %p %x", c, c->c_iflags));
  672         KASSERT((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE,
  673             ("softclock_call_cc: act %p %x", c, c->c_flags));
  674         class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
  675         lock_status = 0;
  676         if (c->c_flags & CALLOUT_SHAREDLOCK) {
  677                 if (class == &lock_class_rm)
  678                         lock_status = (uintptr_t)&tracker;
  679                 else
  680                         lock_status = 1;
  681         }
  682         c_lock = c->c_lock;
  683         c_func = c->c_func;
  684         c_arg = c->c_arg;
  685         c_iflags = c->c_iflags;
  686         if (c->c_iflags & CALLOUT_LOCAL_ALLOC)
  687                 c->c_iflags = CALLOUT_LOCAL_ALLOC;
  688         else
  689                 c->c_iflags &= ~CALLOUT_PENDING;
  690         
  691         cc_exec_curr(cc, direct) = c;
  692         cc_exec_cancel(cc, direct) = false;
  693         cc_exec_drain(cc, direct) = NULL;
  694         CC_UNLOCK(cc);
  695         if (c_lock != NULL) {
  696                 class->lc_lock(c_lock, lock_status);
  697                 /*
  698                  * The callout may have been cancelled
  699                  * while we switched locks.
  700                  */
  701                 if (cc_exec_cancel(cc, direct)) {
  702                         class->lc_unlock(c_lock);
  703                         goto skip;
  704                 }
  705                 /* The callout cannot be stopped now. */
  706                 cc_exec_cancel(cc, direct) = true;
  707                 if (c_lock == &Giant.lock_object) {
  708 #ifdef CALLOUT_PROFILING
  709                         (*gcalls)++;
  710 #endif
  711                         CTR3(KTR_CALLOUT, "callout giant %p func %p arg %p",
  712                             c, c_func, c_arg);
  713                 } else {
  714 #ifdef CALLOUT_PROFILING
  715                         (*lockcalls)++;
  716 #endif
  717                         CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
  718                             c, c_func, c_arg);
  719                 }
  720         } else {
  721 #ifdef CALLOUT_PROFILING
  722                 (*mpcalls)++;
  723 #endif
  724                 CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
  725                     c, c_func, c_arg);
  726         }
  727         KTR_STATE3(KTR_SCHED, "callout", cc->cc_ktr_event_name, "running",
  728             "func:%p", c_func, "arg:%p", c_arg, "direct:%d", direct);
  729 #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
  730         sbt1 = sbinuptime();
  731 #endif
  732         THREAD_NO_SLEEPING();
  733         SDT_PROBE1(callout_execute, , , callout__start, c);
  734         c_func(c_arg);
  735         SDT_PROBE1(callout_execute, , , callout__end, c);
  736         THREAD_SLEEPING_OK();
  737 #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
  738         sbt2 = sbinuptime();
  739         sbt2 -= sbt1;
  740         if (sbt2 > maxdt) {
  741                 if (lastfunc != c_func || sbt2 > maxdt * 2) {
  742                         ts2 = sbttots(sbt2);
  743                         printf(
  744                 "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
  745                             c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
  746                 }
  747                 maxdt = sbt2;
  748                 lastfunc = c_func;
  749         }
  750 #endif
  751         KTR_STATE0(KTR_SCHED, "callout", cc->cc_ktr_event_name, "idle");
  752         CTR1(KTR_CALLOUT, "callout %p finished", c);
  753         if ((c_iflags & CALLOUT_RETURNUNLOCKED) == 0)
  754                 class->lc_unlock(c_lock);
  755 skip:
  756         CC_LOCK(cc);
  757         KASSERT(cc_exec_curr(cc, direct) == c, ("mishandled cc_curr"));
  758         cc_exec_curr(cc, direct) = NULL;
  759         if (cc_exec_drain(cc, direct)) {
  760                 void (*drain)(void *);
  761                 
  762                 drain = cc_exec_drain(cc, direct);
  763                 cc_exec_drain(cc, direct) = NULL;
  764                 CC_UNLOCK(cc);
  765                 drain(c_arg);
  766                 CC_LOCK(cc);
  767         }
  768         if (cc_exec_waiting(cc, direct)) {
  769                 /*
  770                  * There is someone waiting for the
  771                  * callout to complete.
  772                  * If the callout was scheduled for
  773                  * migration just cancel it.
  774                  */
  775                 if (cc_cce_migrating(cc, direct)) {
  776                         cc_cce_cleanup(cc, direct);
  777 
  778                         /*
   779                          * We should assert here that the callout is not
   780                          * destroyed, but that is not easy to do.
  781                          */
  782                         c->c_iflags &= ~CALLOUT_DFRMIGRATION;
  783                 }
  784                 cc_exec_waiting(cc, direct) = false;
  785                 CC_UNLOCK(cc);
  786                 wakeup(&cc_exec_waiting(cc, direct));
  787                 CC_LOCK(cc);
  788         } else if (cc_cce_migrating(cc, direct)) {
  789                 KASSERT((c_iflags & CALLOUT_LOCAL_ALLOC) == 0,
  790                     ("Migrating legacy callout %p", c));
  791 #ifdef SMP
  792                 /*
  793                  * If the callout was scheduled for
  794                  * migration just perform it now.
  795                  */
  796                 new_cpu = cc_migration_cpu(cc, direct);
  797                 new_time = cc_migration_time(cc, direct);
  798                 new_prec = cc_migration_prec(cc, direct);
  799                 new_func = cc_migration_func(cc, direct);
  800                 new_arg = cc_migration_arg(cc, direct);
  801                 cc_cce_cleanup(cc, direct);
  802 
  803                 /*
   804                  * We should assert here that the callout is not destroyed,
   805                  * but that is not easy to do.
   806                  *
   807                  * First, handle deferred callout stops.
  808                  */
  809                 if (!callout_migrating(c)) {
  810                         CTR3(KTR_CALLOUT,
  811                              "deferred cancelled %p func %p arg %p",
  812                              c, new_func, new_arg);
  813                         callout_cc_del(c, cc);
  814                         return;
  815                 }
  816                 c->c_iflags &= ~CALLOUT_DFRMIGRATION;
  817 
  818                 new_cc = callout_cpu_switch(c, cc, new_cpu);
  819                 flags = (direct) ? C_DIRECT_EXEC : 0;
  820                 callout_cc_add(c, new_cc, new_time, new_prec, new_func,
  821                     new_arg, new_cpu, flags);
  822                 CC_UNLOCK(new_cc);
  823                 CC_LOCK(cc);
  824 #else
  825                 panic("migration should not happen");
  826 #endif
  827         }
  828         /*
  829          * If the current callout is locally allocated (from
  830          * timeout(9)) then put it on the freelist.
  831          *
  832          * Note: we need to check the cached copy of c_iflags because
  833          * if it was not local, then it's not safe to deref the
  834          * callout pointer.
  835          */
  836         KASSERT((c_iflags & CALLOUT_LOCAL_ALLOC) == 0 ||
  837             c->c_iflags == CALLOUT_LOCAL_ALLOC,
  838             ("corrupted callout"));
  839         if (c_iflags & CALLOUT_LOCAL_ALLOC)
  840                 callout_cc_del(c, cc);
  841 }
  842 
  843 /*
  844  * The callout mechanism is based on the work of Adam M. Costello and
  845  * George Varghese, published in a technical report entitled "Redesigning
  846  * the BSD Callout and Timer Facilities" and modified slightly for inclusion
  847  * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
  848  * used in this implementation was published by G. Varghese and T. Lauck in
  849  * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
  850  * the Efficient Implementation of a Timer Facility" in the Proceedings of
  851  * the 11th ACM Annual Symposium on Operating Systems Principles,
  852  * Austin, Texas Nov 1987.
  853  */
  854 
  855 /*
  856  * Software (low priority) clock interrupt.
  857  * Run periodic events from timeout queue.
  858  */
  859 void
  860 softclock(void *arg)
  861 {
  862         struct callout_cpu *cc;
  863         struct callout *c;
  864 #ifdef CALLOUT_PROFILING
  865         int depth = 0, gcalls = 0, lockcalls = 0, mpcalls = 0;
  866 #endif
  867 
  868         cc = (struct callout_cpu *)arg;
  869         CC_LOCK(cc);
  870         while ((c = TAILQ_FIRST(&cc->cc_expireq)) != NULL) {
  871                 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
  872                 softclock_call_cc(c, cc,
  873 #ifdef CALLOUT_PROFILING
  874                     &mpcalls, &lockcalls, &gcalls,
  875 #endif
  876                     0);
  877 #ifdef CALLOUT_PROFILING
  878                 ++depth;
  879 #endif
  880         }
  881 #ifdef CALLOUT_PROFILING
  882         avg_depth += (depth * 1000 - avg_depth) >> 8;
  883         avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
  884         avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
  885         avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
  886 #endif
  887         CC_UNLOCK(cc);
  888 }
  889 
  890 /*
  891  * timeout --
  892  *      Execute a function after a specified length of time.
  893  *
  894  * untimeout --
  895  *      Cancel previous timeout function call.
  896  *
  897  * callout_handle_init --
  898  *      Initialize a handle so that using it with untimeout is benign.
  899  *
  900  *      See AT&T BCI Driver Reference Manual for specification.  This
  901  *      implementation differs from that one in that although an
  902  *      identification value is returned from timeout, the original
  903  *      arguments to timeout as well as the identifier are used to
  904  *      identify entries for untimeout.
  905  */
  906 struct callout_handle
  907 timeout(timeout_t *ftn, void *arg, int to_ticks)
  908 {
  909         struct callout_cpu *cc;
  910         struct callout *new;
  911         struct callout_handle handle;
  912 
  913         cc = CC_CPU(timeout_cpu);
  914         CC_LOCK(cc);
  915         /* Fill in the next free callout structure. */
  916         new = SLIST_FIRST(&cc->cc_callfree);
  917         if (new == NULL)
  918                 /* XXX Attempt to malloc first */
  919                 panic("timeout table full");
  920         SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
  921         callout_reset(new, to_ticks, ftn, arg);
  922         handle.callout = new;
  923         CC_UNLOCK(cc);
  924 
  925         return (handle);
  926 }
  927 
  928 void
  929 untimeout(timeout_t *ftn, void *arg, struct callout_handle handle)
  930 {
  931         struct callout_cpu *cc;
  932 
  933         /*
  934          * Check for a handle that was initialized
  935          * by callout_handle_init, but never used
  936          * for a real timeout.
  937          */
  938         if (handle.callout == NULL)
  939                 return;
  940 
  941         cc = callout_lock(handle.callout);
  942         if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
  943                 callout_stop(handle.callout);
  944         CC_UNLOCK(cc);
  945 }
  946 
  947 void
  948 callout_handle_init(struct callout_handle *handle)
  949 {
  950         handle->callout = NULL;
  951 }
  952 
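/*
 * Editor's note: a hedged usage sketch, not part of the original file, of
 * the legacy timeout(9) interface implemented above.  The consumer, its
 * softc and the one-second (hz ticks) delay are hypothetical.
 */
struct example_legacy_softc {
        struct callout_handle   el_handle;
};

static void
example_legacy_expire(void *arg)
{

        /* Runs once in softclock context, hz ticks after being scheduled. */
}

static __unused void
example_legacy_start(struct example_legacy_softc *sc)
{

        callout_handle_init(&sc->el_handle);
        sc->el_handle = timeout(example_legacy_expire, sc, hz);
}

static __unused void
example_legacy_cancel(struct example_legacy_softc *sc)
{

        /* Benign even if el_handle was never used for a real timeout. */
        untimeout(example_legacy_expire, sc, sc->el_handle);
}
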
  953 void
  954 callout_when(sbintime_t sbt, sbintime_t precision, int flags,
  955     sbintime_t *res, sbintime_t *prec_res)
  956 {
  957         sbintime_t to_sbt, to_pr;
  958 
  959         if ((flags & (C_ABSOLUTE | C_PRECALC)) != 0) {
  960                 *res = sbt;
  961                 *prec_res = precision;
  962                 return;
  963         }
  964         if ((flags & C_HARDCLOCK) != 0 && sbt < tick_sbt)
  965                 sbt = tick_sbt;
  966         if ((flags & C_HARDCLOCK) != 0 ||
  967 #ifdef NO_EVENTTIMERS
  968             sbt >= sbt_timethreshold) {
  969                 to_sbt = getsbinuptime();
  970 
  971                 /* Add safety belt for the case of hz > 1000. */
  972                 to_sbt += tc_tick_sbt - tick_sbt;
  973 #else
  974             sbt >= sbt_tickthreshold) {
  975                 /*
  976                  * Obtain the time of the last hardclock() call on
  977                  * this CPU directly from the kern_clocksource.c.
  978                  * This value is per-CPU, but it is equal for all
  979                  * active ones.
  980                  */
  981 #ifdef __LP64__
  982                 to_sbt = DPCPU_GET(hardclocktime);
  983 #else
  984                 spinlock_enter();
  985                 to_sbt = DPCPU_GET(hardclocktime);
  986                 spinlock_exit();
  987 #endif
  988 #endif
  989                 if (cold && to_sbt == 0)
  990                         to_sbt = sbinuptime();
  991                 if ((flags & C_HARDCLOCK) == 0)
  992                         to_sbt += tick_sbt;
  993         } else
  994                 to_sbt = sbinuptime();
  995         if (SBT_MAX - to_sbt < sbt)
  996                 to_sbt = SBT_MAX;
  997         else
  998                 to_sbt += sbt;
  999         *res = to_sbt;
 1000         to_pr = ((C_PRELGET(flags) < 0) ? sbt >> tc_precexp :
 1001             sbt >> C_PRELGET(flags));
 1002         *prec_res = to_pr > precision ? to_pr : precision;
 1003 }
 1004 
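/*
 * Editor's note: an illustrative sketch, not part of the original file, of
 * how callout_when() above resolves the time and precision of a request.
 * It assumes the C_PREL() relative-precision encoding from <sys/callout.h>.
 */
static __unused void
example_callout_when(void)
{
        sbintime_t abs_time, abs_prec;

        /* A 1 s relative timeout allowing up to 1/4 s (sbt >> 2) of slop. */
        callout_when(SBT_1S, 0, C_PREL(2), &abs_time, &abs_prec);
        /*
         * abs_time is now an absolute deadline on the sbinuptime() scale,
         * and abs_prec is SBT_1S >> 2, the larger of the relative slop and
         * the caller-supplied precision (0 here).
         */
}
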
 1005 /*
 1006  * New interface; clients allocate their own callout structures.
 1007  *
 1008  * callout_reset() - establish or change a timeout
 1009  * callout_stop() - disestablish a timeout
 1010  * callout_init() - initialize a callout structure so that it can
 1011  *      safely be passed to callout_reset() and callout_stop()
 1012  *
 1013  * <sys/callout.h> defines three convenience macros:
 1014  *
  1015  * callout_active() - returns true if the callout has not been stopped,
  1016  *      drained, or deactivated since the last time the callout was
  1017  *      reset.
  1018  * callout_pending() - returns true if the callout is still waiting for its timeout
 1019  * callout_deactivate() - marks the callout as having been serviced
 1020  */
 1021 int
 1022 callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t prec,
 1023     void (*ftn)(void *), void *arg, int cpu, int flags)
 1024 {
 1025         sbintime_t to_sbt, precision;
 1026         struct callout_cpu *cc;
 1027         int cancelled, direct;
 1028         int ignore_cpu=0;
 1029 
 1030         cancelled = 0;
 1031         if (cpu == -1) {
 1032                 ignore_cpu = 1;
 1033         } else if ((cpu >= MAXCPU) ||
 1034                    ((CC_CPU(cpu))->cc_inited == 0)) {
 1035                 /* Invalid CPU spec */
 1036                 panic("Invalid CPU in callout %d", cpu);
 1037         }
 1038         callout_when(sbt, prec, flags, &to_sbt, &precision);
 1039 
 1040         /* 
  1041          * This flag used to be set in callout_cc_add, but the first
  1042          * time this function is called we could end up with the wrong
  1043          * value of 'direct' if it is not determined before the add.
 1044          */
 1045         if (flags & C_DIRECT_EXEC) {
 1046                 direct = 1;
 1047         } else {
 1048                 direct = 0;
 1049         }
 1050         KASSERT(!direct || c->c_lock == NULL,
 1051             ("%s: direct callout %p has lock", __func__, c));
 1052         cc = callout_lock(c);
 1053         /*
  1054          * Don't allow migration of pre-allocated callouts lest they
  1055          * become unbalanced, and also handle the case where the user
  1056          * does not care which CPU the callout runs on.
 1057          */
 1058         if ((c->c_iflags & CALLOUT_LOCAL_ALLOC) ||
 1059             ignore_cpu) {
 1060                 cpu = c->c_cpu;
 1061         }
 1062 
 1063         if (cc_exec_curr(cc, direct) == c) {
 1064                 /*
 1065                  * We're being asked to reschedule a callout which is
 1066                  * currently in progress.  If there is a lock then we
 1067                  * can cancel the callout if it has not really started.
 1068                  */
 1069                 if (c->c_lock != NULL && !cc_exec_cancel(cc, direct))
 1070                         cancelled = cc_exec_cancel(cc, direct) = true;
 1071                 if (cc_exec_waiting(cc, direct) || cc_exec_drain(cc, direct)) {
 1072                         /*
 1073                          * Someone has called callout_drain to kill this
 1074                          * callout.  Don't reschedule.
 1075                          */
 1076                         CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
 1077                             cancelled ? "cancelled" : "failed to cancel",
 1078                             c, c->c_func, c->c_arg);
 1079                         CC_UNLOCK(cc);
 1080                         return (cancelled);
 1081                 }
 1082 #ifdef SMP
 1083                 if (callout_migrating(c)) {
 1084                         /* 
 1085                          * This only occurs when a second callout_reset_sbt_on
 1086                          * is made after a previous one moved it into
 1087                          * deferred migration (below). Note we do *not* change
 1088                          * the prev_cpu even though the previous target may
 1089                          * be different.
 1090                          */
 1091                         cc_migration_cpu(cc, direct) = cpu;
 1092                         cc_migration_time(cc, direct) = to_sbt;
 1093                         cc_migration_prec(cc, direct) = precision;
 1094                         cc_migration_func(cc, direct) = ftn;
 1095                         cc_migration_arg(cc, direct) = arg;
 1096                         cancelled = 1;
 1097                         CC_UNLOCK(cc);
 1098                         return (cancelled);
 1099                 }
 1100 #endif
 1101         }
 1102         if (c->c_iflags & CALLOUT_PENDING) {
 1103                 if ((c->c_iflags & CALLOUT_PROCESSED) == 0) {
 1104                         if (cc_exec_next(cc) == c)
 1105                                 cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
 1106                         LIST_REMOVE(c, c_links.le);
 1107                 } else {
 1108                         TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
 1109                 }
 1110                 cancelled = 1;
 1111                 c->c_iflags &= ~ CALLOUT_PENDING;
 1112                 c->c_flags &= ~ CALLOUT_ACTIVE;
 1113         }
 1114 
 1115 #ifdef SMP
 1116         /*
 1117          * If the callout must migrate try to perform it immediately.
 1118          * If the callout is currently running, just defer the migration
 1119          * to a more appropriate moment.
 1120          */
 1121         if (c->c_cpu != cpu) {
 1122                 if (cc_exec_curr(cc, direct) == c) {
 1123                         /* 
 1124                          * Pending will have been removed since we are
 1125                          * actually executing the callout on another
 1126                          * CPU. That callout should be waiting on the
  1127                          * lock the caller holds.  If we set both
  1128                          * active and pending, then after we return and the
  1129                          * executing callout acquires its lock and proceeds,
  1130                          * it will see that pending is true and return.
  1131                          * On return from the actual callout execution
  1132                          * the migration will occur in softclock_call_cc
  1133                          * and this new callout will be placed on the
  1134                          * new CPU via a call to callout_cpu_switch(), which
  1135                          * will get the lock on the right CPU, followed
  1136                          * by a call to callout_cc_add() which will add it there.
 1137                          * (see above in softclock_call_cc()).
 1138                          */
 1139                         cc_migration_cpu(cc, direct) = cpu;
 1140                         cc_migration_time(cc, direct) = to_sbt;
 1141                         cc_migration_prec(cc, direct) = precision;
 1142                         cc_migration_func(cc, direct) = ftn;
 1143                         cc_migration_arg(cc, direct) = arg;
 1144                         c->c_iflags |= (CALLOUT_DFRMIGRATION | CALLOUT_PENDING);
 1145                         c->c_flags |= CALLOUT_ACTIVE;
 1146                         CTR6(KTR_CALLOUT,
 1147                     "migration of %p func %p arg %p in %d.%08x to %u deferred",
 1148                             c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
 1149                             (u_int)(to_sbt & 0xffffffff), cpu);
 1150                         CC_UNLOCK(cc);
 1151                         return (cancelled);
 1152                 }
 1153                 cc = callout_cpu_switch(c, cc, cpu);
 1154         }
 1155 #endif
 1156 
 1157         callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags);
 1158         CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x",
 1159             cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
 1160             (u_int)(to_sbt & 0xffffffff));
 1161         CC_UNLOCK(cc);
 1162 
 1163         return (cancelled);
 1164 }
 1165 
 1166 /*
 1167  * Common idioms that can be optimized in the future.
 1168  */
 1169 int
 1170 callout_schedule_on(struct callout *c, int to_ticks, int cpu)
 1171 {
 1172         return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
 1173 }
 1174 
 1175 int
 1176 callout_schedule(struct callout *c, int to_ticks)
 1177 {
 1178         return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
 1179 }
 1180 
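/*
 * Editor's note: a hedged usage sketch, not part of the original file, of
 * the client-allocated callout interface described above, using a callout
 * associated with a driver mutex via callout_init_mtx().  The structure,
 * lock name and one-second period are hypothetical.
 */
struct example_timer_softc {
        struct mtx      et_mtx;
        struct callout  et_callout;
};

static void
example_timer_fire(void *arg)
{
        struct example_timer_softc *sc = arg;

        /* The associated mutex is held across the handler. */
        mtx_assert(&sc->et_mtx, MA_OWNED);
        /* ... periodic work ... */
        callout_schedule(&sc->et_callout, hz);  /* re-arm one second out */
}

static __unused void
example_timer_attach(struct example_timer_softc *sc)
{

        mtx_init(&sc->et_mtx, "example timer", NULL, MTX_DEF);
        callout_init_mtx(&sc->et_callout, &sc->et_mtx, 0);
        mtx_lock(&sc->et_mtx);
        callout_reset(&sc->et_callout, hz, example_timer_fire, sc);
        mtx_unlock(&sc->et_mtx);
}

static __unused void
example_timer_detach(struct example_timer_softc *sc)
{

        /*
         * Do not hold et_mtx here: callout_drain() may sleep waiting for
         * an in-flight invocation that needs the mutex.
         */
        callout_drain(&sc->et_callout);
        mtx_destroy(&sc->et_mtx);
}
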
 1181 int
 1182 _callout_stop_safe(struct callout *c, int flags, void (*drain)(void *))
 1183 {
 1184         struct callout_cpu *cc, *old_cc;
 1185         struct lock_class *class;
 1186         int direct, sq_locked, use_lock;
 1187         int cancelled, not_on_a_list;
 1188 
 1189         if ((flags & CS_DRAIN) != 0)
 1190                 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, c->c_lock,
 1191                     "calling %s", __func__);
 1192 
 1193         /*
 1194          * Some old subsystems don't hold Giant while running a callout_stop(),
 1195          * so just discard this check for the moment.
 1196          */
 1197         if ((flags & CS_DRAIN) == 0 && c->c_lock != NULL) {
 1198                 if (c->c_lock == &Giant.lock_object)
 1199                         use_lock = mtx_owned(&Giant);
 1200                 else {
 1201                         use_lock = 1;
 1202                         class = LOCK_CLASS(c->c_lock);
 1203                         class->lc_assert(c->c_lock, LA_XLOCKED);
 1204                 }
 1205         } else
 1206                 use_lock = 0;
 1207         if (c->c_iflags & CALLOUT_DIRECT) {
 1208                 direct = 1;
 1209         } else {
 1210                 direct = 0;
 1211         }
 1212         sq_locked = 0;
 1213         old_cc = NULL;
 1214 again:
 1215         cc = callout_lock(c);
 1216 
 1217         if ((c->c_iflags & (CALLOUT_DFRMIGRATION | CALLOUT_PENDING)) ==
 1218             (CALLOUT_DFRMIGRATION | CALLOUT_PENDING) &&
 1219             ((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE)) {
 1220                 /*
 1221                  * Special case where this slipped in while we
 1222                  * were migrating *as* the callout is about to
 1223                  * execute. The caller probably holds the lock
 1224                  * the callout wants.
 1225                  *
 1226                  * Get rid of the migration first. Then set
 1227                  * the flag that tells this code *not* to
  1228                  * try to remove it from any lists (it's not
 1229                  * on one yet). When the callout wheel runs,
 1230                  * it will ignore this callout.
 1231                  */
 1232                 c->c_iflags &= ~CALLOUT_PENDING;
 1233                 c->c_flags &= ~CALLOUT_ACTIVE;
 1234                 not_on_a_list = 1;
 1235         } else {
 1236                 not_on_a_list = 0;
 1237         }
 1238 
 1239         /*
 1240          * If the callout was migrating while the callout cpu lock was
 1241          * dropped,  just drop the sleepqueue lock and check the states
 1242          * again.
 1243          */
 1244         if (sq_locked != 0 && cc != old_cc) {
 1245 #ifdef SMP
 1246                 CC_UNLOCK(cc);
 1247                 sleepq_release(&cc_exec_waiting(old_cc, direct));
 1248                 sq_locked = 0;
 1249                 old_cc = NULL;
 1250                 goto again;
 1251 #else
 1252                 panic("migration should not happen");
 1253 #endif
 1254         }
 1255 
 1256         /*
 1257          * If the callout is running, try to stop it or drain it.
 1258          */
 1259         if (cc_exec_curr(cc, direct) == c) {
 1260                 /*
  1261                  * Whether we succeed in stopping it or not, we must clear the
 1262                  * active flag - this is what API users expect.  If we're
 1263                  * draining and the callout is currently executing, first wait
 1264                  * until it finishes.
 1265                  */
 1266                 if ((flags & CS_DRAIN) == 0)
 1267                         c->c_flags &= ~CALLOUT_ACTIVE;
 1268 
 1269                 if ((flags & CS_DRAIN) != 0) {
 1270                         /*
 1271                          * The current callout is running (or just
 1272                          * about to run) and blocking is allowed, so
 1273                          * just wait for the current invocation to
 1274                          * finish.
 1275                          */
 1276                         while (cc_exec_curr(cc, direct) == c) {
 1277                                 /*
 1278                                  * Use direct calls to sleepqueue interface
 1279                                  * instead of cv/msleep in order to avoid
 1280                                  * a LOR between cc_lock and sleepqueue
 1281                                  * chain spinlocks.  This piece of code
 1282                                  * emulates a msleep_spin() call actually.
 1283                                  *
 1284                                  * If we already have the sleepqueue chain
 1285                                  * locked, then we can safely block.  If we
 1286                                  * don't already have it locked, however,
 1287                                  * we have to drop the cc_lock to lock
 1288                                  * it.  This opens several races, so we
 1289                                  * restart at the beginning once we have
 1290                                  * both locks.  If nothing has changed, then
 1291                                  * we will end up back here with sq_locked
 1292                                  * set.
 1293                                  */
 1294                                 if (!sq_locked) {
 1295                                         CC_UNLOCK(cc);
 1296                                         sleepq_lock(
 1297                                             &cc_exec_waiting(cc, direct));
 1298                                         sq_locked = 1;
 1299                                         old_cc = cc;
 1300                                         goto again;
 1301                                 }
 1302 
 1303                                 /*
 1304                                  * Migration could be cancelled here, but
 1305                                  * since it is still not certain when the
 1306                                  * callout will be packed up, just let
 1307                                  * softclock() take care of it.
 1308                                  */
 1309                                 cc_exec_waiting(cc, direct) = true;
 1310                                 DROP_GIANT();
 1311                                 CC_UNLOCK(cc);
 1312                                 sleepq_add(
 1313                                     &cc_exec_waiting(cc, direct),
 1314                                     &cc->cc_lock.lock_object, "codrain",
 1315                                     SLEEPQ_SLEEP, 0);
 1316                                 sleepq_wait(
 1317                                     &cc_exec_waiting(cc, direct),
 1318                                              0);
 1319                                 sq_locked = 0;
 1320                                 old_cc = NULL;
 1321 
 1322                                 /* Reacquire locks previously released. */
 1323                                 PICKUP_GIANT();
 1324                                 CC_LOCK(cc);
 1325                         }
 1326                         c->c_flags &= ~CALLOUT_ACTIVE;
 1327                 } else if (use_lock &&
 1328                            !cc_exec_cancel(cc, direct) && (drain == NULL)) {
 1329                         
 1330                         /*
 1331                          * The current callout is waiting for its
 1332                          * lock which we hold.  Cancel the callout
 1333                          * and return.  After our caller drops the
 1334                          * lock, the callout will be skipped in
 1335                          * softclock(). This *only* works with a
 1336                          * callout_stop() *not* callout_drain() or
 1337                          * callout_async_drain().
 1338                          */
 1339                         cc_exec_cancel(cc, direct) = true;
 1340                         CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
 1341                             c, c->c_func, c->c_arg);
 1342                         KASSERT(!cc_cce_migrating(cc, direct),
 1343                             ("callout wrongly scheduled for migration"));
 1344                         if (callout_migrating(c)) {
 1345                                 c->c_iflags &= ~CALLOUT_DFRMIGRATION;
 1346 #ifdef SMP
 1347                                 cc_migration_cpu(cc, direct) = CPUBLOCK;
 1348                                 cc_migration_time(cc, direct) = 0;
 1349                                 cc_migration_prec(cc, direct) = 0;
 1350                                 cc_migration_func(cc, direct) = NULL;
 1351                                 cc_migration_arg(cc, direct) = NULL;
 1352 #endif
 1353                         }
 1354                         CC_UNLOCK(cc);
 1355                         KASSERT(!sq_locked, ("sleepqueue chain locked"));
 1356                         return (1);
 1357                 } else if (callout_migrating(c)) {
 1358                         /*
 1359                          * The callout is currently being serviced
 1360                          * and the "next" callout is scheduled at
 1361                          * its completion with a migration. We remove
 1362                          * the migration flag so it *won't* get rescheduled,
 1363                          * but we can't stop the one that's running, so
 1364                          * we return 0 (or 1 if CS_EXECUTING was passed).
 1365                          */
 1366                         c->c_iflags &= ~CALLOUT_DFRMIGRATION;
 1367 #ifdef SMP
 1368                         /*
 1369                          * We can't call cc_cce_cleanup here since it
 1370                          * would also clear .ce_curr while the callout
 1371                          * is still running.  Clearing the migration
 1372                          * state by hand instead prevents a reschedule
 1373                          * of the callout when its execution completes.
 1374                          */
 1375                         cc_migration_cpu(cc, direct) = CPUBLOCK;
 1376                         cc_migration_time(cc, direct) = 0;
 1377                         cc_migration_prec(cc, direct) = 0;
 1378                         cc_migration_func(cc, direct) = NULL;
 1379                         cc_migration_arg(cc, direct) = NULL;
 1380 #endif
 1381                         CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
 1382                             c, c->c_func, c->c_arg);
 1383                         if (drain) {
 1384                                 cc_exec_drain(cc, direct) = drain;
 1385                         }
 1386                         CC_UNLOCK(cc);
 1387                         return ((flags & CS_EXECUTING) != 0);
 1388                 }
 1389                 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
 1390                     c, c->c_func, c->c_arg);
 1391                 if (drain) {
 1392                         cc_exec_drain(cc, direct) = drain;
 1393                 }
 1394                 KASSERT(!sq_locked, ("sleepqueue chain still locked"));
 1395                 cancelled = ((flags & CS_EXECUTING) != 0);
 1396         } else
 1397                 cancelled = 1;
 1398 
 1399         if (sq_locked)
 1400                 sleepq_release(&cc_exec_waiting(cc, direct));
 1401 
 1402         if ((c->c_iflags & CALLOUT_PENDING) == 0) {
 1403                 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
 1404                     c, c->c_func, c->c_arg);
 1405                 /*
 1406                  * Return a negative value if the callout is neither
 1407                  * scheduled nor executing.
 1408                  */
 1409                 if (cc_exec_curr(cc, direct) != c)
 1410                         cancelled = -1;
 1411                 CC_UNLOCK(cc);
 1412                 return (cancelled);
 1413         }
 1414 
 1415         c->c_iflags &= ~CALLOUT_PENDING;
 1416         c->c_flags &= ~CALLOUT_ACTIVE;
 1417 
 1418         CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
 1419             c, c->c_func, c->c_arg);
 1420         if (not_on_a_list == 0) {
 1421                 if ((c->c_iflags & CALLOUT_PROCESSED) == 0) {
 1422                         if (cc_exec_next(cc) == c)
 1423                                 cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
 1424                         LIST_REMOVE(c, c_links.le);
 1425                 } else {
 1426                         TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
 1427                 }
 1428         }
 1429         callout_cc_del(c, cc);
 1430         CC_UNLOCK(cc);
 1431         return (cancelled);
 1432 }
 1433 
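The return convention implemented above is what callers rely on: 1 means a pending callout was cancelled before it could run, 0 means the callout is currently running and could not be stopped, and -1 means it was neither scheduled nor running.  A minimal consumer sketch, not part of kern_timeout.c; foo_softc, sc_mtx, sc_tick, and foo_stop_timer are hypothetical names:

/*
 * Hypothetical driver teardown (illustrative sketch only), assuming the
 * callout was set up with callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0)
 * and the usual <sys/lock.h>, <sys/mutex.h>, <sys/callout.h> includes.
 */
static void
foo_stop_timer(struct foo_softc *sc)
{

        mtx_lock(&sc->sc_mtx);
        /* Prevent future invocations; an in-flight handler may still run. */
        callout_stop(&sc->sc_tick);
        mtx_unlock(&sc->sc_mtx);

        /*
         * Sleep until any in-flight invocation has finished.  This must
         * not be called with the callout's associated lock held.
         */
        callout_drain(&sc->sc_tick);
}
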
 1434 void
 1435 callout_init(struct callout *c, int mpsafe)
 1436 {
 1437         bzero(c, sizeof *c);
 1438         if (mpsafe) {
 1439                 c->c_lock = NULL;
 1440                 c->c_iflags = CALLOUT_RETURNUNLOCKED;
 1441         } else {
 1442                 c->c_lock = &Giant.lock_object;
 1443                 c->c_iflags = 0;
 1444         }
 1445         c->c_cpu = timeout_cpu;
 1446 }
 1447 
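A hedged usage sketch for the callout_init() path above: with mpsafe nonzero the callout carries no associated lock, so the handler performs its own synchronization.  The foo_softc, sc_tick, foo_tick, and foo_attach_timer names are illustrative, not from this file:

static void
foo_tick(void *arg)
{
        struct foo_softc *sc = arg;

        /* ... periodic work, with the handler doing its own locking ... */
        callout_reset(&sc->sc_tick, hz, foo_tick, sc);  /* rearm in ~1 s */
}

static void
foo_attach_timer(struct foo_softc *sc)
{

        callout_init(&sc->sc_tick, 1);          /* MP-safe, no associated lock */
        callout_reset(&sc->sc_tick, hz, foo_tick, sc);
}
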
 1448 void
 1449 _callout_init_lock(struct callout *c, struct lock_object *lock, int flags)
 1450 {
 1451         bzero(c, sizeof *c);
 1452         c->c_lock = lock;
 1453         KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
 1454             ("callout_init_lock: bad flags %d", flags));
 1455         KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
 1456             ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
 1457         KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
 1458             (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
 1459             __func__));
 1460         c->c_iflags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
 1461         c->c_cpu = timeout_cpu;
 1462 }
 1463 
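_callout_init_lock() is normally reached through wrapper macros such as callout_init_mtx() and callout_init_rw(), which pass the embedded lock_object of a regular mutex or rwlock; the KASSERTs above enforce that the lock is neither a spin lock nor sleepable and that CALLOUT_RETURNUNLOCKED is only used together with a lock.  A minimal sketch under those assumptions (sc_mtx, sc_tick, and foo_tick are hypothetical names):

        /* Illustrative only: tie the callout to a driver mutex. */
        mtx_init(&sc->sc_mtx, "foo timer", NULL, MTX_DEF);
        callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);

        /* Later, from a context that already holds sc_mtx: */
        callout_reset(&sc->sc_tick, hz, foo_tick, sc);

softclock() then acquires sc_mtx before invoking foo_tick(), so the handler runs with the driver lock held.
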
 1464 #ifdef APM_FIXUP_CALLTODO
 1465 /* 
 1466  * Adjust the kernel calltodo timeout list.  This routine is used after 
 1467  * an APM resume to recalculate the calltodo timer list values with the 
 1468  * number of hz's we have been sleeping.  The next hardclock() will detect 
 1469  * that there are fired timers and run softclock() to execute them.
 1470  *
 1471  * Please note, I have not done an exhaustive analysis of what code this
 1472  * might break.  I am motivated to have my select()'s and alarm()'s that
 1473  * have expired during suspend firing upon resume so that the applications
 1474  * which set the timer can do the maintenance the timer was for as close
 1475  * as possible to the originally intended time.  Testing this code for a 
 1476  * week showed that resuming from a suspend resulted in 22 to 25 timers 
 1477  * firing, which seemed independent of whether the suspend was 2 hours or
 1478  * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 1479  */
 1480 void
 1481 adjust_timeout_calltodo(struct timeval *time_change)
 1482 {
 1483         struct callout *p;
 1484         unsigned long delta_ticks;
 1485 
 1486         /* 
 1487          * How many ticks were we asleep?
 1488          * (stolen from tvtohz()).
 1489          */
 1490 
 1491         /* Don't do anything if the time change is negative. */
 1492         if (time_change->tv_sec < 0)
 1493                 return;
 1494         else if (time_change->tv_sec <= LONG_MAX / 1000000)
 1495                 delta_ticks = howmany(time_change->tv_sec * 1000000 +
 1496                     time_change->tv_usec, tick) + 1;
 1497         else if (time_change->tv_sec <= LONG_MAX / hz)
 1498                 delta_ticks = time_change->tv_sec * hz +
 1499                     howmany(time_change->tv_usec, tick) + 1;
 1500         else
 1501                 delta_ticks = LONG_MAX;
 1502 
 1503         if (delta_ticks > INT_MAX)
 1504                 delta_ticks = INT_MAX;
 1505 
 1506         /* 
 1507          * Now rip through the timer calltodo list looking for timers
 1508          * to expire.
 1509          */
 1510 
 1511         /* don't collide with softclock() */
 1512         CC_LOCK(cc);
 1513         for (p = calltodo.c_next; p != NULL; p = p->c_next) {
 1514                 p->c_time -= delta_ticks;
 1515 
 1516                 /* Break if the timer had more time on it than delta_ticks */
 1517                 if (p->c_time > 0)
 1518                         break;
 1519 
 1520                 /* take back the ticks the timer didn't use (p->c_time <= 0) */
 1521                 delta_ticks = -p->c_time;
 1522         }
 1523         CC_UNLOCK(cc);
 1524 
 1525         return;
 1526 }
 1527 #endif /* APM_FIXUP_CALLTODO */
 1528 
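As a worked example of the conversion above, assuming the common configuration hz = 1000 (so the global tick is 1000000 / hz = 1000 microseconds per tick): a suspend of 2 seconds and 500300 microseconds yields delta_ticks = howmany(2 * 1000000 + 500300, 1000) + 1 = 2501 + 1 = 2502.  At that hz the INT_MAX clamp only matters for sleeps on the order of weeks.
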
 1529 static int
 1530 flssbt(sbintime_t sbt)
 1531 {
 1532 
 1533         sbt += (uint64_t)sbt >> 1;
 1534         if (sizeof(long) >= sizeof(sbintime_t))
 1535                 return (flsl(sbt));
 1536         if (sbt >= SBT_1S)
 1537                 return (flsl(((uint64_t)sbt) >> 32) + 32);
 1538         return (flsl(sbt));
 1539 }
 1540 
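flssbt() above maps an interval to a power-of-two histogram bucket; adding half the value first makes the bucketing round to the nearest power of two instead of always rounding down.  A standalone userland sketch for experimenting with the rule, assuming flsll() from <strings.h> as provided by FreeBSD libc (this is not part of the kernel build):

#include <stdint.h>
#include <stdio.h>
#include <strings.h>

/* Same rule as flssbt(), minus the 32-bit-long special cases. */
static int
flssbt_demo(int64_t sbt)
{

        sbt += (uint64_t)sbt >> 1;
        return (flsll(sbt));
}

int
main(void)
{

        /* Prints "2 3 4": 2 keeps bucket 2, while 3 and 6 round upward. */
        printf("%d %d %d\n", flssbt_demo(2), flssbt_demo(3), flssbt_demo(6));
        return (0);
}
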
 1541 /*
 1542  * Dump immediate statistic snapshot of the scheduled callouts.
 1543  */
 1544 static int
 1545 sysctl_kern_callout_stat(SYSCTL_HANDLER_ARGS)
 1546 {
 1547         struct callout *tmp;
 1548         struct callout_cpu *cc;
 1549         struct callout_list *sc;
 1550         sbintime_t maxpr, maxt, medpr, medt, now, spr, st, t;
 1551         int ct[64], cpr[64], ccpbk[32];
 1552         int error, val, i, count, tcum, pcum, maxc, c, medc;
 1553 #ifdef SMP
 1554         int cpu;
 1555 #endif
 1556 
 1557         val = 0;
 1558         error = sysctl_handle_int(oidp, &val, 0, req);
 1559         if (error != 0 || req->newptr == NULL)
 1560                 return (error);
 1561         count = maxc = 0;
 1562         st = spr = maxt = maxpr = 0;
 1563         bzero(ccpbk, sizeof(ccpbk));
 1564         bzero(ct, sizeof(ct));
 1565         bzero(cpr, sizeof(cpr));
 1566         now = sbinuptime();
 1567 #ifdef SMP
 1568         CPU_FOREACH(cpu) {
 1569                 cc = CC_CPU(cpu);
 1570 #else
 1571                 cc = CC_CPU(timeout_cpu);
 1572 #endif
 1573                 CC_LOCK(cc);
 1574                 for (i = 0; i < callwheelsize; i++) {
 1575                         sc = &cc->cc_callwheel[i];
 1576                         c = 0;
 1577                         LIST_FOREACH(tmp, sc, c_links.le) {
 1578                                 c++;
 1579                                 t = tmp->c_time - now;
 1580                                 if (t < 0)
 1581                                         t = 0;
 1582                                 st += t / SBT_1US;
 1583                                 spr += tmp->c_precision / SBT_1US;
 1584                                 if (t > maxt)
 1585                                         maxt = t;
 1586                                 if (tmp->c_precision > maxpr)
 1587                                         maxpr = tmp->c_precision;
 1588                                 ct[flssbt(t)]++;
 1589                                 cpr[flssbt(tmp->c_precision)]++;
 1590                         }
 1591                         if (c > maxc)
 1592                                 maxc = c;
 1593                         ccpbk[fls(c + c / 2)]++;
 1594                         count += c;
 1595                 }
 1596                 CC_UNLOCK(cc);
 1597 #ifdef SMP
 1598         }
 1599 #endif
 1600 
 1601         for (i = 0, tcum = 0; i < 64 && tcum < count / 2; i++)
 1602                 tcum += ct[i];
 1603         medt = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
 1604         for (i = 0, pcum = 0; i < 64 && pcum < count / 2; i++)
 1605                 pcum += cpr[i];
 1606         medpr = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
 1607         for (i = 0, c = 0; i < 32 && c < count / 2; i++)
 1608                 c += ccpbk[i];
 1609         medc = (i >= 2) ? (1 << (i - 2)) : 0;
 1610 
 1611         printf("Scheduled callouts statistic snapshot:\n");
 1612         printf("  Callouts: %6d  Buckets: %6d*%-3d  Bucket size: 0.%06ds\n",
 1613             count, callwheelsize, mp_ncpus, 1000000 >> CC_HASH_SHIFT);
 1614         printf("  C/Bk: med %5d         avg %6d.%06jd  max %6d\n",
 1615             medc,
 1616             count / callwheelsize / mp_ncpus,
 1617             (uint64_t)count * 1000000 / callwheelsize / mp_ncpus % 1000000,
 1618             maxc);
 1619         printf("  Time: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
 1620             medt / SBT_1S, (medt & 0xffffffff) * 1000000 >> 32,
 1621             (st / count) / 1000000, (st / count) % 1000000,
 1622             maxt / SBT_1S, (maxt & 0xffffffff) * 1000000 >> 32);
 1623         printf("  Prec: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
 1624             medpr / SBT_1S, (medpr & 0xffffffff) * 1000000 >> 32,
 1625             (spr / count) / 1000000, (spr / count) % 1000000,
 1626             maxpr / SBT_1S, (maxpr & 0xffffffff) * 1000000 >> 32);
 1627         printf("  Distribution:       \tbuckets\t   time\t   tcum\t"
 1628             "   prec\t   pcum\n");
 1629         for (i = 0, tcum = pcum = 0; i < 64; i++) {
 1630                 if (ct[i] == 0 && cpr[i] == 0)
 1631                         continue;
 1632                 t = (i != 0) ? (((sbintime_t)1) << (i - 1)) : 0;
 1633                 tcum += ct[i];
 1634                 pcum += cpr[i];
 1635                 printf("  %10jd.%06jds\t 2**%d\t%7d\t%7d\t%7d\t%7d\n",
 1636                     t / SBT_1S, (t & 0xffffffff) * 1000000 >> 32,
 1637                     i - 1 - (32 - CC_HASH_SHIFT),
 1638                     ct[i], tcum, cpr[i], pcum);
 1639         }
 1640         return (error);
 1641 }
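
The (x & 0xffffffff) * 1000000 >> 32 expressions in the printfs above convert the fractional half of an sbintime_t, a 32.32 fixed-point count of seconds, into microseconds.  An illustrative helper showing the same split (not a kernel interface; sbintime_t comes from <sys/time.h>):

/* Illustrative only: split a non-negative sbintime_t into s and us. */
static void
sbt_to_sec_usec(sbintime_t sbt, int64_t *sec, int64_t *usec)
{

        *sec = sbt >> 32;                                /* whole seconds */
        *usec = ((sbt & 0xffffffff) * 1000000) >> 32;    /* fraction, in us */
}
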
 1642 SYSCTL_PROC(_kern, OID_AUTO, callout_stat,
 1643     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
 1644     0, 0, sysctl_kern_callout_stat, "I",
 1645     "Dump immediate statistic snapshot of the scheduled callouts");
 1646 
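Because the handler above only produces output on a write (it returns early when req->newptr is NULL), the snapshot is triggered by setting the OID, e.g. running "sysctl kern.callout_stat=1" from a privileged shell; the report goes to the console/message buffer via printf().  An equivalent userland trigger, sketched with sysctlbyname():

#include <sys/types.h>
#include <sys/sysctl.h>

/* Illustrative only: writing any integer triggers the kernel-side dump. */
int
trigger_callout_stat(void)
{
        int one = 1;

        return (sysctlbyname("kern.callout_stat", NULL, NULL,
            &one, sizeof(one)));
}
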
 1647 #ifdef DDB
 1648 static void
 1649 _show_callout(struct callout *c)
 1650 {
 1651 
 1652         db_printf("callout %p\n", c);
 1653 #define C_DB_PRINTF(f, e)       db_printf("   %s = " f "\n", #e, c->e);
 1654         db_printf("   &c_links = %p\n", &(c->c_links));
 1655         C_DB_PRINTF("%" PRId64, c_time);
 1656         C_DB_PRINTF("%" PRId64, c_precision);
 1657         C_DB_PRINTF("%p",       c_arg);
 1658         C_DB_PRINTF("%p",       c_func);
 1659         C_DB_PRINTF("%p",       c_lock);
 1660         C_DB_PRINTF("%#x",      c_flags);
 1661         C_DB_PRINTF("%#x",      c_iflags);
 1662         C_DB_PRINTF("%d",       c_cpu);
 1663 #undef  C_DB_PRINTF
 1664 }
 1665 
 1666 DB_SHOW_COMMAND(callout, db_show_callout)
 1667 {
 1668 
 1669         if (!have_addr) {
 1670                 db_printf("usage: show callout <struct callout *>\n");
 1671                 return;
 1672         }
 1673 
 1674         _show_callout((struct callout *)addr);
 1675 }
 1676 #endif /* DDB */
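
From the DDB prompt the command follows the usage string above, for example "show callout 0xfffff80012345678" (the address, a pointer to a struct callout, is hypothetical here); the fields printed are exactly those listed in _show_callout().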
