FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_timeout.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-3-Clause
    3  *
    4  * Copyright (c) 1982, 1986, 1991, 1993
    5  *      The Regents of the University of California.  All rights reserved.
    6  * (c) UNIX System Laboratories, Inc.
    7  * All or some portions of this file are derived from material licensed
    8  * to the University of California by American Telephone and Telegraph
    9  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
   10  * the permission of UNIX System Laboratories, Inc.
   11  *
   12  * Redistribution and use in source and binary forms, with or without
   13  * modification, are permitted provided that the following conditions
   14  * are met:
   15  * 1. Redistributions of source code must retain the above copyright
   16  *    notice, this list of conditions and the following disclaimer.
   17  * 2. Redistributions in binary form must reproduce the above copyright
   18  *    notice, this list of conditions and the following disclaimer in the
   19  *    documentation and/or other materials provided with the distribution.
   20  * 3. Neither the name of the University nor the names of its contributors
   21  *    may be used to endorse or promote products derived from this software
   22  *    without specific prior written permission.
   23  *
   24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   34  * SUCH DAMAGE.
   35  *
   36  *      From: @(#)kern_clock.c  8.5 (Berkeley) 1/21/94
   37  */
   38 
   39 #include <sys/cdefs.h>
   40 __FBSDID("$FreeBSD$");
   41 
   42 #include "opt_callout_profiling.h"
   43 #include "opt_ddb.h"
   44 #if defined(__arm__)
   45 #include "opt_timer.h"
   46 #endif
   47 #include "opt_rss.h"
   48 
   49 #include <sys/param.h>
   50 #include <sys/systm.h>
   51 #include <sys/bus.h>
   52 #include <sys/callout.h>
   53 #include <sys/domainset.h>
   54 #include <sys/file.h>
   55 #include <sys/interrupt.h>
   56 #include <sys/kernel.h>
   57 #include <sys/ktr.h>
   58 #include <sys/lock.h>
   59 #include <sys/malloc.h>
   60 #include <sys/mutex.h>
   61 #include <sys/proc.h>
   62 #include <sys/sdt.h>
   63 #include <sys/sleepqueue.h>
   64 #include <sys/sysctl.h>
   65 #include <sys/smp.h>
   66 
   67 #ifdef DDB
   68 #include <ddb/ddb.h>
   69 #include <machine/_inttypes.h>
   70 #endif
   71 
   72 #ifdef SMP
   73 #include <machine/cpu.h>
   74 #endif
   75 
   76 #ifndef NO_EVENTTIMERS
   77 DPCPU_DECLARE(sbintime_t, hardclocktime);
   78 #endif
   79 
   80 SDT_PROVIDER_DEFINE(callout_execute);
   81 SDT_PROBE_DEFINE1(callout_execute, , , callout__start, "struct callout *");
   82 SDT_PROBE_DEFINE1(callout_execute, , , callout__end, "struct callout *");
   83 
   84 #ifdef CALLOUT_PROFILING
   85 static int avg_depth;
   86 SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
   87     "Average number of items examined per softclock call. Units = 1/1000");
   88 static int avg_gcalls;
   89 SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
   90     "Average number of Giant callouts made per softclock call. Units = 1/1000");
   91 static int avg_lockcalls;
   92 SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
   93     "Average number of lock callouts made per softclock call. Units = 1/1000");
   94 static int avg_mpcalls;
   95 SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
   96     "Average number of MP callouts made per softclock call. Units = 1/1000");
   97 static int avg_depth_dir;
   98 SYSCTL_INT(_debug, OID_AUTO, to_avg_depth_dir, CTLFLAG_RD, &avg_depth_dir, 0,
   99     "Average number of direct callouts examined per callout_process call. "
  100     "Units = 1/1000");
  101 static int avg_lockcalls_dir;
  102 SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls_dir, CTLFLAG_RD,
  103     &avg_lockcalls_dir, 0, "Average number of lock direct callouts made per "
  104     "callout_process call. Units = 1/1000");
  105 static int avg_mpcalls_dir;
  106 SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls_dir, CTLFLAG_RD, &avg_mpcalls_dir,
  107     0, "Average number of MP direct callouts made per callout_process call. "
  108     "Units = 1/1000");
  109 #endif
  110 
  111 static int ncallout;
  112 SYSCTL_INT(_kern, OID_AUTO, ncallout, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &ncallout, 0,
  113     "Number of entries in callwheel and size of timeout() preallocation");
  114 
  115 #ifdef  RSS
  116 static int pin_default_swi = 1;
  117 static int pin_pcpu_swi = 1;
  118 #else
  119 static int pin_default_swi = 0;
  120 static int pin_pcpu_swi = 0;
  121 #endif
  122 
  123 SYSCTL_INT(_kern, OID_AUTO, pin_default_swi, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pin_default_swi,
  124     0, "Pin the default (non-per-cpu) swi (shared with PCPU 0 swi)");
  125 SYSCTL_INT(_kern, OID_AUTO, pin_pcpu_swi, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pin_pcpu_swi,
  126     0, "Pin the per-CPU swis (except PCPU 0, which is also the default)");
  127 
  128 /*
  129  * TODO:
  130  *      allocate more timeout table slots when table overflows.
  131  */
  132 static u_int __read_mostly callwheelsize;
  133 static u_int __read_mostly callwheelmask;
  134 
  135 /*
  136  * The callout cpu exec entities hold the information necessary to
  137  * describe the state of the callouts currently running on the CPU and
  138  * the information necessary to migrate callouts to a new callout cpu.
  139  * In particular, the first entry of the cc_exec_entity array holds the
  140  * information for a callout running in SWI thread context, while the
  141  * second one holds the information for a callout running directly from
  142  * hardware interrupt context.  The cached information is essential for
  143  * deferring migration when the migrating callout is already running.
  144  */
  145 struct cc_exec {
  146         struct callout          *cc_curr;
  147         callout_func_t          *cc_drain;
  148 #ifdef SMP
  149         callout_func_t          *ce_migration_func;
  150         void                    *ce_migration_arg;
  151         int                     ce_migration_cpu;
  152         sbintime_t              ce_migration_time;
  153         sbintime_t              ce_migration_prec;
  154 #endif
  155         bool                    cc_cancel;
  156         bool                    cc_waiting;
  157 };
  158 
  159 /*
  160  * There is one struct callout_cpu per cpu, holding all relevant
  161  * state for the callout processing thread on the individual CPU.
  162  */
  163 struct callout_cpu {
  164         struct mtx_padalign     cc_lock;
  165         struct cc_exec          cc_exec_entity[2];
  166         struct callout          *cc_next;
  167         struct callout          *cc_callout;
  168         struct callout_list     *cc_callwheel;
  169         struct callout_tailq    cc_expireq;
  170         struct callout_slist    cc_callfree;
  171         sbintime_t              cc_firstevent;
  172         sbintime_t              cc_lastscan;
  173         void                    *cc_cookie;
  174         u_int                   cc_bucket;
  175         u_int                   cc_inited;
  176         char                    cc_ktr_event_name[20];
  177 };
  178 
  179 #define callout_migrating(c)    ((c)->c_iflags & CALLOUT_DFRMIGRATION)
  180 
  181 #define cc_exec_curr(cc, dir)           cc->cc_exec_entity[dir].cc_curr
  182 #define cc_exec_drain(cc, dir)          cc->cc_exec_entity[dir].cc_drain
  183 #define cc_exec_next(cc)                cc->cc_next
  184 #define cc_exec_cancel(cc, dir)         cc->cc_exec_entity[dir].cc_cancel
  185 #define cc_exec_waiting(cc, dir)        cc->cc_exec_entity[dir].cc_waiting
  186 #ifdef SMP
  187 #define cc_migration_func(cc, dir)      cc->cc_exec_entity[dir].ce_migration_func
  188 #define cc_migration_arg(cc, dir)       cc->cc_exec_entity[dir].ce_migration_arg
  189 #define cc_migration_cpu(cc, dir)       cc->cc_exec_entity[dir].ce_migration_cpu
  190 #define cc_migration_time(cc, dir)      cc->cc_exec_entity[dir].ce_migration_time
  191 #define cc_migration_prec(cc, dir)      cc->cc_exec_entity[dir].ce_migration_prec
  192 
  193 struct callout_cpu cc_cpu[MAXCPU];
  194 #define CPUBLOCK        MAXCPU
  195 #define CC_CPU(cpu)     (&cc_cpu[(cpu)])
  196 #define CC_SELF()       CC_CPU(PCPU_GET(cpuid))
  197 #else
  198 struct callout_cpu cc_cpu;
  199 #define CC_CPU(cpu)     &cc_cpu
  200 #define CC_SELF()       &cc_cpu
  201 #endif
  202 #define CC_LOCK(cc)     mtx_lock_spin(&(cc)->cc_lock)
  203 #define CC_UNLOCK(cc)   mtx_unlock_spin(&(cc)->cc_lock)
  204 #define CC_LOCK_ASSERT(cc)      mtx_assert(&(cc)->cc_lock, MA_OWNED)
  205 
  206 static int __read_mostly timeout_cpu;
  207 
  208 static void     callout_cpu_init(struct callout_cpu *cc, int cpu);
  209 static void     softclock_call_cc(struct callout *c, struct callout_cpu *cc,
  210 #ifdef CALLOUT_PROFILING
  211                     int *mpcalls, int *lockcalls, int *gcalls,
  212 #endif
  213                     int direct);
  214 
  215 static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");
  216 
  217 /**
  218  * Locked by cc_lock:
  219  *   cc_curr         - If a callout is in progress, it is cc_curr.
  220  *                     If cc_curr is non-NULL, threads waiting in
  221  *                     callout_drain() will be woken up as soon as the
  222  *                     relevant callout completes.
  223  *   cc_cancel       - Setting this to true with both c_lock and cc_lock
  224  *                     held guarantees that the current callout will not
  225  *                     run.  The softclock() function sets it to false
  226  *                     before it drops cc_lock to acquire c_lock, and it
  227  *                     calls the handler only if cc_cancel is still false
  228  *                     after c_lock has been successfully acquired.
  229  *   cc_waiting      - If a thread is waiting in callout_drain(), then
  230  *                     cc_waiting is true.  Set only when
  231  *                     cc_curr is non-NULL.
  232  */
  233 
  234 /*
  235  * Resets the execution entity tied to a specific callout cpu.
  236  */
  237 static void
  238 cc_cce_cleanup(struct callout_cpu *cc, int direct)
  239 {
  240 
  241         cc_exec_curr(cc, direct) = NULL;
  242         cc_exec_cancel(cc, direct) = false;
  243         cc_exec_waiting(cc, direct) = false;
  244 #ifdef SMP
  245         cc_migration_cpu(cc, direct) = CPUBLOCK;
  246         cc_migration_time(cc, direct) = 0;
  247         cc_migration_prec(cc, direct) = 0;
  248         cc_migration_func(cc, direct) = NULL;
  249         cc_migration_arg(cc, direct) = NULL;
  250 #endif
  251 }
  252 
  253 /*
  254  * Checks if migration is requested by a specific callout cpu.
  255  */
  256 static int
  257 cc_cce_migrating(struct callout_cpu *cc, int direct)
  258 {
  259 
  260 #ifdef SMP
  261         return (cc_migration_cpu(cc, direct) != CPUBLOCK);
  262 #else
  263         return (0);
  264 #endif
  265 }
  266 
  267 /*
  268  * Kernel low level callwheel initialization
  269  * called on the BSP during kernel startup.
  270  */
  271 static void
  272 callout_callwheel_init(void *dummy)
  273 {
  274         struct callout_cpu *cc;
  275 
  276         /*
  277          * Calculate the size of the callout wheel and the preallocated
  278          * timeout() structures.
  279          * XXX: Clip ncallout to the value the old maxusers-based formula
  280          * yields at its cap of 384.  This is still huge, but acceptable.
  281          */
  282         memset(CC_CPU(curcpu), 0, sizeof(cc_cpu));
  283         ncallout = imin(16 + maxproc + maxfiles, 18508);
  284         TUNABLE_INT_FETCH("kern.ncallout", &ncallout);
  285 
  286         /*
  287          * Calculate the callout wheel size; it should be the next power
  288          * of two higher than 'ncallout'.
  289          */
  290         callwheelsize = 1 << fls(ncallout);
  291         callwheelmask = callwheelsize - 1;
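              /*
               * A worked example (for illustration only): with the default
               * clamp of ncallout = 18508, fls(18508) returns 15, so the
               * wheel gets 1 << 15 = 32768 buckets and callwheelmask becomes
               * 0x7fff.  Smaller systems end up with proportionally smaller
               * wheels.
               */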
  292 
  293         /*
  294          * Fetch whether we're pinning the swi's or not.
  295          */
  296         TUNABLE_INT_FETCH("kern.pin_default_swi", &pin_default_swi);
  297         TUNABLE_INT_FETCH("kern.pin_pcpu_swi", &pin_pcpu_swi);
  298 
  299         /*
  300          * Only BSP handles timeout(9) and receives a preallocation.
  301          *
  302          * XXX: Once all timeout(9) consumers are converted this can
  303          * be removed.
  304          */
  305         timeout_cpu = PCPU_GET(cpuid);
  306         cc = CC_CPU(timeout_cpu);
  307         cc->cc_callout = malloc(ncallout * sizeof(struct callout),
  308             M_CALLOUT, M_WAITOK);
  309         callout_cpu_init(cc, timeout_cpu);
  310 }
  311 SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);
  312 
  313 /*
  314  * Initialize the per-cpu callout structures.
  315  */
  316 static void
  317 callout_cpu_init(struct callout_cpu *cc, int cpu)
  318 {
  319         struct callout *c;
  320         int i;
  321 
  322         mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
  323         SLIST_INIT(&cc->cc_callfree);
  324         cc->cc_inited = 1;
  325         cc->cc_callwheel = malloc_domainset(sizeof(struct callout_list) *
  326             callwheelsize, M_CALLOUT,
  327             DOMAINSET_PREF(pcpu_find(cpu)->pc_domain), M_WAITOK);
  328         for (i = 0; i < callwheelsize; i++)
  329                 LIST_INIT(&cc->cc_callwheel[i]);
  330         TAILQ_INIT(&cc->cc_expireq);
  331         cc->cc_firstevent = SBT_MAX;
  332         for (i = 0; i < 2; i++)
  333                 cc_cce_cleanup(cc, i);
  334         snprintf(cc->cc_ktr_event_name, sizeof(cc->cc_ktr_event_name),
  335             "callwheel cpu %d", cpu);
  336         if (cc->cc_callout == NULL)     /* Only BSP handles timeout(9) */
  337                 return;
  338         for (i = 0; i < ncallout; i++) {
  339                 c = &cc->cc_callout[i];
  340                 callout_init(c, 0);
  341                 c->c_iflags = CALLOUT_LOCAL_ALLOC;
  342                 SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
  343         }
  344 }
  345 
  346 #ifdef SMP
  347 /*
  348  * Switches the cpu tied to a specific callout.
  349  * The function expects the incoming callout cpu to be locked and returns
  350  * with the outgoing callout cpu locked.
  351  */
  352 static struct callout_cpu *
  353 callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
  354 {
  355         struct callout_cpu *new_cc;
  356 
  357         MPASS(c != NULL && cc != NULL);
  358         CC_LOCK_ASSERT(cc);
  359 
  360         /*
  361          * Block interrupts and preemption while the callout cpu is
  362          * blocked in order to avoid deadlocks, as the new thread
  363          * may try to acquire the callout cpu lock.
  364          */
  365         c->c_cpu = CPUBLOCK;
  366         spinlock_enter();
  367         CC_UNLOCK(cc);
  368         new_cc = CC_CPU(new_cpu);
  369         CC_LOCK(new_cc);
  370         spinlock_exit();
  371         c->c_cpu = new_cpu;
  372         return (new_cc);
  373 }
  374 #endif
  375 
  376 /*
  377  * Start standard softclock thread.
  378  */
  379 static void
  380 start_softclock(void *dummy)
  381 {
  382         struct callout_cpu *cc;
  383         char name[MAXCOMLEN];
  384 #ifdef SMP
  385         int cpu;
  386         struct intr_event *ie;
  387 #endif
  388 
  389         cc = CC_CPU(timeout_cpu);
  390         snprintf(name, sizeof(name), "clock (%d)", timeout_cpu);
  391         if (swi_add(&clk_intr_event, name, softclock, cc, SWI_CLOCK,
  392             INTR_MPSAFE, &cc->cc_cookie))
  393                 panic("died while creating standard software ithreads");
  394         if (pin_default_swi &&
  395             (intr_event_bind(clk_intr_event, timeout_cpu) != 0)) {
  396                 printf("%s: timeout clock couldn't be pinned to cpu %d\n",
  397                     __func__,
  398                     timeout_cpu);
  399         }
  400 
  401 #ifdef SMP
  402         CPU_FOREACH(cpu) {
  403                 if (cpu == timeout_cpu)
  404                         continue;
  405                 cc = CC_CPU(cpu);
  406                 cc->cc_callout = NULL;  /* Only BSP handles timeout(9). */
  407                 callout_cpu_init(cc, cpu);
  408                 snprintf(name, sizeof(name), "clock (%d)", cpu);
  409                 ie = NULL;
  410                 if (swi_add(&ie, name, softclock, cc, SWI_CLOCK,
  411                     INTR_MPSAFE, &cc->cc_cookie))
  412                         panic("died while creating standard software ithreads");
  413                 if (pin_pcpu_swi && (intr_event_bind(ie, cpu) != 0)) {
  414                         printf("%s: per-cpu clock couldn't be pinned to "
  415                             "cpu %d\n",
  416                             __func__,
  417                             cpu);
  418                 }
  419         }
  420 #endif
  421 }
  422 SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);
  423 
  424 #define CC_HASH_SHIFT   8
  425 
  426 static inline u_int
  427 callout_hash(sbintime_t sbt)
  428 {
  429 
  430         return (sbt >> (32 - CC_HASH_SHIFT));
  431 }
  432 
  433 static inline u_int
  434 callout_get_bucket(sbintime_t sbt)
  435 {
  436 
  437         return (callout_hash(sbt) & callwheelmask);
  438 }
  439 
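      /*
       * A worked example (illustrative, assuming CC_HASH_SHIFT = 8 as
       * defined above): sbintime_t is a 32.32 fixed-point count of seconds,
       * so callout_hash() shifts right by 32 - 8 = 24 bits and consecutive
       * buckets are 2^24 sbt units = 1/256 s (about 3.9 ms) apart.  With a
       * 32768-bucket wheel, the wheel spans 32768 / 256 = 128 seconds of
       * future time before bucket indices alias.
       */
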
  440 void
  441 callout_process(sbintime_t now)
  442 {
  443         struct callout *tmp, *tmpn;
  444         struct callout_cpu *cc;
  445         struct callout_list *sc;
  446         sbintime_t first, last, max, tmp_max;
  447         uint32_t lookahead;
  448         u_int firstb, lastb, nowb;
  449 #ifdef CALLOUT_PROFILING
  450         int depth_dir = 0, mpcalls_dir = 0, lockcalls_dir = 0;
  451 #endif
  452 
  453         cc = CC_SELF();
  454         mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
  455 
  456         /* Compute the buckets of the last scan and present times. */
  457         firstb = callout_hash(cc->cc_lastscan);
  458         cc->cc_lastscan = now;
  459         nowb = callout_hash(now);
  460 
  461         /* Compute the last bucket and minimum time of the bucket after it. */
  462         if (nowb == firstb)
  463                 lookahead = (SBT_1S / 16);
  464         else if (nowb - firstb == 1)
  465                 lookahead = (SBT_1S / 8);
  466         else
  467                 lookahead = (SBT_1S / 2);
  468         first = last = now;
  469         first += (lookahead / 2);
  470         last += lookahead;
  471         last &= (0xffffffffffffffffLLU << (32 - CC_HASH_SHIFT));
  472         lastb = callout_hash(last) - 1;
  473         max = last;
  474 
  475         /*
  476          * Check if we wrapped around the entire wheel from the last scan.
  477          * If so, we need to scan the entire wheel for pending callouts.
  478          */
  479         if (lastb - firstb >= callwheelsize) {
  480                 lastb = firstb + callwheelsize - 1;
  481                 if (nowb - firstb >= callwheelsize)
  482                         nowb = lastb;
  483         }
  484 
  485         /* Iterate callwheel from firstb to nowb and then up to lastb. */
  486         do {
  487                 sc = &cc->cc_callwheel[firstb & callwheelmask];
  488                 tmp = LIST_FIRST(sc);
  489                 while (tmp != NULL) {
  490                         /* Run the callout if its time has been reached. */
  491                         if (tmp->c_time <= now) {
  492                                 /*
  493                                  * Consumer told us the callout may be run
  494                                  * directly from hardware interrupt context.
  495                                  */
  496                                 if (tmp->c_iflags & CALLOUT_DIRECT) {
  497 #ifdef CALLOUT_PROFILING
  498                                         ++depth_dir;
  499 #endif
  500                                         cc_exec_next(cc) =
  501                                             LIST_NEXT(tmp, c_links.le);
  502                                         cc->cc_bucket = firstb & callwheelmask;
  503                                         LIST_REMOVE(tmp, c_links.le);
  504                                         softclock_call_cc(tmp, cc,
  505 #ifdef CALLOUT_PROFILING
  506                                             &mpcalls_dir, &lockcalls_dir, NULL,
  507 #endif
  508                                             1);
  509                                         tmp = cc_exec_next(cc);
  510                                         cc_exec_next(cc) = NULL;
  511                                 } else {
  512                                         tmpn = LIST_NEXT(tmp, c_links.le);
  513                                         LIST_REMOVE(tmp, c_links.le);
  514                                         TAILQ_INSERT_TAIL(&cc->cc_expireq,
  515                                             tmp, c_links.tqe);
  516                                         tmp->c_iflags |= CALLOUT_PROCESSED;
  517                                         tmp = tmpn;
  518                                 }
  519                                 continue;
  520                         }
  521                         /* Skip events from distant future. */
  522                         if (tmp->c_time >= max)
  523                                 goto next;
  524                         /*
  525                          * The event's minimal time is later than the present
  526                          * maximal time, so it cannot be aggregated.
  527                          */
  528                         if (tmp->c_time > last) {
  529                                 lastb = nowb;
  530                                 goto next;
  531                         }
  532                         /* Update first and last time, respecting this event. */
  533                         if (tmp->c_time < first)
  534                                 first = tmp->c_time;
  535                         tmp_max = tmp->c_time + tmp->c_precision;
  536                         if (tmp_max < last)
  537                                 last = tmp_max;
  538 next:
  539                         tmp = LIST_NEXT(tmp, c_links.le);
  540                 }
  541                 /* Proceed with the next bucket. */
  542                 firstb++;
  543                 /*
  544                  * Stop if we looked past the present time and found
  545                  * some event we cannot execute yet, or if we looked
  546                  * far enough into the future.
  547                  */
  548         } while (((int)(firstb - lastb)) <= 0);
  549         cc->cc_firstevent = last;
  550 #ifndef NO_EVENTTIMERS
  551         cpu_new_callout(curcpu, last, first);
  552 #endif
  553 #ifdef CALLOUT_PROFILING
  554         avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8;
  555         avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8;
  556         avg_lockcalls_dir += (lockcalls_dir * 1000 - avg_lockcalls_dir) >> 8;
  557 #endif
  558         mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
  559         /*
  560          * swi_sched acquires the thread lock, so we don't want to call it
  561          * with cc_lock held; incorrect locking order.
  562          */
  563         if (!TAILQ_EMPTY(&cc->cc_expireq))
  564                 swi_sched(cc->cc_cookie, 0);
  565 }
  566 
  567 static struct callout_cpu *
  568 callout_lock(struct callout *c)
  569 {
  570         struct callout_cpu *cc;
  571         int cpu;
  572 
  573         for (;;) {
  574                 cpu = c->c_cpu;
  575 #ifdef SMP
  576                 if (cpu == CPUBLOCK) {
  577                         while (c->c_cpu == CPUBLOCK)
  578                                 cpu_spinwait();
  579                         continue;
  580                 }
  581 #endif
  582                 cc = CC_CPU(cpu);
  583                 CC_LOCK(cc);
  584                 if (cpu == c->c_cpu)
  585                         break;
  586                 CC_UNLOCK(cc);
  587         }
  588         return (cc);
  589 }
  590 
  591 static void
  592 callout_cc_add(struct callout *c, struct callout_cpu *cc,
  593     sbintime_t sbt, sbintime_t precision, void (*func)(void *),
  594     void *arg, int cpu, int flags)
  595 {
  596         int bucket;
  597 
  598         CC_LOCK_ASSERT(cc);
  599         if (sbt < cc->cc_lastscan)
  600                 sbt = cc->cc_lastscan;
  601         c->c_arg = arg;
  602         c->c_iflags |= CALLOUT_PENDING;
  603         c->c_iflags &= ~CALLOUT_PROCESSED;
  604         c->c_flags |= CALLOUT_ACTIVE;
  605         if (flags & C_DIRECT_EXEC)
  606                 c->c_iflags |= CALLOUT_DIRECT;
  607         c->c_func = func;
  608         c->c_time = sbt;
  609         c->c_precision = precision;
  610         bucket = callout_get_bucket(c->c_time);
  611         CTR3(KTR_CALLOUT, "precision set for %p: %d.%08x",
  612             c, (int)(c->c_precision >> 32),
  613             (u_int)(c->c_precision & 0xffffffff));
  614         LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le);
  615         if (cc->cc_bucket == bucket)
  616                 cc_exec_next(cc) = c;
  617 #ifndef NO_EVENTTIMERS
  618         /*
  619          * Inform the eventtimers(4) subsystem that a new callout has
  620          * been inserted, but only if really required.
  621          */
  622         if (SBT_MAX - c->c_time < c->c_precision)
  623                 c->c_precision = SBT_MAX - c->c_time;
  624         sbt = c->c_time + c->c_precision;
  625         if (sbt < cc->cc_firstevent) {
  626                 cc->cc_firstevent = sbt;
  627                 cpu_new_callout(cpu, sbt, c->c_time);
  628         }
  629 #endif
  630 }
  631 
  632 static void
  633 callout_cc_del(struct callout *c, struct callout_cpu *cc)
  634 {
  635 
  636         if ((c->c_iflags & CALLOUT_LOCAL_ALLOC) == 0)
  637                 return;
  638         c->c_func = NULL;
  639         SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
  640 }
  641 
  642 static void
  643 softclock_call_cc(struct callout *c, struct callout_cpu *cc,
  644 #ifdef CALLOUT_PROFILING
  645     int *mpcalls, int *lockcalls, int *gcalls,
  646 #endif
  647     int direct)
  648 {
  649         struct rm_priotracker tracker;
  650         callout_func_t *c_func, *drain;
  651         void *c_arg;
  652         struct lock_class *class;
  653         struct lock_object *c_lock;
  654         uintptr_t lock_status;
  655         int c_iflags;
  656 #ifdef SMP
  657         struct callout_cpu *new_cc;
  658         callout_func_t *new_func;
  659         void *new_arg;
  660         int flags, new_cpu;
  661         sbintime_t new_prec, new_time;
  662 #endif
  663 #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING) 
  664         sbintime_t sbt1, sbt2;
  665         struct timespec ts2;
  666         static sbintime_t maxdt = 2 * SBT_1MS;  /* 2 msec */
  667         static callout_func_t *lastfunc;
  668 #endif
  669 
  670         KASSERT((c->c_iflags & CALLOUT_PENDING) == CALLOUT_PENDING,
  671             ("softclock_call_cc: pend %p %x", c, c->c_iflags));
  672         KASSERT((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE,
  673             ("softclock_call_cc: act %p %x", c, c->c_flags));
  674         class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
  675         lock_status = 0;
  676         if (c->c_iflags & CALLOUT_SHAREDLOCK) {
  677                 if (class == &lock_class_rm)
  678                         lock_status = (uintptr_t)&tracker;
  679                 else
  680                         lock_status = 1;
  681         }
  682         c_lock = c->c_lock;
  683         c_func = c->c_func;
  684         c_arg = c->c_arg;
  685         c_iflags = c->c_iflags;
  686         if (c->c_iflags & CALLOUT_LOCAL_ALLOC)
  687                 c->c_iflags = CALLOUT_LOCAL_ALLOC;
  688         else
  689                 c->c_iflags &= ~CALLOUT_PENDING;
  690         
  691         cc_exec_curr(cc, direct) = c;
  692         cc_exec_cancel(cc, direct) = false;
  693         cc_exec_drain(cc, direct) = NULL;
  694         CC_UNLOCK(cc);
  695         if (c_lock != NULL) {
  696                 class->lc_lock(c_lock, lock_status);
  697                 /*
  698                  * The callout may have been cancelled
  699                  * while we switched locks.
  700                  */
  701                 if (cc_exec_cancel(cc, direct)) {
  702                         class->lc_unlock(c_lock);
  703                         goto skip;
  704                 }
  705                 /* The callout cannot be stopped now. */
  706                 cc_exec_cancel(cc, direct) = true;
  707                 if (c_lock == &Giant.lock_object) {
  708 #ifdef CALLOUT_PROFILING
  709                         (*gcalls)++;
  710 #endif
  711                         CTR3(KTR_CALLOUT, "callout giant %p func %p arg %p",
  712                             c, c_func, c_arg);
  713                 } else {
  714 #ifdef CALLOUT_PROFILING
  715                         (*lockcalls)++;
  716 #endif
  717                         CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
  718                             c, c_func, c_arg);
  719                 }
  720         } else {
  721 #ifdef CALLOUT_PROFILING
  722                 (*mpcalls)++;
  723 #endif
  724                 CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
  725                     c, c_func, c_arg);
  726         }
  727         KTR_STATE3(KTR_SCHED, "callout", cc->cc_ktr_event_name, "running",
  728             "func:%p", c_func, "arg:%p", c_arg, "direct:%d", direct);
  729 #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
  730         sbt1 = sbinuptime();
  731 #endif
  732         THREAD_NO_SLEEPING();
  733         SDT_PROBE1(callout_execute, , , callout__start, c);
  734         c_func(c_arg);
  735         SDT_PROBE1(callout_execute, , , callout__end, c);
  736         THREAD_SLEEPING_OK();
  737 #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
  738         sbt2 = sbinuptime();
  739         sbt2 -= sbt1;
  740         if (sbt2 > maxdt) {
  741                 if (lastfunc != c_func || sbt2 > maxdt * 2) {
  742                         ts2 = sbttots(sbt2);
  743                         printf(
  744                 "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
  745                             c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
  746                 }
  747                 maxdt = sbt2;
  748                 lastfunc = c_func;
  749         }
  750 #endif
  751         KTR_STATE0(KTR_SCHED, "callout", cc->cc_ktr_event_name, "idle");
  752         CTR1(KTR_CALLOUT, "callout %p finished", c);
  753         if ((c_iflags & CALLOUT_RETURNUNLOCKED) == 0)
  754                 class->lc_unlock(c_lock);
  755 skip:
  756         CC_LOCK(cc);
  757         KASSERT(cc_exec_curr(cc, direct) == c, ("mishandled cc_curr"));
  758         cc_exec_curr(cc, direct) = NULL;
  759         if (cc_exec_drain(cc, direct)) {
  760                 drain = cc_exec_drain(cc, direct);
  761                 cc_exec_drain(cc, direct) = NULL;
  762                 CC_UNLOCK(cc);
  763                 drain(c_arg);
  764                 CC_LOCK(cc);
  765         }
  766         if (cc_exec_waiting(cc, direct)) {
  767                 /*
  768                  * There is someone waiting for the
  769                  * callout to complete.
  770                  * If the callout was scheduled for
  771                  * migration just cancel it.
  772                  */
  773                 if (cc_cce_migrating(cc, direct)) {
  774                         cc_cce_cleanup(cc, direct);
  775 
  776                         /*
  777                          * We should assert here that the callout is not
  778                          * destroyed, but that is not easy.
  779                          */
  780                         c->c_iflags &= ~CALLOUT_DFRMIGRATION;
  781                 }
  782                 cc_exec_waiting(cc, direct) = false;
  783                 CC_UNLOCK(cc);
  784                 wakeup(&cc_exec_waiting(cc, direct));
  785                 CC_LOCK(cc);
  786         } else if (cc_cce_migrating(cc, direct)) {
  787                 KASSERT((c_iflags & CALLOUT_LOCAL_ALLOC) == 0,
  788                     ("Migrating legacy callout %p", c));
  789 #ifdef SMP
  790                 /*
  791                  * If the callout was scheduled for
  792                  * migration just perform it now.
  793                  */
  794                 new_cpu = cc_migration_cpu(cc, direct);
  795                 new_time = cc_migration_time(cc, direct);
  796                 new_prec = cc_migration_prec(cc, direct);
  797                 new_func = cc_migration_func(cc, direct);
  798                 new_arg = cc_migration_arg(cc, direct);
  799                 cc_cce_cleanup(cc, direct);
  800 
  801                 /*
  802                  * We should assert here that the callout is not destroyed,
  803                  * but that is not easy.
  804                  *
  805                  * First, handle deferred callout stops.
  806                  */
  807                 if (!callout_migrating(c)) {
  808                         CTR3(KTR_CALLOUT,
  809                              "deferred cancelled %p func %p arg %p",
  810                              c, new_func, new_arg);
  811                         callout_cc_del(c, cc);
  812                         return;
  813                 }
  814                 c->c_iflags &= ~CALLOUT_DFRMIGRATION;
  815 
  816                 new_cc = callout_cpu_switch(c, cc, new_cpu);
  817                 flags = (direct) ? C_DIRECT_EXEC : 0;
  818                 callout_cc_add(c, new_cc, new_time, new_prec, new_func,
  819                     new_arg, new_cpu, flags);
  820                 CC_UNLOCK(new_cc);
  821                 CC_LOCK(cc);
  822 #else
  823                 panic("migration should not happen");
  824 #endif
  825         }
  826         /*
  827          * If the current callout is locally allocated (from
  828          * timeout(9)) then put it on the freelist.
  829          *
  830          * Note: we need to check the cached copy of c_iflags because
  831          * if it was not local, then it's not safe to deref the
  832          * callout pointer.
  833          */
  834         KASSERT((c_iflags & CALLOUT_LOCAL_ALLOC) == 0 ||
  835             c->c_iflags == CALLOUT_LOCAL_ALLOC,
  836             ("corrupted callout"));
  837         if (c_iflags & CALLOUT_LOCAL_ALLOC)
  838                 callout_cc_del(c, cc);
  839 }
  840 
  841 /*
  842  * The callout mechanism is based on the work of Adam M. Costello and
  843  * George Varghese, published in a technical report entitled "Redesigning
  844  * the BSD Callout and Timer Facilities" and modified slightly for inclusion
  845  * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
  846  * used in this implementation was published by G. Varghese and T. Lauck in
  847  * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
  848  * the Efficient Implementation of a Timer Facility" in the Proceedings of
  849  * the 11th ACM Annual Symposium on Operating Systems Principles,
  850  * Austin, Texas Nov 1987.
  851  */
  852 
  853 /*
  854  * Software (low priority) clock interrupt.
  855  * Run periodic events from timeout queue.
  856  */
  857 void
  858 softclock(void *arg)
  859 {
  860         struct callout_cpu *cc;
  861         struct callout *c;
  862 #ifdef CALLOUT_PROFILING
  863         int depth = 0, gcalls = 0, lockcalls = 0, mpcalls = 0;
  864 #endif
  865 
  866         cc = (struct callout_cpu *)arg;
  867         CC_LOCK(cc);
  868         while ((c = TAILQ_FIRST(&cc->cc_expireq)) != NULL) {
  869                 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
  870                 softclock_call_cc(c, cc,
  871 #ifdef CALLOUT_PROFILING
  872                     &mpcalls, &lockcalls, &gcalls,
  873 #endif
  874                     0);
  875 #ifdef CALLOUT_PROFILING
  876                 ++depth;
  877 #endif
  878         }
  879 #ifdef CALLOUT_PROFILING
  880         avg_depth += (depth * 1000 - avg_depth) >> 8;
  881         avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
  882         avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
  883         avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
  884 #endif
  885         CC_UNLOCK(cc);
  886 }
  887 
  888 /*
  889  * timeout --
  890  *      Execute a function after a specified length of time.
  891  *
  892  * untimeout --
  893  *      Cancel previous timeout function call.
  894  *
  895  * callout_handle_init --
  896  *      Initialize a handle so that using it with untimeout is benign.
  897  *
  898  *      See AT&T BCI Driver Reference Manual for specification.  This
  899  *      implementation differs from that one in that although an
  900  *      identification value is returned from timeout, the original
  901  *      arguments to timeout as well as the identifier are used to
  902  *      identify entries for untimeout.
  903  */
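      /*
       * A minimal usage sketch of this legacy interface (illustrative only;
       * my_timeout_fn and sc are hypothetical driver names):
       *
       *      struct callout_handle h = CALLOUT_HANDLE_INITIALIZER(&h);
       *
       *      h = timeout(my_timeout_fn, sc, hz);     (fires in ~1 second)
       *      ...
       *      untimeout(my_timeout_fn, sc, h);        (cancels it if pending)
       *
       * New code should prefer the callout_*() interface described further
       * below.
       */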
  904 struct callout_handle
  905 timeout(timeout_t *ftn, void *arg, int to_ticks)
  906 {
  907         struct callout_cpu *cc;
  908         struct callout *new;
  909         struct callout_handle handle;
  910 
  911         cc = CC_CPU(timeout_cpu);
  912         CC_LOCK(cc);
  913         /* Fill in the next free callout structure. */
  914         new = SLIST_FIRST(&cc->cc_callfree);
  915         if (new == NULL)
  916                 /* XXX Attempt to malloc first */
  917                 panic("timeout table full");
  918         SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
  919         callout_reset(new, to_ticks, ftn, arg);
  920         handle.callout = new;
  921         CC_UNLOCK(cc);
  922 
  923         return (handle);
  924 }
  925 
  926 void
  927 untimeout(timeout_t *ftn, void *arg, struct callout_handle handle)
  928 {
  929         struct callout_cpu *cc;
  930 
  931         /*
  932          * Check for a handle that was initialized
  933          * by callout_handle_init, but never used
  934          * for a real timeout.
  935          */
  936         if (handle.callout == NULL)
  937                 return;
  938 
  939         cc = callout_lock(handle.callout);
  940         if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
  941                 callout_stop(handle.callout);
  942         CC_UNLOCK(cc);
  943 }
  944 
  945 void
  946 callout_handle_init(struct callout_handle *handle)
  947 {
  948         handle->callout = NULL;
  949 }
  950 
  951 void
  952 callout_when(sbintime_t sbt, sbintime_t precision, int flags,
  953     sbintime_t *res, sbintime_t *prec_res)
  954 {
  955         sbintime_t to_sbt, to_pr;
  956 
  957         if ((flags & (C_ABSOLUTE | C_PRECALC)) != 0) {
  958                 *res = sbt;
  959                 *prec_res = precision;
  960                 return;
  961         }
  962         if ((flags & C_HARDCLOCK) != 0 && sbt < tick_sbt)
  963                 sbt = tick_sbt;
  964         if ((flags & C_HARDCLOCK) != 0 ||
  965 #ifdef NO_EVENTTIMERS
  966             sbt >= sbt_timethreshold) {
  967                 to_sbt = getsbinuptime();
  968 
  969                 /* Add safety belt for the case of hz > 1000. */
  970                 to_sbt += tc_tick_sbt - tick_sbt;
  971 #else
  972             sbt >= sbt_tickthreshold) {
  973                 /*
  974                  * Obtain the time of the last hardclock() call on
  975                  * this CPU directly from kern_clocksource.c.
  976                  * This value is per-CPU, but it is equal across all
  977                  * active CPUs.
  978                  */
  979 #ifdef __LP64__
  980                 to_sbt = DPCPU_GET(hardclocktime);
  981 #else
  982                 spinlock_enter();
  983                 to_sbt = DPCPU_GET(hardclocktime);
  984                 spinlock_exit();
  985 #endif
  986 #endif
  987                 if (cold && to_sbt == 0)
  988                         to_sbt = sbinuptime();
  989                 if ((flags & C_HARDCLOCK) == 0)
  990                         to_sbt += tick_sbt;
  991         } else
  992                 to_sbt = sbinuptime();
  993         if (SBT_MAX - to_sbt < sbt)
  994                 to_sbt = SBT_MAX;
  995         else
  996                 to_sbt += sbt;
  997         *res = to_sbt;
  998         to_pr = ((C_PRELGET(flags) < 0) ? sbt >> tc_precexp :
  999             sbt >> C_PRELGET(flags));
 1000         *prec_res = to_pr > precision ? to_pr : precision;
 1001 }
 1002 
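      /*
       * A worked example of callout_when() (illustrative): for a relative
       * request of sbt = 500 * SBT_1MS with flags = C_PREL(5) and
       * precision = 0, the code above yields *res of roughly the current
       * uptime plus 500 ms and *prec_res = sbt >> 5, i.e. about 15.6 ms of
       * allowed slop.  Without a C_PREL() flag the slop defaults to
       * sbt >> tc_precexp, which is derived from the
       * kern.timecounter.alloweddeviation tunable.
       */
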
 1003 /*
 1004  * New interface; clients allocate their own callout structures.
 1005  *
 1006  * callout_reset() - establish or change a timeout
 1007  * callout_stop() - disestablish a timeout
 1008  * callout_init() - initialize a callout structure so that it can
 1009  *      safely be passed to callout_reset() and callout_stop()
 1010  *
 1011  * <sys/callout.h> defines three convenience macros:
 1012  *
 1013  * callout_active() - returns truth if callout has not been stopped,
 1014  *      drained, or deactivated since the last time the callout was
 1015  *      reset.
 1016  * callout_pending() - returns truth if callout is still waiting for timeout
 1017  * callout_deactivate() - marks the callout as having been serviced
 1018  */
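      /*
       * A minimal usage sketch of this interface (illustrative only; the
       * softc "sc", its mutex "sc->mtx" and the handler "my_handler" are
       * hypothetical consumer code, not part of this file):
       *
       *      callout_init_mtx(&sc->co, &sc->mtx, 0);
       *
       *      mtx_lock(&sc->mtx);
       *      callout_reset_sbt(&sc->co, 500 * SBT_1MS, C_PREL(2),
       *          my_handler, sc, 0);                 (arm, ~500 ms out)
       *      mtx_unlock(&sc->mtx);
       *
       * Because of callout_init_mtx(), my_handler runs with sc->mtx held.
       * On teardown, callout_drain(&sc->co) waits for a running handler to
       * finish and must be called without sc->mtx held, since it may sleep;
       * callout_stop(&sc->co) with sc->mtx held is sufficient when only a
       * pending, not-yet-running invocation needs to be cancelled.
       */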
 1019 int
 1020 callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t prec,
 1021     callout_func_t *ftn, void *arg, int cpu, int flags)
 1022 {
 1023         sbintime_t to_sbt, precision;
 1024         struct callout_cpu *cc;
 1025         int cancelled, direct;
 1026         int ignore_cpu=0;
 1027 
 1028         cancelled = 0;
 1029         if (cpu == -1) {
 1030                 ignore_cpu = 1;
 1031         } else if ((cpu >= MAXCPU) ||
 1032                    ((CC_CPU(cpu))->cc_inited == 0)) {
 1033                 /* Invalid CPU spec */
 1034                 panic("Invalid CPU in callout %d", cpu);
 1035         }
 1036         callout_when(sbt, prec, flags, &to_sbt, &precision);
 1037 
 1038         /* 
 1039          * This flag used to be set by callout_cc_add(), but on the
 1040          * first call we could end up with the wrong direct flag
 1041          * if we don't set it before we add.
 1042          */
 1043         if (flags & C_DIRECT_EXEC) {
 1044                 direct = 1;
 1045         } else {
 1046                 direct = 0;
 1047         }
 1048         KASSERT(!direct || c->c_lock == NULL,
 1049             ("%s: direct callout %p has lock", __func__, c));
 1050         cc = callout_lock(c);
 1051         /*
 1052          * Don't allow migration of pre-allocated callouts lest they
 1053          * become unbalanced; also handle the case where the caller
 1054          * does not care which CPU is used.
 1055          */
 1056         if ((c->c_iflags & CALLOUT_LOCAL_ALLOC) ||
 1057             ignore_cpu) {
 1058                 cpu = c->c_cpu;
 1059         }
 1060 
 1061         if (cc_exec_curr(cc, direct) == c) {
 1062                 /*
 1063                  * We're being asked to reschedule a callout which is
 1064                  * currently in progress.  If there is a lock then we
 1065                  * can cancel the callout if it has not really started.
 1066                  */
 1067                 if (c->c_lock != NULL && !cc_exec_cancel(cc, direct))
 1068                         cancelled = cc_exec_cancel(cc, direct) = true;
 1069                 if (cc_exec_waiting(cc, direct) || cc_exec_drain(cc, direct)) {
 1070                         /*
 1071                          * Someone has called callout_drain to kill this
 1072                          * callout.  Don't reschedule.
 1073                          */
 1074                         CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
 1075                             cancelled ? "cancelled" : "failed to cancel",
 1076                             c, c->c_func, c->c_arg);
 1077                         CC_UNLOCK(cc);
 1078                         return (cancelled);
 1079                 }
 1080 #ifdef SMP
 1081                 if (callout_migrating(c)) {
 1082                         /* 
 1083                          * This only occurs when a second callout_reset_sbt_on()
 1084                          * call is made after a previous one moved the callout
 1085                          * into deferred migration (below).  Note we do *not*
 1086                          * change the prev_cpu even though the previous target
 1087                          * may be different.
 1088                          */
 1089                         cc_migration_cpu(cc, direct) = cpu;
 1090                         cc_migration_time(cc, direct) = to_sbt;
 1091                         cc_migration_prec(cc, direct) = precision;
 1092                         cc_migration_func(cc, direct) = ftn;
 1093                         cc_migration_arg(cc, direct) = arg;
 1094                         cancelled = 1;
 1095                         CC_UNLOCK(cc);
 1096                         return (cancelled);
 1097                 }
 1098 #endif
 1099         }
 1100         if (c->c_iflags & CALLOUT_PENDING) {
 1101                 if ((c->c_iflags & CALLOUT_PROCESSED) == 0) {
 1102                         if (cc_exec_next(cc) == c)
 1103                                 cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
 1104                         LIST_REMOVE(c, c_links.le);
 1105                 } else {
 1106                         TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
 1107                 }
 1108                 cancelled = 1;
 1109                 c->c_iflags &= ~ CALLOUT_PENDING;
 1110                 c->c_flags &= ~ CALLOUT_ACTIVE;
 1111         }
 1112 
 1113 #ifdef SMP
 1114         /*
 1115          * If the callout must migrate, try to perform the migration immediately.
 1116          * If the callout is currently running, just defer the migration
 1117          * to a more appropriate moment.
 1118          */
 1119         if (c->c_cpu != cpu) {
 1120                 if (cc_exec_curr(cc, direct) == c) {
 1121                         /* 
 1122                          * The pending flag will have been removed since we
 1123                          * are actually executing the callout on another
 1124                          * CPU.  That callout should be waiting on the
 1125                          * lock the caller holds.  If we set both the
 1126                          * active and pending flags after we return and the
 1127                          * lock on the executing callout proceeds, it
 1128                          * will then see pending is true and return.
 1129                          * When the actual callout execution returns,
 1130                          * the migration will occur in softclock_call_cc()
 1131                          * and this new callout will be placed on the
 1132                          * new CPU via a call to callout_cpu_switch(), which
 1133                          * will get the lock on the right CPU, followed
 1134                          * by a call to callout_cc_add(), which will add it
 1135                          * there (see above in softclock_call_cc()).
 1136                          */
 1137                         cc_migration_cpu(cc, direct) = cpu;
 1138                         cc_migration_time(cc, direct) = to_sbt;
 1139                         cc_migration_prec(cc, direct) = precision;
 1140                         cc_migration_func(cc, direct) = ftn;
 1141                         cc_migration_arg(cc, direct) = arg;
 1142                         c->c_iflags |= (CALLOUT_DFRMIGRATION | CALLOUT_PENDING);
 1143                         c->c_flags |= CALLOUT_ACTIVE;
 1144                         CTR6(KTR_CALLOUT,
 1145                     "migration of %p func %p arg %p in %d.%08x to %u deferred",
 1146                             c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
 1147                             (u_int)(to_sbt & 0xffffffff), cpu);
 1148                         CC_UNLOCK(cc);
 1149                         return (cancelled);
 1150                 }
 1151                 cc = callout_cpu_switch(c, cc, cpu);
 1152         }
 1153 #endif
 1154 
 1155         callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags);
 1156         CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x",
 1157             cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
 1158             (u_int)(to_sbt & 0xffffffff));
 1159         CC_UNLOCK(cc);
 1160 
 1161         return (cancelled);
 1162 }
 1163 
 1164 /*
 1165  * Common idioms that can be optimized in the future.
 1166  */
 1167 int
 1168 callout_schedule_on(struct callout *c, int to_ticks, int cpu)
 1169 {
 1170         return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
 1171 }
 1172 
 1173 int
 1174 callout_schedule(struct callout *c, int to_ticks)
 1175 {
 1176         return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
 1177 }
 1178 
 1179 int
 1180 _callout_stop_safe(struct callout *c, int flags, callout_func_t *drain)
 1181 {
 1182         struct callout_cpu *cc, *old_cc;
 1183         struct lock_class *class;
 1184         int direct, sq_locked, use_lock;
 1185         int cancelled, not_on_a_list;
 1186 
 1187         if ((flags & CS_DRAIN) != 0)
 1188                 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, c->c_lock,
 1189                     "calling %s", __func__);
 1190 
 1191         KASSERT((flags & CS_DRAIN) == 0 || drain == NULL,
 1192             ("Cannot set drain callback and CS_DRAIN flag at the same time"));
 1193 
 1194         /*
 1195          * Some old subsystems don't hold Giant while running a callout_stop(),
 1196          * so just discard this check for the moment.
 1197          */
 1198         if ((flags & CS_DRAIN) == 0 && c->c_lock != NULL) {
 1199                 if (c->c_lock == &Giant.lock_object)
 1200                         use_lock = mtx_owned(&Giant);
 1201                 else {
 1202                         use_lock = 1;
 1203                         class = LOCK_CLASS(c->c_lock);
 1204                         class->lc_assert(c->c_lock, LA_XLOCKED);
 1205                 }
 1206         } else
 1207                 use_lock = 0;
 1208         if (c->c_iflags & CALLOUT_DIRECT) {
 1209                 direct = 1;
 1210         } else {
 1211                 direct = 0;
 1212         }
 1213         sq_locked = 0;
 1214         old_cc = NULL;
 1215 again:
 1216         cc = callout_lock(c);
 1217 
 1218         if ((c->c_iflags & (CALLOUT_DFRMIGRATION | CALLOUT_PENDING)) ==
 1219             (CALLOUT_DFRMIGRATION | CALLOUT_PENDING) &&
 1220             ((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE)) {
 1221                 /*
 1222                  * Special case where this slipped in while we
 1223                  * were migrating *as* the callout is about to
 1224                  * execute. The caller probably holds the lock
 1225                  * the callout wants.
 1226                  *
 1227                  * Get rid of the migration first. Then set
 1228                  * the flag that tells this code *not* to
 1229                  * try to remove it from any lists (it's not
 1230                  * on one yet). When the callout wheel runs,
 1231                  * it will ignore this callout.
 1232                  */
 1233                 c->c_iflags &= ~CALLOUT_PENDING;
 1234                 c->c_flags &= ~CALLOUT_ACTIVE;
 1235                 not_on_a_list = 1;
 1236         } else {
 1237                 not_on_a_list = 0;
 1238         }
 1239 
 1240         /*
 1241          * If the callout was migrating while the callout cpu lock was
 1242          * dropped, just drop the sleepqueue lock and check the states
 1243          * again.
 1244          */
 1245         if (sq_locked != 0 && cc != old_cc) {
 1246 #ifdef SMP
 1247                 CC_UNLOCK(cc);
 1248                 sleepq_release(&cc_exec_waiting(old_cc, direct));
 1249                 sq_locked = 0;
 1250                 old_cc = NULL;
 1251                 goto again;
 1252 #else
 1253                 panic("migration should not happen");
 1254 #endif
 1255         }
 1256 
 1257         /*
 1258          * If the callout is running, try to stop it or drain it.
 1259          */
 1260         if (cc_exec_curr(cc, direct) == c) {
 1261                 /*
 1262                  * Whether we succeed in stopping it or not, we must clear
 1263                  * the active flag - this is what API users expect.  If we're
 1264                  * draining and the callout is currently executing, first wait
 1265                  * until it finishes.
 1266                  */
 1267                 if ((flags & CS_DRAIN) == 0)
 1268                         c->c_flags &= ~CALLOUT_ACTIVE;
 1269 
 1270                 if ((flags & CS_DRAIN) != 0) {
 1271                         /*
 1272                          * The current callout is running (or just
 1273                          * about to run) and blocking is allowed, so
 1274                          * just wait for the current invocation to
 1275                          * finish.
 1276                          */
 1277                         if (cc_exec_curr(cc, direct) == c) {
 1278                                 /*
 1279                                  * Use direct calls to sleepqueue interface
 1280                                  * instead of cv/msleep in order to avoid
 1281                                  * a LOR between cc_lock and sleepqueue
 1282                                  * chain spinlocks.  This piece of code
 1283                                  * emulates a msleep_spin() call actually.
 1284                                  *
 1285                                  * If we already have the sleepqueue chain
 1286                                  * locked, then we can safely block.  If we
 1287                                  * don't already have it locked, however,
 1288                                  * we have to drop the cc_lock to lock
 1289                                  * it.  This opens several races, so we
 1290                                  * restart at the beginning once we have
 1291                                  * both locks.  If nothing has changed, then
 1292                                  * we will end up back here with sq_locked
 1293                                  * set.
 1294                                  */
 1295                                 if (!sq_locked) {
 1296                                         CC_UNLOCK(cc);
 1297                                         sleepq_lock(
 1298                                             &cc_exec_waiting(cc, direct));
 1299                                         sq_locked = 1;
 1300                                         old_cc = cc;
 1301                                         goto again;
 1302                                 }
 1303 
 1304                                 /*
 1305                                  * Migration could be cancelled here, but
 1306                                  * since it is not yet certain when the
 1307                                  * callout will be processed, just let
 1308                                  * softclock() take care of it.
 1309                                  */
 1310                                 cc_exec_waiting(cc, direct) = true;
 1311                                 DROP_GIANT();
 1312                                 CC_UNLOCK(cc);
 1313                                 sleepq_add(
 1314                                     &cc_exec_waiting(cc, direct),
 1315                                     &cc->cc_lock.lock_object, "codrain",
 1316                                     SLEEPQ_SLEEP, 0);
 1317                                 sleepq_wait(
 1318                                     &cc_exec_waiting(cc, direct),
 1319                                              0);
 1320                                 sq_locked = 0;
 1321                                 old_cc = NULL;
 1322 
 1323                                 /* Reacquire locks previously released. */
 1324                                 PICKUP_GIANT();
 1325                                 goto again;
 1326                         }
 1327                         c->c_flags &= ~CALLOUT_ACTIVE;
 1328                 } else if (use_lock &&
 1329                            !cc_exec_cancel(cc, direct) && (drain == NULL)) {
 1330                         
 1331                         /*
 1332                          * The current callout is waiting for its
 1333                          * lock which we hold.  Cancel the callout
 1334                          * and return.  After our caller drops the
 1335                          * lock, the callout will be skipped in
 1336                          * softclock(). This *only* works with a
 1337                          * callout_stop() *not* callout_drain() or
 1338                          * callout_async_drain().
 1339                          */
 1340                         cc_exec_cancel(cc, direct) = true;
 1341                         CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
 1342                             c, c->c_func, c->c_arg);
 1343                         KASSERT(!cc_cce_migrating(cc, direct),
 1344                             ("callout wrongly scheduled for migration"));
 1345                         if (callout_migrating(c)) {
 1346                                 c->c_iflags &= ~CALLOUT_DFRMIGRATION;
 1347 #ifdef SMP
 1348                                 cc_migration_cpu(cc, direct) = CPUBLOCK;
 1349                                 cc_migration_time(cc, direct) = 0;
 1350                                 cc_migration_prec(cc, direct) = 0;
 1351                                 cc_migration_func(cc, direct) = NULL;
 1352                                 cc_migration_arg(cc, direct) = NULL;
 1353 #endif
 1354                         }
 1355                         CC_UNLOCK(cc);
 1356                         KASSERT(!sq_locked, ("sleepqueue chain locked"));
 1357                         return (1);
 1358                 } else if (callout_migrating(c)) {
 1359                         /*
 1360                          * The callout is currently being serviced
 1361                          * and the "next" callout is scheduled at
 1362                          * its completion with a migration. We remove
 1363                          * the migration flag so it *won't* get rescheduled,
 1364                          * but we can't stop the one that's running so
 1365                          * we return 0.
 1366                          */
 1367                         c->c_iflags &= ~CALLOUT_DFRMIGRATION;
 1368 #ifdef SMP
 1369                         /*
 1370                          * We can't call cc_cce_cleanup() here because
 1371                          * it would also clear .ce_curr, which is still
 1372                          * running.  Clearing the migration state below
 1373                          * is what prevents a reschedule of the callout
 1374                          * when the execution completes.
 1375                          */
 1376                         cc_migration_cpu(cc, direct) = CPUBLOCK;
 1377                         cc_migration_time(cc, direct) = 0;
 1378                         cc_migration_prec(cc, direct) = 0;
 1379                         cc_migration_func(cc, direct) = NULL;
 1380                         cc_migration_arg(cc, direct) = NULL;
 1381 #endif
 1382                         CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
 1383                             c, c->c_func, c->c_arg);
 1384                         if (drain) {
 1385                                 KASSERT(cc_exec_drain(cc, direct) == NULL,
 1386                                     ("callout drain function already set to %p",
 1387                                     cc_exec_drain(cc, direct)));
 1388                                 cc_exec_drain(cc, direct) = drain;
 1389                         }
 1390                         CC_UNLOCK(cc);
 1391                         return ((flags & CS_EXECUTING) != 0);
 1392                 } else {
 1393                         CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
 1394                             c, c->c_func, c->c_arg);
 1395                         if (drain) {
 1396                                 KASSERT(cc_exec_drain(cc, direct) == NULL,
 1397                                     ("callout drain function already set to %p",
 1398                                     cc_exec_drain(cc, direct)));
 1399                                 cc_exec_drain(cc, direct) = drain;
 1400                         }
 1401                 }
 1402                 KASSERT(!sq_locked, ("sleepqueue chain still locked"));
 1403                 cancelled = ((flags & CS_EXECUTING) != 0);
 1404         } else
 1405                 cancelled = 1;
 1406 
 1407         if (sq_locked)
 1408                 sleepq_release(&cc_exec_waiting(cc, direct));
 1409 
 1410         if ((c->c_iflags & CALLOUT_PENDING) == 0) {
 1411                 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
 1412                     c, c->c_func, c->c_arg);
 1413                 /*
 1414                  * Return a negative value if the callout is neither
 1415                  * scheduled nor executing.
 1416                  */
 1417                 if (cc_exec_curr(cc, direct) != c)
 1418                         cancelled = -1;
 1419                 CC_UNLOCK(cc);
 1420                 return (cancelled);
 1421         }
 1422 
 1423         c->c_iflags &= ~CALLOUT_PENDING;
 1424         c->c_flags &= ~CALLOUT_ACTIVE;
 1425 
 1426         CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
 1427             c, c->c_func, c->c_arg);
 1428         if (not_on_a_list == 0) {
 1429                 if ((c->c_iflags & CALLOUT_PROCESSED) == 0) {
 1430                         if (cc_exec_next(cc) == c)
 1431                                 cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
 1432                         LIST_REMOVE(c, c_links.le);
 1433                 } else {
 1434                         TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
 1435                 }
 1436         }
 1437         callout_cc_del(c, cc);
 1438         CC_UNLOCK(cc);
 1439         return (cancelled);
 1440 }
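
/*
 * Editor's note: an illustrative sketch, not part of kern_timeout.c.
 * _callout_stop_safe() above is the common backend behind callout_stop(),
 * callout_drain() and callout_async_drain(); per callout(9) the return
 * value is 1 when a pending callout was cancelled, 0 when it could not be
 * stopped because it is (or has just finished) running, and -1 when it was
 * neither scheduled nor running.  The foo_cancel() name is hypothetical.
 */
static void
foo_cancel(struct callout *co)
{
        int ret;

        ret = callout_stop(co);
        if (ret > 0)
                printf("foo: callout cancelled before it could run\n");
        else if (ret == 0)
                printf("foo: callout is running; its handler will still fire\n");
        else
                printf("foo: callout was not scheduled\n");
}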
 1441 
 1442 void
 1443 callout_init(struct callout *c, int mpsafe)
 1444 {
 1445         bzero(c, sizeof *c);
 1446         if (mpsafe) {
 1447                 c->c_lock = NULL;
 1448                 c->c_iflags = CALLOUT_RETURNUNLOCKED;
 1449         } else {
 1450                 c->c_lock = &Giant.lock_object;
 1451                 c->c_iflags = 0;
 1452         }
 1453         c->c_cpu = timeout_cpu;
 1454 }
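
/*
 * Editor's note: an illustrative usage sketch with hypothetical foo_*
 * names, relying only on the callout_reset() interface documented in
 * callout(9).  A non-zero "mpsafe" argument tells callout_init() that the
 * handler needs no Giant protection; the handler below simply re-arms
 * itself once per second.
 */
static struct callout foo_co;

static void
foo_tick(void *arg)
{

        /* Runs from a softclock thread; re-arm for one second from now. */
        callout_reset(&foo_co, hz, foo_tick, arg);
}

static void
foo_start(void)
{

        callout_init(&foo_co, 1);               /* MP-safe: no lock, no Giant */
        callout_reset(&foo_co, hz, foo_tick, NULL);
}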
 1455 
 1456 void
 1457 _callout_init_lock(struct callout *c, struct lock_object *lock, int flags)
 1458 {
 1459         bzero(c, sizeof *c);
 1460         c->c_lock = lock;
 1461         KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
 1462             ("callout_init_lock: bad flags %d", flags));
 1463         KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
 1464             ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
 1465         KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
 1466             (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
 1467             __func__));
 1468         c->c_iflags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
 1469         c->c_cpu = timeout_cpu;
 1470 }
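
/*
 * Editor's note: an illustrative sketch with hypothetical bar_* names;
 * <sys/lock.h>, <sys/mutex.h> and <sys/callout.h> are assumed included as
 * they are in this file.  Drivers normally reach _callout_init_lock()
 * through wrappers such as callout_init_mtx() from <sys/callout.h>; the
 * associated lock is then taken around every invocation of the handler and
 * should be held when arming or stopping the callout.
 */
static struct mtx       bar_mtx;
static struct callout   bar_co;

static void
bar_handler(void *arg)
{

        mtx_assert(&bar_mtx, MA_OWNED); /* softclock acquired bar_mtx for us */
}

static void
bar_setup(void)
{

        mtx_init(&bar_mtx, "bar timer", NULL, MTX_DEF);
        callout_init_mtx(&bar_co, &bar_mtx, 0);
        mtx_lock(&bar_mtx);
        callout_reset(&bar_co, hz, bar_handler, NULL);
        mtx_unlock(&bar_mtx);
}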
 1471 
 1472 #ifdef APM_FIXUP_CALLTODO
 1473 /*
 1474  * Adjust the kernel calltodo timeout list.  This routine is used after
 1475  * an APM resume to recalculate the calltodo timer list values by the
 1476  * number of ticks we have been asleep.  The next hardclock() will detect
 1477  * that there are expired timers and run softclock() to execute them.
 1478  *
 1479  * Please note, I have not done an exhaustive analysis of what code this
 1480  * might break.  I am motivated to have my select()'s and alarm()'s that
 1481  * expired during suspend fire upon resume so that the applications
 1482  * which set the timer can do the maintenance the timer was for as close
 1483  * as possible to the originally intended time.  Testing this code for a
 1484  * week showed that resuming from a suspend resulted in 22 to 25 timers
 1485  * firing, which seemed independent of whether the suspend was 2 hours or
 1486  * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 1487  */
 1488 void
 1489 adjust_timeout_calltodo(struct timeval *time_change)
 1490 {
 1491         struct callout *p;
 1492         unsigned long delta_ticks;
 1493 
 1494         /* 
 1495          * How many ticks were we asleep?
 1496          * (stolen from tvtohz()).
 1497          */
 1498 
 1499         /* Nothing to do if the clock appears to have gone backwards. */
 1500         if (time_change->tv_sec < 0)
 1501                 return;
 1502         else if (time_change->tv_sec <= LONG_MAX / 1000000)
 1503                 delta_ticks = howmany(time_change->tv_sec * 1000000 +
 1504                     time_change->tv_usec, tick) + 1;
 1505         else if (time_change->tv_sec <= LONG_MAX / hz)
 1506                 delta_ticks = time_change->tv_sec * hz +
 1507                     howmany(time_change->tv_usec, tick) + 1;
 1508         else
 1509                 delta_ticks = LONG_MAX;
 1510 
 1511         if (delta_ticks > INT_MAX)
 1512                 delta_ticks = INT_MAX;
 1513 
 1514         /* 
 1515          * Now rip through the timer calltodo list looking for timers
 1516          * to expire.
 1517          */
 1518 
 1519         /* don't collide with softclock() */
 1520         CC_LOCK(cc);
 1521         for (p = calltodo.c_next; p != NULL; p = p->c_next) {
 1522                 p->c_time -= delta_ticks;
 1523 
 1524                 /* Break if the timer had more time on it than delta_ticks */
 1525                 if (p->c_time > 0)
 1526                         break;
 1527 
 1528                 /* take back the ticks the timer didn't use (p->c_time <= 0) */
 1529                 delta_ticks = -p->c_time;
 1530         }
 1531         CC_UNLOCK(cc);
 1532 
 1533         return;
 1534 }
 1535 #endif /* APM_FIXUP_CALLTODO */
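
/*
 * Editor's note: a worked example of the tick conversion above, assuming
 * the common configuration hz = 1000 (so tick = 1000000 / hz = 1000 us).
 * A 2-hour suspend gives time_change = { .tv_sec = 7200, .tv_usec = 0 };
 * the first branch applies, so
 *
 *      delta_ticks = howmany(7200 * 1000000 + 0, 1000) + 1 = 7200001,
 *
 * which is below INT_MAX and is then charged against the delta-encoded
 * c_time values on the calltodo list, expiring every entry whose remaining
 * time has passed during the suspend.
 */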
 1536 
 1537 static int
 1538 flssbt(sbintime_t sbt)
 1539 {
 1540 
 1541         sbt += (uint64_t)sbt >> 1;
 1542         if (sizeof(long) >= sizeof(sbintime_t))
 1543                 return (flsl(sbt));
 1544         if (sbt >= SBT_1S)
 1545                 return (flsl(((uint64_t)sbt) >> 32) + 32);
 1546         return (flsl(sbt));
 1547 }
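
/*
 * Editor's note: an illustrative userland sketch of the bucketing rule
 * used by flssbt() above, assuming a 64-bit long and flsl() from
 * <strings.h> as on FreeBSD.  Adding half of the value before taking the
 * highest set bit means a quantity moves up to the next power-of-two
 * bucket once it exceeds roughly 4/3 of the lower power of two, instead of
 * strictly taking floor(log2).
 */
#include <strings.h>
#include <stdint.h>

typedef int64_t my_sbintime_t;  /* same representation as the kernel type */

static int
flssbt_sketch(my_sbintime_t sbt)
{

        sbt += (uint64_t)sbt >> 1;      /* bias upward by half the value */
        return (flsl((long)sbt));       /* index of the highest set bit */
}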
 1548 
 1549 /*
 1550  * Dump an immediate statistics snapshot of the scheduled callouts.
 1551  */
 1552 static int
 1553 sysctl_kern_callout_stat(SYSCTL_HANDLER_ARGS)
 1554 {
 1555         struct callout *tmp;
 1556         struct callout_cpu *cc;
 1557         struct callout_list *sc;
 1558         sbintime_t maxpr, maxt, medpr, medt, now, spr, st, t;
 1559         int ct[64], cpr[64], ccpbk[32];
 1560         int error, val, i, count, tcum, pcum, maxc, c, medc;
 1561 #ifdef SMP
 1562         int cpu;
 1563 #endif
 1564 
 1565         val = 0;
 1566         error = sysctl_handle_int(oidp, &val, 0, req);
 1567         if (error != 0 || req->newptr == NULL)
 1568                 return (error);
 1569         count = maxc = 0;
 1570         st = spr = maxt = maxpr = 0;
 1571         bzero(ccpbk, sizeof(ccpbk));
 1572         bzero(ct, sizeof(ct));
 1573         bzero(cpr, sizeof(cpr));
 1574         now = sbinuptime();
 1575 #ifdef SMP
 1576         CPU_FOREACH(cpu) {
 1577                 cc = CC_CPU(cpu);
 1578 #else
 1579                 cc = CC_CPU(timeout_cpu);
 1580 #endif
 1581                 CC_LOCK(cc);
 1582                 for (i = 0; i < callwheelsize; i++) {
 1583                         sc = &cc->cc_callwheel[i];
 1584                         c = 0;
 1585                         LIST_FOREACH(tmp, sc, c_links.le) {
 1586                                 c++;
 1587                                 t = tmp->c_time - now;
 1588                                 if (t < 0)
 1589                                         t = 0;
 1590                                 st += t / SBT_1US;
 1591                                 spr += tmp->c_precision / SBT_1US;
 1592                                 if (t > maxt)
 1593                                         maxt = t;
 1594                                 if (tmp->c_precision > maxpr)
 1595                                         maxpr = tmp->c_precision;
 1596                                 ct[flssbt(t)]++;
 1597                                 cpr[flssbt(tmp->c_precision)]++;
 1598                         }
 1599                         if (c > maxc)
 1600                                 maxc = c;
 1601                         ccpbk[fls(c + c / 2)]++;
 1602                         count += c;
 1603                 }
 1604                 CC_UNLOCK(cc);
 1605 #ifdef SMP
 1606         }
 1607 #endif
 1608 
 1609         for (i = 0, tcum = 0; i < 64 && tcum < count / 2; i++)
 1610                 tcum += ct[i];
 1611         medt = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
 1612         for (i = 0, pcum = 0; i < 64 && pcum < count / 2; i++)
 1613                 pcum += cpr[i];
 1614         medpr = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
 1615         for (i = 0, c = 0; i < 32 && c < count / 2; i++)
 1616                 c += ccpbk[i];
 1617         medc = (i >= 2) ? (1 << (i - 2)) : 0;
 1618 
 1619         printf("Scheduled callouts statistic snapshot:\n");
 1620         printf("  Callouts: %6d  Buckets: %6d*%-3d  Bucket size: 0.%06ds\n",
 1621             count, callwheelsize, mp_ncpus, 1000000 >> CC_HASH_SHIFT);
 1622         printf("  C/Bk: med %5d         avg %6d.%06jd  max %6d\n",
 1623             medc,
 1624             count / callwheelsize / mp_ncpus,
 1625             (uint64_t)count * 1000000 / callwheelsize / mp_ncpus % 1000000,
 1626             maxc);
 1627         printf("  Time: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
 1628             medt / SBT_1S, (medt & 0xffffffff) * 1000000 >> 32,
 1629             (st / count) / 1000000, (st / count) % 1000000,
 1630             maxt / SBT_1S, (maxt & 0xffffffff) * 1000000 >> 32);
 1631         printf("  Prec: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
 1632             medpr / SBT_1S, (medpr & 0xffffffff) * 1000000 >> 32,
 1633             (spr / count) / 1000000, (spr / count) % 1000000,
 1634             maxpr / SBT_1S, (maxpr & 0xffffffff) * 1000000 >> 32);
 1635         printf("  Distribution:       \tbuckets\t   time\t   tcum\t"
 1636             "   prec\t   pcum\n");
 1637         for (i = 0, tcum = pcum = 0; i < 64; i++) {
 1638                 if (ct[i] == 0 && cpr[i] == 0)
 1639                         continue;
 1640                 t = (i != 0) ? (((sbintime_t)1) << (i - 1)) : 0;
 1641                 tcum += ct[i];
 1642                 pcum += cpr[i];
 1643                 printf("  %10jd.%06jds\t 2**%d\t%7d\t%7d\t%7d\t%7d\n",
 1644                     t / SBT_1S, (t & 0xffffffff) * 1000000 >> 32,
 1645                     i - 1 - (32 - CC_HASH_SHIFT),
 1646                     ct[i], tcum, cpr[i], pcum);
 1647         }
 1648         return (error);
 1649 }
 1650 SYSCTL_PROC(_kern, OID_AUTO, callout_stat,
 1651     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
 1652     0, 0, sysctl_kern_callout_stat, "I",
 1653     "Dump immediate statistic snapshot of the scheduled callouts");
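
/*
 * Editor's note: an illustrative userland sketch.  Writing any integer to
 * kern.callout_stat (for example "sysctl kern.callout_stat=1") makes the
 * handler above print its snapshot via printf(9), i.e. to the console and
 * kernel message buffer; the same write can be issued programmatically:
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>

int
main(void)
{
        int one = 1;

        /* The value written is ignored; the write itself triggers the dump. */
        if (sysctlbyname("kern.callout_stat", NULL, NULL, &one,
            sizeof(one)) != 0)
                err(1, "sysctlbyname(kern.callout_stat)");
        return (0);
}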
 1654 
 1655 #ifdef DDB
 1656 static void
 1657 _show_callout(struct callout *c)
 1658 {
 1659 
 1660         db_printf("callout %p\n", c);
 1661 #define C_DB_PRINTF(f, e)       db_printf("   %s = " f "\n", #e, c->e);
 1662         db_printf("   &c_links = %p\n", &(c->c_links));
 1663         C_DB_PRINTF("%" PRId64, c_time);
 1664         C_DB_PRINTF("%" PRId64, c_precision);
 1665         C_DB_PRINTF("%p",       c_arg);
 1666         C_DB_PRINTF("%p",       c_func);
 1667         C_DB_PRINTF("%p",       c_lock);
 1668         C_DB_PRINTF("%#x",      c_flags);
 1669         C_DB_PRINTF("%#x",      c_iflags);
 1670         C_DB_PRINTF("%d",       c_cpu);
 1671 #undef  C_DB_PRINTF
 1672 }
 1673 
 1674 DB_SHOW_COMMAND(callout, db_show_callout)
 1675 {
 1676 
 1677         if (!have_addr) {
 1678                 db_printf("usage: show callout <struct callout *>\n");
 1679                 return;
 1680         }
 1681 
 1682         _show_callout((struct callout *)addr);
 1683 }
 1684 #endif /* DDB */
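
/*
 * Editor's note: usage sketch for the DDB command registered above.  From
 * the in-kernel debugger prompt it is invoked as
 *
 *      db> show callout <struct callout address>
 *
 * and dumps the c_time, c_precision, c_func/c_arg, lock, flag and CPU
 * fields via _show_callout(); without an address it prints the usage
 * message instead.
 */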
