The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_timeout.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * SPDX-License-Identifier: BSD-3-Clause
    3  *
    4  * Copyright (c) 1982, 1986, 1991, 1993
    5  *      The Regents of the University of California.  All rights reserved.
    6  * (c) UNIX System Laboratories, Inc.
    7  * All or some portions of this file are derived from material licensed
    8  * to the University of California by American Telephone and Telegraph
    9  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
   10  * the permission of UNIX System Laboratories, Inc.
   11  *
   12  * Redistribution and use in source and binary forms, with or without
   13  * modification, are permitted provided that the following conditions
   14  * are met:
   15  * 1. Redistributions of source code must retain the above copyright
   16  *    notice, this list of conditions and the following disclaimer.
   17  * 2. Redistributions in binary form must reproduce the above copyright
   18  *    notice, this list of conditions and the following disclaimer in the
   19  *    documentation and/or other materials provided with the distribution.
   20  * 3. Neither the name of the University nor the names of its contributors
   21  *    may be used to endorse or promote products derived from this software
   22  *    without specific prior written permission.
   23  *
   24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   34  * SUCH DAMAGE.
   35  *
   36  *      From: @(#)kern_clock.c  8.5 (Berkeley) 1/21/94
   37  */
   38 
   39 #include <sys/cdefs.h>
   40 __FBSDID("$FreeBSD$");
   41 
   42 #include "opt_callout_profiling.h"
   43 #include "opt_ddb.h"
   44 #include "opt_rss.h"
   45 
   46 #include <sys/param.h>
   47 #include <sys/systm.h>
   48 #include <sys/bus.h>
   49 #include <sys/callout.h>
   50 #include <sys/domainset.h>
   51 #include <sys/file.h>
   52 #include <sys/interrupt.h>
   53 #include <sys/kernel.h>
   54 #include <sys/ktr.h>
   55 #include <sys/lock.h>
   56 #include <sys/malloc.h>
   57 #include <sys/mutex.h>
   58 #include <sys/proc.h>
   59 #include <sys/sdt.h>
   60 #include <sys/sleepqueue.h>
   61 #include <sys/sysctl.h>
   62 #include <sys/smp.h>
   63 
   64 #ifdef DDB
   65 #include <ddb/ddb.h>
   66 #include <ddb/db_sym.h>
   67 #include <machine/_inttypes.h>
   68 #endif
   69 
   70 #ifdef SMP
   71 #include <machine/cpu.h>
   72 #endif
   73 
   74 DPCPU_DECLARE(sbintime_t, hardclocktime);
   75 
   76 SDT_PROVIDER_DEFINE(callout_execute);
   77 SDT_PROBE_DEFINE1(callout_execute, , , callout__start, "struct callout *");
   78 SDT_PROBE_DEFINE1(callout_execute, , , callout__end, "struct callout *");
   79 
   80 #ifdef CALLOUT_PROFILING
   81 static int avg_depth;
   82 SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
   83     "Average number of items examined per softclock call. Units = 1/1000");
   84 static int avg_gcalls;
   85 SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
   86     "Average number of Giant callouts made per softclock call. Units = 1/1000");
   87 static int avg_lockcalls;
   88 SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
   89     "Average number of lock callouts made per softclock call. Units = 1/1000");
   90 static int avg_mpcalls;
   91 SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
   92     "Average number of MP callouts made per softclock call. Units = 1/1000");
   93 static int avg_depth_dir;
   94 SYSCTL_INT(_debug, OID_AUTO, to_avg_depth_dir, CTLFLAG_RD, &avg_depth_dir, 0,
   95     "Average number of direct callouts examined per callout_process call. "
   96     "Units = 1/1000");
   97 static int avg_lockcalls_dir;
   98 SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls_dir, CTLFLAG_RD,
   99     &avg_lockcalls_dir, 0, "Average number of lock direct callouts made per "
  100     "callout_process call. Units = 1/1000");
  101 static int avg_mpcalls_dir;
  102 SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls_dir, CTLFLAG_RD, &avg_mpcalls_dir,
  103     0, "Average number of MP direct callouts made per callout_process call. "
  104     "Units = 1/1000");
  105 #endif
  106 
  107 static int ncallout;
  108 SYSCTL_INT(_kern, OID_AUTO, ncallout, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &ncallout, 0,
  109     "Number of entries in callwheel and size of timeout() preallocation");
  110 
  111 #ifdef  RSS
  112 static int pin_default_swi = 1;
  113 static int pin_pcpu_swi = 1;
  114 #else
  115 static int pin_default_swi = 0;
  116 static int pin_pcpu_swi = 0;
  117 #endif
  118 
  119 SYSCTL_INT(_kern, OID_AUTO, pin_default_swi, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pin_default_swi,
  120     0, "Pin the default (non-per-cpu) swi (shared with PCPU 0 swi)");
  121 SYSCTL_INT(_kern, OID_AUTO, pin_pcpu_swi, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pin_pcpu_swi,
  122     0, "Pin the per-CPU swis (except PCPU 0, which is also default)");
  123 
  124 /*
  125  * TODO:
  126  *      allocate more timeout table slots when table overflows.
  127  */
  128 static u_int __read_mostly callwheelsize;
  129 static u_int __read_mostly callwheelmask;
  130 
  131 /*
  132  * The callout cpu exec entities represent informations necessary for
  133  * describing the state of callouts currently running on the CPU and the ones
  134  * necessary for migrating callouts to the new callout cpu. In particular,
  135  * the first entry of the array cc_exec_entity holds informations for callout
  136  * running in SWI thread context, while the second one holds informations
  137  * for callout running directly from hardware interrupt context.
  138  * The cached informations are very important for deferring migration when
  139  * the migrating callout is already running.
  140  */
  141 struct cc_exec {
  142         struct callout          *cc_curr;
  143         callout_func_t          *cc_drain;
  144         void                    *cc_last_func;
  145         void                    *cc_last_arg;
  146 #ifdef SMP
  147         callout_func_t          *ce_migration_func;
  148         void                    *ce_migration_arg;
  149         sbintime_t              ce_migration_time;
  150         sbintime_t              ce_migration_prec;
  151         int                     ce_migration_cpu;
  152 #endif
  153         bool                    cc_cancel;
  154         bool                    cc_waiting;
  155 };
  156 
  157 /*
  158  * There is one struct callout_cpu per cpu, holding all relevant
  159  * state for the callout processing thread on the individual CPU.
  160  */
  161 struct callout_cpu {
  162         struct mtx_padalign     cc_lock;
  163         struct cc_exec          cc_exec_entity[2];
  164         struct callout          *cc_next;
  165         struct callout_list     *cc_callwheel;
  166         struct callout_tailq    cc_expireq;
  167         sbintime_t              cc_firstevent;
  168         sbintime_t              cc_lastscan;
  169         void                    *cc_cookie;
  170         u_int                   cc_bucket;
  171         u_int                   cc_inited;
  172 #ifdef KTR
  173         char                    cc_ktr_event_name[20];
  174 #endif
  175 };
  176 
  177 #define callout_migrating(c)    ((c)->c_iflags & CALLOUT_DFRMIGRATION)
  178 
  179 #define cc_exec_curr(cc, dir)           cc->cc_exec_entity[dir].cc_curr
  180 #define cc_exec_last_func(cc, dir)      cc->cc_exec_entity[dir].cc_last_func
  181 #define cc_exec_last_arg(cc, dir)       cc->cc_exec_entity[dir].cc_last_arg
  182 #define cc_exec_drain(cc, dir)          cc->cc_exec_entity[dir].cc_drain
  183 #define cc_exec_next(cc)                cc->cc_next
  184 #define cc_exec_cancel(cc, dir)         cc->cc_exec_entity[dir].cc_cancel
  185 #define cc_exec_waiting(cc, dir)        cc->cc_exec_entity[dir].cc_waiting
  186 #ifdef SMP
  187 #define cc_migration_func(cc, dir)      cc->cc_exec_entity[dir].ce_migration_func
  188 #define cc_migration_arg(cc, dir)       cc->cc_exec_entity[dir].ce_migration_arg
  189 #define cc_migration_cpu(cc, dir)       cc->cc_exec_entity[dir].ce_migration_cpu
  190 #define cc_migration_time(cc, dir)      cc->cc_exec_entity[dir].ce_migration_time
  191 #define cc_migration_prec(cc, dir)      cc->cc_exec_entity[dir].ce_migration_prec
  192 
  193 static struct callout_cpu cc_cpu[MAXCPU];
  194 #define CPUBLOCK        MAXCPU
  195 #define CC_CPU(cpu)     (&cc_cpu[(cpu)])
  196 #define CC_SELF()       CC_CPU(PCPU_GET(cpuid))
  197 #else
  198 static struct callout_cpu cc_cpu;
  199 #define CC_CPU(cpu)     (&cc_cpu)
  200 #define CC_SELF()       (&cc_cpu)
  201 #endif
  202 #define CC_LOCK(cc)     mtx_lock_spin(&(cc)->cc_lock)
  203 #define CC_UNLOCK(cc)   mtx_unlock_spin(&(cc)->cc_lock)
  204 #define CC_LOCK_ASSERT(cc)      mtx_assert(&(cc)->cc_lock, MA_OWNED)
  205 
  206 static int __read_mostly cc_default_cpu;
  207 
  208 static void     callout_cpu_init(struct callout_cpu *cc, int cpu);
  209 static void     softclock_call_cc(struct callout *c, struct callout_cpu *cc,
  210 #ifdef CALLOUT_PROFILING
  211                     int *mpcalls, int *lockcalls, int *gcalls,
  212 #endif
  213                     int direct);
  214 
  215 static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");
  216 
  217 /**
  218  * Locked by cc_lock:
  219  *   cc_curr         - If a callout is in progress, it is cc_curr.
  220  *                     If cc_curr is non-NULL, threads waiting in
  221  *                     callout_drain() will be woken up as soon as the
  222  *                     relevant callout completes.
  223  *   cc_cancel       - Changing to 1 with both callout_lock and cc_lock held
  224  *                     guarantees that the current callout will not run.
  225  *                     The softclock() function sets this to 0 before it
  226  *                     drops callout_lock to acquire c_lock, and it calls
  227  *                     the handler only if curr_cancelled is still 0 after
  228  *                     cc_lock is successfully acquired.
  229  *   cc_waiting      - If a thread is waiting in callout_drain(), then
  230  *                     callout_wait is nonzero.  Set only when
  231  *                     cc_curr is non-NULL.
  232  */
  233 
  234 /*
  235  * Resets the execution entity tied to a specific callout cpu.
  236  */
  237 static void
  238 cc_cce_cleanup(struct callout_cpu *cc, int direct)
  239 {
  240 
  241         cc_exec_curr(cc, direct) = NULL;
  242         cc_exec_cancel(cc, direct) = false;
  243         cc_exec_waiting(cc, direct) = false;
  244 #ifdef SMP
  245         cc_migration_cpu(cc, direct) = CPUBLOCK;
  246         cc_migration_time(cc, direct) = 0;
  247         cc_migration_prec(cc, direct) = 0;
  248         cc_migration_func(cc, direct) = NULL;
  249         cc_migration_arg(cc, direct) = NULL;
  250 #endif
  251 }
  252 
  253 /*
  254  * Checks if migration is requested by a specific callout cpu.
  255  */
  256 static int
  257 cc_cce_migrating(struct callout_cpu *cc, int direct)
  258 {
  259 
  260 #ifdef SMP
  261         return (cc_migration_cpu(cc, direct) != CPUBLOCK);
  262 #else
  263         return (0);
  264 #endif
  265 }
  266 
  267 /*
  268  * Kernel low level callwheel initialization
  269  * called on the BSP during kernel startup.
  270  */
  271 static void
  272 callout_callwheel_init(void *dummy)
  273 {
  274         struct callout_cpu *cc;
  275         int cpu;
  276 
  277         /*
  278          * Calculate the size of the callout wheel and the preallocated
  279          * timeout() structures.
  280          * XXX: Clip callout to result of previous function of maxusers
  281          * maximum 384.  This is still huge, but acceptable.
  282          */
  283         ncallout = imin(16 + maxproc + maxfiles, 18508);
  284         TUNABLE_INT_FETCH("kern.ncallout", &ncallout);
  285 
  286         /*
  287          * Calculate callout wheel size, should be next power of two higher
  288          * than 'ncallout'.
  289          */
  290         callwheelsize = 1 << fls(ncallout);
  291         callwheelmask = callwheelsize - 1;
  292 
  293         /*
  294          * Fetch whether we're pinning the swi's or not.
  295          */
  296         TUNABLE_INT_FETCH("kern.pin_default_swi", &pin_default_swi);
  297         TUNABLE_INT_FETCH("kern.pin_pcpu_swi", &pin_pcpu_swi);
  298 
  299         /*
  300          * Initialize callout wheels.  The software interrupt threads
  301          * are created later.
  302          */
  303         cc_default_cpu = PCPU_GET(cpuid);
  304         CPU_FOREACH(cpu) {
  305                 cc = CC_CPU(cpu);
  306                 callout_cpu_init(cc, cpu);
  307         }
  308 }
  309 SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);
  310 
  311 /*
  312  * Initialize the per-cpu callout structures.
  313  */
  314 static void
  315 callout_cpu_init(struct callout_cpu *cc, int cpu)
  316 {
  317         int i;
  318 
  319         mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
  320         cc->cc_inited = 1;
  321         cc->cc_callwheel = malloc_domainset(sizeof(struct callout_list) *
  322             callwheelsize, M_CALLOUT,
  323             DOMAINSET_PREF(pcpu_find(cpu)->pc_domain), M_WAITOK);
  324         for (i = 0; i < callwheelsize; i++)
  325                 LIST_INIT(&cc->cc_callwheel[i]);
  326         TAILQ_INIT(&cc->cc_expireq);
  327         cc->cc_firstevent = SBT_MAX;
  328         for (i = 0; i < 2; i++)
  329                 cc_cce_cleanup(cc, i);
  330 #ifdef KTR
  331         snprintf(cc->cc_ktr_event_name, sizeof(cc->cc_ktr_event_name),
  332             "callwheel cpu %d", cpu);
  333 #endif
  334 }
  335 
  336 #ifdef SMP
  337 /*
  338  * Switches the cpu tied to a specific callout.
  339  * The function expects a locked incoming callout cpu and returns with
  340  * locked outcoming callout cpu.
  341  */
  342 static struct callout_cpu *
  343 callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
  344 {
  345         struct callout_cpu *new_cc;
  346 
  347         MPASS(c != NULL && cc != NULL);
  348         CC_LOCK_ASSERT(cc);
  349 
  350         /*
  351          * Avoid interrupts and preemption firing after the callout cpu
  352          * is blocked in order to avoid deadlocks as the new thread
  353          * may be willing to acquire the callout cpu lock.
  354          */
  355         c->c_cpu = CPUBLOCK;
  356         spinlock_enter();
  357         CC_UNLOCK(cc);
  358         new_cc = CC_CPU(new_cpu);
  359         CC_LOCK(new_cc);
  360         spinlock_exit();
  361         c->c_cpu = new_cpu;
  362         return (new_cc);
  363 }
  364 #endif
  365 
  366 /*
  367  * Start softclock threads.
  368  */
  369 static void
  370 start_softclock(void *dummy)
  371 {
  372         struct callout_cpu *cc;
  373         char name[MAXCOMLEN];
  374         int cpu;
  375         bool pin_swi;
  376         struct intr_event *ie;
  377 
  378         CPU_FOREACH(cpu) {
  379                 cc = CC_CPU(cpu);
  380                 snprintf(name, sizeof(name), "clock (%d)", cpu);
  381                 ie = NULL;
  382                 if (swi_add(&ie, name, softclock, cc, SWI_CLOCK,
  383                     INTR_MPSAFE, &cc->cc_cookie))
  384                         panic("died while creating standard software ithreads");
  385                 if (cpu == cc_default_cpu)
  386                         pin_swi = pin_default_swi;
  387                 else
  388                         pin_swi = pin_pcpu_swi;
  389                 if (pin_swi && (intr_event_bind(ie, cpu) != 0)) {
  390                         printf("%s: %s clock couldn't be pinned to cpu %d\n",
  391                             __func__,
  392                             cpu == cc_default_cpu ? "default" : "per-cpu",
  393                             cpu);
  394                 }
  395         }
  396 }
  397 SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);
  398 
  399 #define CC_HASH_SHIFT   8
  400 
  401 static inline u_int
  402 callout_hash(sbintime_t sbt)
  403 {
  404 
  405         return (sbt >> (32 - CC_HASH_SHIFT));
  406 }
  407 
  408 static inline u_int
  409 callout_get_bucket(sbintime_t sbt)
  410 {
  411 
  412         return (callout_hash(sbt) & callwheelmask);
  413 }
  414 
  415 void
  416 callout_process(sbintime_t now)
  417 {
  418         struct callout *tmp, *tmpn;
  419         struct callout_cpu *cc;
  420         struct callout_list *sc;
  421         sbintime_t first, last, max, tmp_max;
  422         uint32_t lookahead;
  423         u_int firstb, lastb, nowb;
  424 #ifdef CALLOUT_PROFILING
  425         int depth_dir = 0, mpcalls_dir = 0, lockcalls_dir = 0;
  426 #endif
  427 
  428         cc = CC_SELF();
  429         mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
  430 
  431         /* Compute the buckets of the last scan and present times. */
  432         firstb = callout_hash(cc->cc_lastscan);
  433         cc->cc_lastscan = now;
  434         nowb = callout_hash(now);
  435 
  436         /* Compute the last bucket and minimum time of the bucket after it. */
  437         if (nowb == firstb)
  438                 lookahead = (SBT_1S / 16);
  439         else if (nowb - firstb == 1)
  440                 lookahead = (SBT_1S / 8);
  441         else
  442                 lookahead = (SBT_1S / 2);
  443         first = last = now;
  444         first += (lookahead / 2);
  445         last += lookahead;
  446         last &= (0xffffffffffffffffLLU << (32 - CC_HASH_SHIFT));
  447         lastb = callout_hash(last) - 1;
  448         max = last;
  449 
  450         /*
  451          * Check if we wrapped around the entire wheel from the last scan.
  452          * In case, we need to scan entirely the wheel for pending callouts.
  453          */
  454         if (lastb - firstb >= callwheelsize) {
  455                 lastb = firstb + callwheelsize - 1;
  456                 if (nowb - firstb >= callwheelsize)
  457                         nowb = lastb;
  458         }
  459 
  460         /* Iterate callwheel from firstb to nowb and then up to lastb. */
  461         do {
  462                 sc = &cc->cc_callwheel[firstb & callwheelmask];
  463                 tmp = LIST_FIRST(sc);
  464                 while (tmp != NULL) {
  465                         /* Run the callout if present time within allowed. */
  466                         if (tmp->c_time <= now) {
  467                                 /*
  468                                  * Consumer told us the callout may be run
  469                                  * directly from hardware interrupt context.
  470                                  */
  471                                 if (tmp->c_iflags & CALLOUT_DIRECT) {
  472 #ifdef CALLOUT_PROFILING
  473                                         ++depth_dir;
  474 #endif
  475                                         cc_exec_next(cc) =
  476                                             LIST_NEXT(tmp, c_links.le);
  477                                         cc->cc_bucket = firstb & callwheelmask;
  478                                         LIST_REMOVE(tmp, c_links.le);
  479                                         softclock_call_cc(tmp, cc,
  480 #ifdef CALLOUT_PROFILING
  481                                             &mpcalls_dir, &lockcalls_dir, NULL,
  482 #endif
  483                                             1);
  484                                         tmp = cc_exec_next(cc);
  485                                         cc_exec_next(cc) = NULL;
  486                                 } else {
  487                                         tmpn = LIST_NEXT(tmp, c_links.le);
  488                                         LIST_REMOVE(tmp, c_links.le);
  489                                         TAILQ_INSERT_TAIL(&cc->cc_expireq,
  490                                             tmp, c_links.tqe);
  491                                         tmp->c_iflags |= CALLOUT_PROCESSED;
  492                                         tmp = tmpn;
  493                                 }
  494                                 continue;
  495                         }
  496                         /* Skip events from distant future. */
  497                         if (tmp->c_time >= max)
  498                                 goto next;
  499                         /*
  500                          * Event minimal time is bigger than present maximal
  501                          * time, so it cannot be aggregated.
  502                          */
  503                         if (tmp->c_time > last) {
  504                                 lastb = nowb;
  505                                 goto next;
  506                         }
  507                         /* Update first and last time, respecting this event. */
  508                         if (tmp->c_time < first)
  509                                 first = tmp->c_time;
  510                         tmp_max = tmp->c_time + tmp->c_precision;
  511                         if (tmp_max < last)
  512                                 last = tmp_max;
  513 next:
  514                         tmp = LIST_NEXT(tmp, c_links.le);
  515                 }
  516                 /* Proceed with the next bucket. */
  517                 firstb++;
  518                 /*
  519                  * Stop if we looked after present time and found
  520                  * some event we can't execute at now.
  521                  * Stop if we looked far enough into the future.
  522                  */
  523         } while (((int)(firstb - lastb)) <= 0);
  524         cc->cc_firstevent = last;
  525         cpu_new_callout(curcpu, last, first);
  526 
  527 #ifdef CALLOUT_PROFILING
  528         avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8;
  529         avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8;
  530         avg_lockcalls_dir += (lockcalls_dir * 1000 - avg_lockcalls_dir) >> 8;
  531 #endif
  532         mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
  533         /*
  534          * swi_sched acquires the thread lock, so we don't want to call it
  535          * with cc_lock held; incorrect locking order.
  536          */
  537         if (!TAILQ_EMPTY(&cc->cc_expireq))
  538                 swi_sched(cc->cc_cookie, 0);
  539 }
  540 
  541 static struct callout_cpu *
  542 callout_lock(struct callout *c)
  543 {
  544         struct callout_cpu *cc;
  545         int cpu;
  546 
  547         for (;;) {
  548                 cpu = c->c_cpu;
  549 #ifdef SMP
  550                 if (cpu == CPUBLOCK) {
  551                         while (c->c_cpu == CPUBLOCK)
  552                                 cpu_spinwait();
  553                         continue;
  554                 }
  555 #endif
  556                 cc = CC_CPU(cpu);
  557                 CC_LOCK(cc);
  558                 if (cpu == c->c_cpu)
  559                         break;
  560                 CC_UNLOCK(cc);
  561         }
  562         return (cc);
  563 }
  564 
  565 static void
  566 callout_cc_add(struct callout *c, struct callout_cpu *cc,
  567     sbintime_t sbt, sbintime_t precision, void (*func)(void *),
  568     void *arg, int cpu, int flags)
  569 {
  570         int bucket;
  571 
  572         CC_LOCK_ASSERT(cc);
  573         if (sbt < cc->cc_lastscan)
  574                 sbt = cc->cc_lastscan;
  575         c->c_arg = arg;
  576         c->c_iflags |= CALLOUT_PENDING;
  577         c->c_iflags &= ~CALLOUT_PROCESSED;
  578         c->c_flags |= CALLOUT_ACTIVE;
  579         if (flags & C_DIRECT_EXEC)
  580                 c->c_iflags |= CALLOUT_DIRECT;
  581         c->c_func = func;
  582         c->c_time = sbt;
  583         c->c_precision = precision;
  584         bucket = callout_get_bucket(c->c_time);
  585         CTR3(KTR_CALLOUT, "precision set for %p: %d.%08x",
  586             c, (int)(c->c_precision >> 32),
  587             (u_int)(c->c_precision & 0xffffffff));
  588         LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le);
  589         if (cc->cc_bucket == bucket)
  590                 cc_exec_next(cc) = c;
  591 
  592         /*
  593          * Inform the eventtimers(4) subsystem there's a new callout
  594          * that has been inserted, but only if really required.
  595          */
  596         if (SBT_MAX - c->c_time < c->c_precision)
  597                 c->c_precision = SBT_MAX - c->c_time;
  598         sbt = c->c_time + c->c_precision;
  599         if (sbt < cc->cc_firstevent) {
  600                 cc->cc_firstevent = sbt;
  601                 cpu_new_callout(cpu, sbt, c->c_time);
  602         }
  603 }
  604 
  605 static void
  606 softclock_call_cc(struct callout *c, struct callout_cpu *cc,
  607 #ifdef CALLOUT_PROFILING
  608     int *mpcalls, int *lockcalls, int *gcalls,
  609 #endif
  610     int direct)
  611 {
  612         struct rm_priotracker tracker;
  613         callout_func_t *c_func, *drain;
  614         void *c_arg;
  615         struct lock_class *class;
  616         struct lock_object *c_lock;
  617         uintptr_t lock_status;
  618         int c_iflags;
  619 #ifdef SMP
  620         struct callout_cpu *new_cc;
  621         callout_func_t *new_func;
  622         void *new_arg;
  623         int flags, new_cpu;
  624         sbintime_t new_prec, new_time;
  625 #endif
  626 #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING) 
  627         sbintime_t sbt1, sbt2;
  628         struct timespec ts2;
  629         static sbintime_t maxdt = 2 * SBT_1MS;  /* 2 msec */
  630         static callout_func_t *lastfunc;
  631 #endif
  632 
  633         KASSERT((c->c_iflags & CALLOUT_PENDING) == CALLOUT_PENDING,
  634             ("softclock_call_cc: pend %p %x", c, c->c_iflags));
  635         KASSERT((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE,
  636             ("softclock_call_cc: act %p %x", c, c->c_flags));
  637         class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
  638         lock_status = 0;
  639         if (c->c_flags & CALLOUT_SHAREDLOCK) {
  640                 if (class == &lock_class_rm)
  641                         lock_status = (uintptr_t)&tracker;
  642                 else
  643                         lock_status = 1;
  644         }
  645         c_lock = c->c_lock;
  646         c_func = c->c_func;
  647         c_arg = c->c_arg;
  648         c_iflags = c->c_iflags;
  649         c->c_iflags &= ~CALLOUT_PENDING;
  650 
  651         cc_exec_curr(cc, direct) = c;
  652         cc_exec_last_func(cc, direct) = c_func;
  653         cc_exec_last_arg(cc, direct) = c_arg;
  654         cc_exec_cancel(cc, direct) = false;
  655         cc_exec_drain(cc, direct) = NULL;
  656         CC_UNLOCK(cc);
  657         if (c_lock != NULL) {
  658                 class->lc_lock(c_lock, lock_status);
  659                 /*
  660                  * The callout may have been cancelled
  661                  * while we switched locks.
  662                  */
  663                 if (cc_exec_cancel(cc, direct)) {
  664                         class->lc_unlock(c_lock);
  665                         goto skip;
  666                 }
  667                 /* The callout cannot be stopped now. */
  668                 cc_exec_cancel(cc, direct) = true;
  669                 if (c_lock == &Giant.lock_object) {
  670 #ifdef CALLOUT_PROFILING
  671                         (*gcalls)++;
  672 #endif
  673                         CTR3(KTR_CALLOUT, "callout giant %p func %p arg %p",
  674                             c, c_func, c_arg);
  675                 } else {
  676 #ifdef CALLOUT_PROFILING
  677                         (*lockcalls)++;
  678 #endif
  679                         CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
  680                             c, c_func, c_arg);
  681                 }
  682         } else {
  683 #ifdef CALLOUT_PROFILING
  684                 (*mpcalls)++;
  685 #endif
  686                 CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
  687                     c, c_func, c_arg);
  688         }
  689         KTR_STATE3(KTR_SCHED, "callout", cc->cc_ktr_event_name, "running",
  690             "func:%p", c_func, "arg:%p", c_arg, "direct:%d", direct);
  691 #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
  692         sbt1 = sbinuptime();
  693 #endif
  694         THREAD_NO_SLEEPING();
  695         SDT_PROBE1(callout_execute, , , callout__start, c);
  696         c_func(c_arg);
  697         SDT_PROBE1(callout_execute, , , callout__end, c);
  698         THREAD_SLEEPING_OK();
  699 #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
  700         sbt2 = sbinuptime();
  701         sbt2 -= sbt1;
  702         if (sbt2 > maxdt) {
  703                 if (lastfunc != c_func || sbt2 > maxdt * 2) {
  704                         ts2 = sbttots(sbt2);
  705                         printf(
  706                 "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
  707                             c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
  708                 }
  709                 maxdt = sbt2;
  710                 lastfunc = c_func;
  711         }
  712 #endif
  713         KTR_STATE0(KTR_SCHED, "callout", cc->cc_ktr_event_name, "idle");
  714         CTR1(KTR_CALLOUT, "callout %p finished", c);
  715         if ((c_iflags & CALLOUT_RETURNUNLOCKED) == 0)
  716                 class->lc_unlock(c_lock);
  717 skip:
  718         CC_LOCK(cc);
  719         KASSERT(cc_exec_curr(cc, direct) == c, ("mishandled cc_curr"));
  720         cc_exec_curr(cc, direct) = NULL;
  721         if (cc_exec_drain(cc, direct)) {
  722                 drain = cc_exec_drain(cc, direct);
  723                 cc_exec_drain(cc, direct) = NULL;
  724                 CC_UNLOCK(cc);
  725                 drain(c_arg);
  726                 CC_LOCK(cc);
  727         }
  728         if (cc_exec_waiting(cc, direct)) {
  729                 /*
  730                  * There is someone waiting for the
  731                  * callout to complete.
  732                  * If the callout was scheduled for
  733                  * migration just cancel it.
  734                  */
  735                 if (cc_cce_migrating(cc, direct)) {
  736                         cc_cce_cleanup(cc, direct);
  737 
  738                         /*
  739                          * It should be assert here that the callout is not
  740                          * destroyed but that is not easy.
  741                          */
  742                         c->c_iflags &= ~CALLOUT_DFRMIGRATION;
  743                 }
  744                 cc_exec_waiting(cc, direct) = false;
  745                 CC_UNLOCK(cc);
  746                 wakeup(&cc_exec_waiting(cc, direct));
  747                 CC_LOCK(cc);
  748         } else if (cc_cce_migrating(cc, direct)) {
  749 #ifdef SMP
  750                 /*
  751                  * If the callout was scheduled for
  752                  * migration just perform it now.
  753                  */
  754                 new_cpu = cc_migration_cpu(cc, direct);
  755                 new_time = cc_migration_time(cc, direct);
  756                 new_prec = cc_migration_prec(cc, direct);
  757                 new_func = cc_migration_func(cc, direct);
  758                 new_arg = cc_migration_arg(cc, direct);
  759                 cc_cce_cleanup(cc, direct);
  760 
  761                 /*
  762                  * It should be assert here that the callout is not destroyed
  763                  * but that is not easy.
  764                  *
  765                  * As first thing, handle deferred callout stops.
  766                  */
  767                 if (!callout_migrating(c)) {
  768                         CTR3(KTR_CALLOUT,
  769                              "deferred cancelled %p func %p arg %p",
  770                              c, new_func, new_arg);
  771                         return;
  772                 }
  773                 c->c_iflags &= ~CALLOUT_DFRMIGRATION;
  774 
  775                 new_cc = callout_cpu_switch(c, cc, new_cpu);
  776                 flags = (direct) ? C_DIRECT_EXEC : 0;
  777                 callout_cc_add(c, new_cc, new_time, new_prec, new_func,
  778                     new_arg, new_cpu, flags);
  779                 CC_UNLOCK(new_cc);
  780                 CC_LOCK(cc);
  781 #else
  782                 panic("migration should not happen");
  783 #endif
  784         }
  785 }
  786 
  787 /*
  788  * The callout mechanism is based on the work of Adam M. Costello and
  789  * George Varghese, published in a technical report entitled "Redesigning
  790  * the BSD Callout and Timer Facilities" and modified slightly for inclusion
  791  * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
  792  * used in this implementation was published by G. Varghese and T. Lauck in
  793  * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
  794  * the Efficient Implementation of a Timer Facility" in the Proceedings of
  795  * the 11th ACM Annual Symposium on Operating Systems Principles,
  796  * Austin, Texas Nov 1987.
  797  */
  798 
  799 /*
  800  * Software (low priority) clock interrupt.
  801  * Run periodic events from timeout queue.
  802  */
  803 void
  804 softclock(void *arg)
  805 {
  806         struct callout_cpu *cc;
  807         struct callout *c;
  808 #ifdef CALLOUT_PROFILING
  809         int depth = 0, gcalls = 0, lockcalls = 0, mpcalls = 0;
  810 #endif
  811 
  812         cc = (struct callout_cpu *)arg;
  813         CC_LOCK(cc);
  814         while ((c = TAILQ_FIRST(&cc->cc_expireq)) != NULL) {
  815                 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
  816                 softclock_call_cc(c, cc,
  817 #ifdef CALLOUT_PROFILING
  818                     &mpcalls, &lockcalls, &gcalls,
  819 #endif
  820                     0);
  821 #ifdef CALLOUT_PROFILING
  822                 ++depth;
  823 #endif
  824         }
  825 #ifdef CALLOUT_PROFILING
  826         avg_depth += (depth * 1000 - avg_depth) >> 8;
  827         avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
  828         avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
  829         avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
  830 #endif
  831         CC_UNLOCK(cc);
  832 }
  833 
  834 void
  835 callout_when(sbintime_t sbt, sbintime_t precision, int flags,
  836     sbintime_t *res, sbintime_t *prec_res)
  837 {
  838         sbintime_t to_sbt, to_pr;
  839 
  840         if ((flags & (C_ABSOLUTE | C_PRECALC)) != 0) {
  841                 *res = sbt;
  842                 *prec_res = precision;
  843                 return;
  844         }
  845         if ((flags & C_HARDCLOCK) != 0 && sbt < tick_sbt)
  846                 sbt = tick_sbt;
  847         if ((flags & C_HARDCLOCK) != 0 || sbt >= sbt_tickthreshold) {
  848                 /*
  849                  * Obtain the time of the last hardclock() call on
  850                  * this CPU directly from the kern_clocksource.c.
  851                  * This value is per-CPU, but it is equal for all
  852                  * active ones.
  853                  */
  854 #ifdef __LP64__
  855                 to_sbt = DPCPU_GET(hardclocktime);
  856 #else
  857                 spinlock_enter();
  858                 to_sbt = DPCPU_GET(hardclocktime);
  859                 spinlock_exit();
  860 #endif
  861                 if (cold && to_sbt == 0)
  862                         to_sbt = sbinuptime();
  863                 if ((flags & C_HARDCLOCK) == 0)
  864                         to_sbt += tick_sbt;
  865         } else
  866                 to_sbt = sbinuptime();
  867         if (SBT_MAX - to_sbt < sbt)
  868                 to_sbt = SBT_MAX;
  869         else
  870                 to_sbt += sbt;
  871         *res = to_sbt;
  872         to_pr = ((C_PRELGET(flags) < 0) ? sbt >> tc_precexp :
  873             sbt >> C_PRELGET(flags));
  874         *prec_res = to_pr > precision ? to_pr : precision;
  875 }
  876 
  877 /*
  878  * New interface; clients allocate their own callout structures.
  879  *
  880  * callout_reset() - establish or change a timeout
  881  * callout_stop() - disestablish a timeout
  882  * callout_init() - initialize a callout structure so that it can
  883  *      safely be passed to callout_reset() and callout_stop()
  884  *
  885  * <sys/callout.h> defines three convenience macros:
  886  *
  887  * callout_active() - returns truth if callout has not been stopped,
  888  *      drained, or deactivated since the last time the callout was
  889  *      reset.
  890  * callout_pending() - returns truth if callout is still waiting for timeout
  891  * callout_deactivate() - marks the callout as having been serviced
  892  */
  893 int
  894 callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t prec,
  895     callout_func_t *ftn, void *arg, int cpu, int flags)
  896 {
  897         sbintime_t to_sbt, precision;
  898         struct callout_cpu *cc;
  899         int cancelled, direct;
  900         int ignore_cpu=0;
  901 
  902         cancelled = 0;
  903         if (cpu == -1) {
  904                 ignore_cpu = 1;
  905         } else if ((cpu >= MAXCPU) ||
  906                    ((CC_CPU(cpu))->cc_inited == 0)) {
  907                 /* Invalid CPU spec */
  908                 panic("Invalid CPU in callout %d", cpu);
  909         }
  910         callout_when(sbt, prec, flags, &to_sbt, &precision);
  911 
  912         /* 
  913          * This flag used to be added by callout_cc_add, but the
  914          * first time you call this we could end up with the
  915          * wrong direct flag if we don't do it before we add.
  916          */
  917         if (flags & C_DIRECT_EXEC) {
  918                 direct = 1;
  919         } else {
  920                 direct = 0;
  921         }
  922         KASSERT(!direct || c->c_lock == NULL ||
  923             (LOCK_CLASS(c->c_lock)->lc_flags & LC_SPINLOCK),
  924             ("%s: direct callout %p has non-spin lock", __func__, c));
  925         cc = callout_lock(c);
  926         /*
  927          * Don't allow migration if the user does not care.
  928          */
  929         if (ignore_cpu) {
  930                 cpu = c->c_cpu;
  931         }
  932 
  933         if (cc_exec_curr(cc, direct) == c) {
  934                 /*
  935                  * We're being asked to reschedule a callout which is
  936                  * currently in progress.  If there is a lock then we
  937                  * can cancel the callout if it has not really started.
  938                  */
  939                 if (c->c_lock != NULL && !cc_exec_cancel(cc, direct))
  940                         cancelled = cc_exec_cancel(cc, direct) = true;
  941                 if (cc_exec_waiting(cc, direct) || cc_exec_drain(cc, direct)) {
  942                         /*
  943                          * Someone has called callout_drain to kill this
  944                          * callout.  Don't reschedule.
  945                          */
  946                         CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
  947                             cancelled ? "cancelled" : "failed to cancel",
  948                             c, c->c_func, c->c_arg);
  949                         CC_UNLOCK(cc);
  950                         return (cancelled);
  951                 }
  952 #ifdef SMP
  953                 if (callout_migrating(c)) {
  954                         /* 
  955                          * This only occurs when a second callout_reset_sbt_on
  956                          * is made after a previous one moved it into
  957                          * deferred migration (below). Note we do *not* change
  958                          * the prev_cpu even though the previous target may
  959                          * be different.
  960                          */
  961                         cc_migration_cpu(cc, direct) = cpu;
  962                         cc_migration_time(cc, direct) = to_sbt;
  963                         cc_migration_prec(cc, direct) = precision;
  964                         cc_migration_func(cc, direct) = ftn;
  965                         cc_migration_arg(cc, direct) = arg;
  966                         cancelled = 1;
  967                         CC_UNLOCK(cc);
  968                         return (cancelled);
  969                 }
  970 #endif
  971         }
  972         if (c->c_iflags & CALLOUT_PENDING) {
  973                 if ((c->c_iflags & CALLOUT_PROCESSED) == 0) {
  974                         if (cc_exec_next(cc) == c)
  975                                 cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
  976                         LIST_REMOVE(c, c_links.le);
  977                 } else {
  978                         TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
  979                 }
  980                 cancelled = 1;
  981                 c->c_iflags &= ~ CALLOUT_PENDING;
  982                 c->c_flags &= ~ CALLOUT_ACTIVE;
  983         }
  984 
  985 #ifdef SMP
  986         /*
  987          * If the callout must migrate try to perform it immediately.
  988          * If the callout is currently running, just defer the migration
  989          * to a more appropriate moment.
  990          */
  991         if (c->c_cpu != cpu) {
  992                 if (cc_exec_curr(cc, direct) == c) {
  993                         /* 
  994                          * Pending will have been removed since we are
  995                          * actually executing the callout on another
  996                          * CPU. That callout should be waiting on the
  997                          * lock the caller holds. If we set both
  998                          * active/and/pending after we return and the
  999                          * lock on the executing callout proceeds, it
 1000                          * will then see pending is true and return.
 1001                          * At the return from the actual callout execution
 1002                          * the migration will occur in softclock_call_cc
 1003                          * and this new callout will be placed on the 
 1004                          * new CPU via a call to callout_cpu_switch() which
 1005                          * will get the lock on the right CPU followed
 1006                          * by a call callout_cc_add() which will add it there.
 1007                          * (see above in softclock_call_cc()).
 1008                          */
 1009                         cc_migration_cpu(cc, direct) = cpu;
 1010                         cc_migration_time(cc, direct) = to_sbt;
 1011                         cc_migration_prec(cc, direct) = precision;
 1012                         cc_migration_func(cc, direct) = ftn;
 1013                         cc_migration_arg(cc, direct) = arg;
 1014                         c->c_iflags |= (CALLOUT_DFRMIGRATION | CALLOUT_PENDING);
 1015                         c->c_flags |= CALLOUT_ACTIVE;
 1016                         CTR6(KTR_CALLOUT,
 1017                     "migration of %p func %p arg %p in %d.%08x to %u deferred",
 1018                             c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
 1019                             (u_int)(to_sbt & 0xffffffff), cpu);
 1020                         CC_UNLOCK(cc);
 1021                         return (cancelled);
 1022                 }
 1023                 cc = callout_cpu_switch(c, cc, cpu);
 1024         }
 1025 #endif
 1026 
 1027         callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags);
 1028         CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x",
 1029             cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
 1030             (u_int)(to_sbt & 0xffffffff));
 1031         CC_UNLOCK(cc);
 1032 
 1033         return (cancelled);
 1034 }
 1035 
 1036 /*
 1037  * Common idioms that can be optimized in the future.
 1038  */
 1039 int
 1040 callout_schedule_on(struct callout *c, int to_ticks, int cpu)
 1041 {
 1042         return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
 1043 }
 1044 
 1045 int
 1046 callout_schedule(struct callout *c, int to_ticks)
 1047 {
 1048         return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
 1049 }
 1050 
 1051 int
 1052 _callout_stop_safe(struct callout *c, int flags, callout_func_t *drain)
 1053 {
 1054         struct callout_cpu *cc, *old_cc;
 1055         struct lock_class *class;
 1056         int direct, sq_locked, use_lock;
 1057         int cancelled, not_on_a_list;
 1058 
 1059         if ((flags & CS_DRAIN) != 0)
 1060                 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, c->c_lock,
 1061                     "calling %s", __func__);
 1062 
 1063         KASSERT((flags & CS_DRAIN) == 0 || drain == NULL,
 1064             ("Cannot set drain callback and CS_DRAIN flag at the same time"));
 1065 
 1066         /*
 1067          * Some old subsystems don't hold Giant while running a callout_stop(),
 1068          * so just discard this check for the moment.
 1069          */
 1070         if ((flags & CS_DRAIN) == 0 && c->c_lock != NULL) {
 1071                 if (c->c_lock == &Giant.lock_object)
 1072                         use_lock = mtx_owned(&Giant);
 1073                 else {
 1074                         use_lock = 1;
 1075                         class = LOCK_CLASS(c->c_lock);
 1076                         class->lc_assert(c->c_lock, LA_XLOCKED);
 1077                 }
 1078         } else
 1079                 use_lock = 0;
 1080         if (c->c_iflags & CALLOUT_DIRECT) {
 1081                 direct = 1;
 1082         } else {
 1083                 direct = 0;
 1084         }
 1085         sq_locked = 0;
 1086         old_cc = NULL;
 1087 again:
 1088         cc = callout_lock(c);
 1089 
 1090         if ((c->c_iflags & (CALLOUT_DFRMIGRATION | CALLOUT_PENDING)) ==
 1091             (CALLOUT_DFRMIGRATION | CALLOUT_PENDING) &&
 1092             ((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE)) {
 1093                 /*
 1094                  * Special case where this slipped in while we
 1095                  * were migrating *as* the callout is about to
 1096                  * execute. The caller probably holds the lock
 1097                  * the callout wants.
 1098                  *
 1099                  * Get rid of the migration first. Then set
 1100                  * the flag that tells this code *not* to
 1101                  * try to remove it from any lists (its not
 1102                  * on one yet). When the callout wheel runs,
 1103                  * it will ignore this callout.
 1104                  */
 1105                 c->c_iflags &= ~CALLOUT_PENDING;
 1106                 c->c_flags &= ~CALLOUT_ACTIVE;
 1107                 not_on_a_list = 1;
 1108         } else {
 1109                 not_on_a_list = 0;
 1110         }
 1111 
 1112         /*
 1113          * If the callout was migrating while the callout cpu lock was
 1114          * dropped,  just drop the sleepqueue lock and check the states
 1115          * again.
 1116          */
 1117         if (sq_locked != 0 && cc != old_cc) {
 1118 #ifdef SMP
 1119                 CC_UNLOCK(cc);
 1120                 sleepq_release(&cc_exec_waiting(old_cc, direct));
 1121                 sq_locked = 0;
 1122                 old_cc = NULL;
 1123                 goto again;
 1124 #else
 1125                 panic("migration should not happen");
 1126 #endif
 1127         }
 1128 
 1129         /*
 1130          * If the callout is running, try to stop it or drain it.
 1131          */
 1132         if (cc_exec_curr(cc, direct) == c) {
 1133                 /*
 1134                  * Succeed we to stop it or not, we must clear the
 1135                  * active flag - this is what API users expect.  If we're
 1136                  * draining and the callout is currently executing, first wait
 1137                  * until it finishes.
 1138                  */
 1139                 if ((flags & CS_DRAIN) == 0)
 1140                         c->c_flags &= ~CALLOUT_ACTIVE;
 1141 
 1142                 if ((flags & CS_DRAIN) != 0) {
 1143                         /*
 1144                          * The current callout is running (or just
 1145                          * about to run) and blocking is allowed, so
 1146                          * just wait for the current invocation to
 1147                          * finish.
 1148                          */
 1149                         if (cc_exec_curr(cc, direct) == c) {
 1150                                 /*
 1151                                  * Use direct calls to sleepqueue interface
 1152                                  * instead of cv/msleep in order to avoid
 1153                                  * a LOR between cc_lock and sleepqueue
 1154                                  * chain spinlocks.  This piece of code
 1155                                  * emulates a msleep_spin() call actually.
 1156                                  *
 1157                                  * If we already have the sleepqueue chain
 1158                                  * locked, then we can safely block.  If we
 1159                                  * don't already have it locked, however,
 1160                                  * we have to drop the cc_lock to lock
 1161                                  * it.  This opens several races, so we
 1162                                  * restart at the beginning once we have
 1163                                  * both locks.  If nothing has changed, then
 1164                                  * we will end up back here with sq_locked
 1165                                  * set.
 1166                                  */
 1167                                 if (!sq_locked) {
 1168                                         CC_UNLOCK(cc);
 1169                                         sleepq_lock(
 1170                                             &cc_exec_waiting(cc, direct));
 1171                                         sq_locked = 1;
 1172                                         old_cc = cc;
 1173                                         goto again;
 1174                                 }
 1175 
 1176                                 /*
 1177                                  * Migration could be cancelled here, but
 1178                                  * as long as it is still not sure when it
 1179                                  * will be packed up, just let softclock()
 1180                                  * take care of it.
 1181                                  */
 1182                                 cc_exec_waiting(cc, direct) = true;
 1183                                 DROP_GIANT();
 1184                                 CC_UNLOCK(cc);
 1185                                 sleepq_add(
 1186                                     &cc_exec_waiting(cc, direct),
 1187                                     &cc->cc_lock.lock_object, "codrain",
 1188                                     SLEEPQ_SLEEP, 0);
 1189                                 sleepq_wait(
 1190                                     &cc_exec_waiting(cc, direct),
 1191                                              0);
 1192                                 sq_locked = 0;
 1193                                 old_cc = NULL;
 1194 
 1195                                 /* Reacquire locks previously released. */
 1196                                 PICKUP_GIANT();
 1197                                 goto again;
 1198                         }
 1199                         c->c_flags &= ~CALLOUT_ACTIVE;
 1200                 } else if (use_lock &&
 1201                            !cc_exec_cancel(cc, direct) && (drain == NULL)) {
 1202                         
 1203                         /*
 1204                          * The current callout is waiting for its
 1205                          * lock which we hold.  Cancel the callout
 1206                          * and return.  After our caller drops the
 1207                          * lock, the callout will be skipped in
 1208                          * softclock(). This *only* works with a
 1209                          * callout_stop() *not* callout_drain() or
 1210                          * callout_async_drain().
 1211                          */
 1212                         cc_exec_cancel(cc, direct) = true;
 1213                         CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
 1214                             c, c->c_func, c->c_arg);
 1215                         KASSERT(!cc_cce_migrating(cc, direct),
 1216                             ("callout wrongly scheduled for migration"));
 1217                         if (callout_migrating(c)) {
 1218                                 c->c_iflags &= ~CALLOUT_DFRMIGRATION;
 1219 #ifdef SMP
 1220                                 cc_migration_cpu(cc, direct) = CPUBLOCK;
 1221                                 cc_migration_time(cc, direct) = 0;
 1222                                 cc_migration_prec(cc, direct) = 0;
 1223                                 cc_migration_func(cc, direct) = NULL;
 1224                                 cc_migration_arg(cc, direct) = NULL;
 1225 #endif
 1226                         }
 1227                         CC_UNLOCK(cc);
 1228                         KASSERT(!sq_locked, ("sleepqueue chain locked"));
 1229                         return (1);
 1230                 } else if (callout_migrating(c)) {
 1231                         /*
 1232                          * The callout is currently being serviced
 1233                          * and the "next" callout is scheduled at
 1234                          * its completion with a migration. We remove
 1235                          * the migration flag so it *won't* get rescheduled,
 1236                          * but we can't stop the one thats running so
 1237                          * we return 0.
 1238                          */
 1239                         c->c_iflags &= ~CALLOUT_DFRMIGRATION;
 1240 #ifdef SMP
 1241                         /* 
 1242                          * We can't call cc_cce_cleanup here since
 1243                          * if we do it will remove .ce_curr and
 1244                          * its still running. This will prevent a
 1245                          * reschedule of the callout when the 
 1246                          * execution completes.
 1247                          */
 1248                         cc_migration_cpu(cc, direct) = CPUBLOCK;
 1249                         cc_migration_time(cc, direct) = 0;
 1250                         cc_migration_prec(cc, direct) = 0;
 1251                         cc_migration_func(cc, direct) = NULL;
 1252                         cc_migration_arg(cc, direct) = NULL;
 1253 #endif
 1254                         CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
 1255                             c, c->c_func, c->c_arg);
 1256                         if (drain) {
 1257                                 KASSERT(cc_exec_drain(cc, direct) == NULL,
 1258                                     ("callout drain function already set to %p",
 1259                                     cc_exec_drain(cc, direct)));
 1260                                 cc_exec_drain(cc, direct) = drain;
 1261                         }
 1262                         CC_UNLOCK(cc);
 1263                         return ((flags & CS_EXECUTING) != 0);
 1264                 } else {
 1265                         CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
 1266                             c, c->c_func, c->c_arg);
 1267                         if (drain) {
 1268                                 KASSERT(cc_exec_drain(cc, direct) == NULL,
 1269                                     ("callout drain function already set to %p",
 1270                                     cc_exec_drain(cc, direct)));
 1271                                 cc_exec_drain(cc, direct) = drain;
 1272                         }
 1273                 }
 1274                 KASSERT(!sq_locked, ("sleepqueue chain still locked"));
 1275                 cancelled = ((flags & CS_EXECUTING) != 0);
 1276         } else
 1277                 cancelled = 1;
 1278 
 1279         if (sq_locked)
 1280                 sleepq_release(&cc_exec_waiting(cc, direct));
 1281 
 1282         if ((c->c_iflags & CALLOUT_PENDING) == 0) {
 1283                 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
 1284                     c, c->c_func, c->c_arg);
 1285                 /*
 1286                  * For not scheduled and not executing callout return
 1287                  * negative value.
 1288                  */
 1289                 if (cc_exec_curr(cc, direct) != c)
 1290                         cancelled = -1;
 1291                 CC_UNLOCK(cc);
 1292                 return (cancelled);
 1293         }
 1294 
 1295         c->c_iflags &= ~CALLOUT_PENDING;
 1296         c->c_flags &= ~CALLOUT_ACTIVE;
 1297 
 1298         CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
 1299             c, c->c_func, c->c_arg);
 1300         if (not_on_a_list == 0) {
 1301                 if ((c->c_iflags & CALLOUT_PROCESSED) == 0) {
 1302                         if (cc_exec_next(cc) == c)
 1303                                 cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
 1304                         LIST_REMOVE(c, c_links.le);
 1305                 } else {
 1306                         TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
 1307                 }
 1308         }
 1309         CC_UNLOCK(cc);
 1310         return (cancelled);
 1311 }
 1312 
 1313 void
 1314 callout_init(struct callout *c, int mpsafe)
 1315 {
 1316         bzero(c, sizeof *c);
 1317         if (mpsafe) {
 1318                 c->c_lock = NULL;
 1319                 c->c_iflags = CALLOUT_RETURNUNLOCKED;
 1320         } else {
 1321                 c->c_lock = &Giant.lock_object;
 1322                 c->c_iflags = 0;
 1323         }
 1324         c->c_cpu = cc_default_cpu;
 1325 }
 1326 
 1327 void
 1328 _callout_init_lock(struct callout *c, struct lock_object *lock, int flags)
 1329 {
 1330         bzero(c, sizeof *c);
 1331         c->c_lock = lock;
 1332         KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
 1333             ("callout_init_lock: bad flags %d", flags));
 1334         KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
 1335             ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
 1336         KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags & LC_SLEEPABLE),
 1337             ("%s: callout %p has sleepable lock", __func__, c));
 1338         c->c_iflags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
 1339         c->c_cpu = cc_default_cpu;
 1340 }
 1341 
 1342 static int
 1343 flssbt(sbintime_t sbt)
 1344 {
 1345 
 1346         sbt += (uint64_t)sbt >> 1;
 1347         if (sizeof(long) >= sizeof(sbintime_t))
 1348                 return (flsl(sbt));
 1349         if (sbt >= SBT_1S)
 1350                 return (flsl(((uint64_t)sbt) >> 32) + 32);
 1351         return (flsl(sbt));
 1352 }
 1353 
 1354 /*
 1355  * Dump immediate statistic snapshot of the scheduled callouts.
 1356  */
 1357 static int
 1358 sysctl_kern_callout_stat(SYSCTL_HANDLER_ARGS)
 1359 {
 1360         struct callout *tmp;
 1361         struct callout_cpu *cc;
 1362         struct callout_list *sc;
 1363         sbintime_t maxpr, maxt, medpr, medt, now, spr, st, t;
 1364         int ct[64], cpr[64], ccpbk[32];
 1365         int error, val, i, count, tcum, pcum, maxc, c, medc;
 1366         int cpu;
 1367 
 1368         val = 0;
 1369         error = sysctl_handle_int(oidp, &val, 0, req);
 1370         if (error != 0 || req->newptr == NULL)
 1371                 return (error);
 1372         count = maxc = 0;
 1373         st = spr = maxt = maxpr = 0;
 1374         bzero(ccpbk, sizeof(ccpbk));
 1375         bzero(ct, sizeof(ct));
 1376         bzero(cpr, sizeof(cpr));
 1377         now = sbinuptime();
 1378         CPU_FOREACH(cpu) {
 1379                 cc = CC_CPU(cpu);
 1380                 CC_LOCK(cc);
 1381                 for (i = 0; i < callwheelsize; i++) {
 1382                         sc = &cc->cc_callwheel[i];
 1383                         c = 0;
 1384                         LIST_FOREACH(tmp, sc, c_links.le) {
 1385                                 c++;
 1386                                 t = tmp->c_time - now;
 1387                                 if (t < 0)
 1388                                         t = 0;
 1389                                 st += t / SBT_1US;
 1390                                 spr += tmp->c_precision / SBT_1US;
 1391                                 if (t > maxt)
 1392                                         maxt = t;
 1393                                 if (tmp->c_precision > maxpr)
 1394                                         maxpr = tmp->c_precision;
 1395                                 ct[flssbt(t)]++;
 1396                                 cpr[flssbt(tmp->c_precision)]++;
 1397                         }
 1398                         if (c > maxc)
 1399                                 maxc = c;
 1400                         ccpbk[fls(c + c / 2)]++;
 1401                         count += c;
 1402                 }
 1403                 CC_UNLOCK(cc);
 1404         }
 1405 
 1406         for (i = 0, tcum = 0; i < 64 && tcum < count / 2; i++)
 1407                 tcum += ct[i];
 1408         medt = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
 1409         for (i = 0, pcum = 0; i < 64 && pcum < count / 2; i++)
 1410                 pcum += cpr[i];
 1411         medpr = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
 1412         for (i = 0, c = 0; i < 32 && c < count / 2; i++)
 1413                 c += ccpbk[i];
 1414         medc = (i >= 2) ? (1 << (i - 2)) : 0;
 1415 
 1416         printf("Scheduled callouts statistic snapshot:\n");
 1417         printf("  Callouts: %6d  Buckets: %6d*%-3d  Bucket size: 0.%06ds\n",
 1418             count, callwheelsize, mp_ncpus, 1000000 >> CC_HASH_SHIFT);
 1419         printf("  C/Bk: med %5d         avg %6d.%06jd  max %6d\n",
 1420             medc,
 1421             count / callwheelsize / mp_ncpus,
 1422             (uint64_t)count * 1000000 / callwheelsize / mp_ncpus % 1000000,
 1423             maxc);
 1424         printf("  Time: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
 1425             medt / SBT_1S, (medt & 0xffffffff) * 1000000 >> 32,
 1426             (st / count) / 1000000, (st / count) % 1000000,
 1427             maxt / SBT_1S, (maxt & 0xffffffff) * 1000000 >> 32);
 1428         printf("  Prec: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
 1429             medpr / SBT_1S, (medpr & 0xffffffff) * 1000000 >> 32,
 1430             (spr / count) / 1000000, (spr / count) % 1000000,
 1431             maxpr / SBT_1S, (maxpr & 0xffffffff) * 1000000 >> 32);
 1432         printf("  Distribution:       \tbuckets\t   time\t   tcum\t"
 1433             "   prec\t   pcum\n");
 1434         for (i = 0, tcum = pcum = 0; i < 64; i++) {
 1435                 if (ct[i] == 0 && cpr[i] == 0)
 1436                         continue;
 1437                 t = (i != 0) ? (((sbintime_t)1) << (i - 1)) : 0;
 1438                 tcum += ct[i];
 1439                 pcum += cpr[i];
 1440                 printf("  %10jd.%06jds\t 2**%d\t%7d\t%7d\t%7d\t%7d\n",
 1441                     t / SBT_1S, (t & 0xffffffff) * 1000000 >> 32,
 1442                     i - 1 - (32 - CC_HASH_SHIFT),
 1443                     ct[i], tcum, cpr[i], pcum);
 1444         }
 1445         return (error);
 1446 }
 1447 SYSCTL_PROC(_kern, OID_AUTO, callout_stat,
 1448     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
 1449     0, 0, sysctl_kern_callout_stat, "I",
 1450     "Dump immediate statistic snapshot of the scheduled callouts");
 1451 
 1452 #ifdef DDB
 1453 static void
 1454 _show_callout(struct callout *c)
 1455 {
 1456 
 1457         db_printf("callout %p\n", c);
 1458 #define C_DB_PRINTF(f, e)       db_printf("   %s = " f "\n", #e, c->e);
 1459         db_printf("   &c_links = %p\n", &(c->c_links));
 1460         C_DB_PRINTF("%" PRId64, c_time);
 1461         C_DB_PRINTF("%" PRId64, c_precision);
 1462         C_DB_PRINTF("%p",       c_arg);
 1463         C_DB_PRINTF("%p",       c_func);
 1464         C_DB_PRINTF("%p",       c_lock);
 1465         C_DB_PRINTF("%#x",      c_flags);
 1466         C_DB_PRINTF("%#x",      c_iflags);
 1467         C_DB_PRINTF("%d",       c_cpu);
 1468 #undef  C_DB_PRINTF
 1469 }
 1470 
 1471 DB_SHOW_COMMAND(callout, db_show_callout)
 1472 {
 1473 
 1474         if (!have_addr) {
 1475                 db_printf("usage: show callout <struct callout *>\n");
 1476                 return;
 1477         }
 1478 
 1479         _show_callout((struct callout *)addr);
 1480 }
 1481 
 1482 static void
 1483 _show_last_callout(int cpu, int direct, const char *dirstr)
 1484 {
 1485         struct callout_cpu *cc;
 1486         void *func, *arg;
 1487 
 1488         cc = CC_CPU(cpu);
 1489         func = cc_exec_last_func(cc, direct);
 1490         arg = cc_exec_last_arg(cc, direct);
 1491         db_printf("cpu %d last%s callout function: %p ", cpu, dirstr, func);
 1492         db_printsym((db_expr_t)func, DB_STGY_ANY);
 1493         db_printf("\ncpu %d last%s callout argument: %p\n", cpu, dirstr, arg);
 1494 }
 1495 
 1496 DB_SHOW_COMMAND(callout_last, db_show_callout_last)
 1497 {
 1498         int cpu, last;
 1499 
 1500         if (have_addr) {
 1501                 if (addr < 0 || addr > mp_maxid || CPU_ABSENT(addr)) {
 1502                         db_printf("no such cpu: %d\n", (int)addr);
 1503                         return;
 1504                 }
 1505                 cpu = last = addr;
 1506         } else {
 1507                 cpu = 0;
 1508                 last = mp_maxid;
 1509         }
 1510 
 1511         while (cpu <= last) {
 1512                 if (!CPU_ABSENT(cpu)) {
 1513                         _show_last_callout(cpu, 0, "");
 1514                         _show_last_callout(cpu, 1, " direct");
 1515                 }
 1516                 cpu++;
 1517         }
 1518 }
 1519 #endif /* DDB */

Cache object: 506be4a4ded471f21a9fc3723c24c90c


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.