1 /*-
2 * Copyright (c) 1982, 1986, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * From: @(#)kern_clock.c 8.5 (Berkeley) 1/21/94
35 */
36
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD: releng/11.2/sys/kern/kern_timeout.c 331722 2018-03-29 02:50:57Z eadler $");
39
40 #include "opt_callout_profiling.h"
41 #include "opt_ddb.h"
42 #if defined(__arm__)
43 #include "opt_timer.h"
44 #endif
45 #include "opt_rss.h"
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/bus.h>
50 #include <sys/callout.h>
51 #include <sys/file.h>
52 #include <sys/interrupt.h>
53 #include <sys/kernel.h>
54 #include <sys/ktr.h>
55 #include <sys/lock.h>
56 #include <sys/malloc.h>
57 #include <sys/mutex.h>
58 #include <sys/proc.h>
59 #include <sys/sdt.h>
60 #include <sys/sleepqueue.h>
61 #include <sys/sysctl.h>
62 #include <sys/smp.h>
63
64 #ifdef DDB
65 #include <ddb/ddb.h>
66 #include <machine/_inttypes.h>
67 #endif
68
69 #ifdef SMP
70 #include <machine/cpu.h>
71 #endif
72
73 #ifndef NO_EVENTTIMERS
74 DPCPU_DECLARE(sbintime_t, hardclocktime);
75 #endif
76
77 SDT_PROVIDER_DEFINE(callout_execute);
78 SDT_PROBE_DEFINE1(callout_execute, , , callout__start, "struct callout *");
79 SDT_PROBE_DEFINE1(callout_execute, , , callout__end, "struct callout *");
80
81 #ifdef CALLOUT_PROFILING
82 static int avg_depth;
83 SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
84 "Average number of items examined per softclock call. Units = 1/1000");
85 static int avg_gcalls;
86 SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
87 "Average number of Giant callouts made per softclock call. Units = 1/1000");
88 static int avg_lockcalls;
89 SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
90 "Average number of lock callouts made per softclock call. Units = 1/1000");
91 static int avg_mpcalls;
92 SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
93 "Average number of MP callouts made per softclock call. Units = 1/1000");
94 static int avg_depth_dir;
95 SYSCTL_INT(_debug, OID_AUTO, to_avg_depth_dir, CTLFLAG_RD, &avg_depth_dir, 0,
96 "Average number of direct callouts examined per callout_process call. "
97 "Units = 1/1000");
98 static int avg_lockcalls_dir;
99 SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls_dir, CTLFLAG_RD,
100 &avg_lockcalls_dir, 0, "Average number of lock direct callouts made per "
101 "callout_process call. Units = 1/1000");
102 static int avg_mpcalls_dir;
103 SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls_dir, CTLFLAG_RD, &avg_mpcalls_dir,
104 0, "Average number of MP direct callouts made per callout_process call. "
105 "Units = 1/1000");
106 #endif
107
108 static int ncallout;
109 SYSCTL_INT(_kern, OID_AUTO, ncallout, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &ncallout, 0,
110 "Number of entries in callwheel and size of timeout() preallocation");
111
112 #ifdef RSS
113 static int pin_default_swi = 1;
114 static int pin_pcpu_swi = 1;
115 #else
116 static int pin_default_swi = 0;
117 static int pin_pcpu_swi = 0;
118 #endif
119
120 SYSCTL_INT(_kern, OID_AUTO, pin_default_swi, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pin_default_swi,
121 0, "Pin the default (non-per-cpu) swi (shared with PCPU 0 swi)");
122 SYSCTL_INT(_kern, OID_AUTO, pin_pcpu_swi, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pin_pcpu_swi,
123     0, "Pin the per-CPU swis (except PCPU 0, which is also the default)");
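/*
 * A minimal sketch (values are illustrative only) of how these boot-time
 * tunables (CTLFLAG_RDTUN) might be set from loader.conf:
 *
 *	kern.ncallout="32768"
 *	kern.pin_default_swi="1"
 *	kern.pin_pcpu_swi="1"
 */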
124
125 /*
126 * TODO:
127 * allocate more timeout table slots when table overflows.
128 */
129 u_int callwheelsize, callwheelmask;
130
131 /*
132  * The callout cpu exec entities describe the state of the callouts
133  * currently running on the CPU, together with the state needed for
134  * migrating callouts to a new callout cpu.  In particular, the first
135  * entry of the cc_exec_entity array holds the state for the callout
136  * running in SWI thread context, while the second one holds the state
137  * for the callout running directly from hardware interrupt context.
138  * The cached state is essential for deferring migration when the
139  * migrating callout is already running.
140 */
141 struct cc_exec {
142 struct callout *cc_curr;
143 void (*cc_drain)(void *);
144 #ifdef SMP
145 void (*ce_migration_func)(void *);
146 void *ce_migration_arg;
147 int ce_migration_cpu;
148 sbintime_t ce_migration_time;
149 sbintime_t ce_migration_prec;
150 #endif
151 bool cc_cancel;
152 bool cc_waiting;
153 };
154
155 /*
156 * There is one struct callout_cpu per cpu, holding all relevant
157 * state for the callout processing thread on the individual CPU.
158 */
159 struct callout_cpu {
160 struct mtx_padalign cc_lock;
161 struct cc_exec cc_exec_entity[2];
162 struct callout *cc_next;
163 struct callout *cc_callout;
164 struct callout_list *cc_callwheel;
165 struct callout_tailq cc_expireq;
166 struct callout_slist cc_callfree;
167 sbintime_t cc_firstevent;
168 sbintime_t cc_lastscan;
169 void *cc_cookie;
170 u_int cc_bucket;
171 u_int cc_inited;
172 char cc_ktr_event_name[20];
173 };
174
175 #define callout_migrating(c) ((c)->c_iflags & CALLOUT_DFRMIGRATION)
176
177 #define cc_exec_curr(cc, dir) cc->cc_exec_entity[dir].cc_curr
178 #define cc_exec_drain(cc, dir) cc->cc_exec_entity[dir].cc_drain
179 #define cc_exec_next(cc) cc->cc_next
180 #define cc_exec_cancel(cc, dir) cc->cc_exec_entity[dir].cc_cancel
181 #define cc_exec_waiting(cc, dir) cc->cc_exec_entity[dir].cc_waiting
182 #ifdef SMP
183 #define cc_migration_func(cc, dir) cc->cc_exec_entity[dir].ce_migration_func
184 #define cc_migration_arg(cc, dir) cc->cc_exec_entity[dir].ce_migration_arg
185 #define cc_migration_cpu(cc, dir) cc->cc_exec_entity[dir].ce_migration_cpu
186 #define cc_migration_time(cc, dir) cc->cc_exec_entity[dir].ce_migration_time
187 #define cc_migration_prec(cc, dir) cc->cc_exec_entity[dir].ce_migration_prec
188
189 struct callout_cpu cc_cpu[MAXCPU];
190 #define CPUBLOCK MAXCPU
191 #define CC_CPU(cpu) (&cc_cpu[(cpu)])
192 #define CC_SELF() CC_CPU(PCPU_GET(cpuid))
193 #else
194 struct callout_cpu cc_cpu;
195 #define CC_CPU(cpu) &cc_cpu
196 #define CC_SELF() &cc_cpu
197 #endif
198 #define CC_LOCK(cc) mtx_lock_spin(&(cc)->cc_lock)
199 #define CC_UNLOCK(cc) mtx_unlock_spin(&(cc)->cc_lock)
200 #define CC_LOCK_ASSERT(cc) mtx_assert(&(cc)->cc_lock, MA_OWNED)
201
202 static int timeout_cpu;
203
204 static void callout_cpu_init(struct callout_cpu *cc, int cpu);
205 static void softclock_call_cc(struct callout *c, struct callout_cpu *cc,
206 #ifdef CALLOUT_PROFILING
207 int *mpcalls, int *lockcalls, int *gcalls,
208 #endif
209 int direct);
210
211 static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");
212
213 /**
214 * Locked by cc_lock:
215 * cc_curr - If a callout is in progress, it is cc_curr.
216 * If cc_curr is non-NULL, threads waiting in
217 * callout_drain() will be woken up as soon as the
218 * relevant callout completes.
219  * cc_cancel - Changing to true with both the callout's lock and cc_lock
220  * held guarantees that the current callout will not run.
221  * The softclock() function sets this to false before it
222  * drops cc_lock to acquire the callout's lock (c_lock), and it
223  * calls the handler only if cc_cancel is still false after
224  * c_lock is successfully acquired.
225  * cc_waiting - If a thread is waiting in callout_drain(), then
226  * cc_waiting is true. Set only when
227  * cc_curr is non-NULL.
228 */
229
230 /*
231 * Resets the execution entity tied to a specific callout cpu.
232 */
233 static void
234 cc_cce_cleanup(struct callout_cpu *cc, int direct)
235 {
236
237 cc_exec_curr(cc, direct) = NULL;
238 cc_exec_cancel(cc, direct) = false;
239 cc_exec_waiting(cc, direct) = false;
240 #ifdef SMP
241 cc_migration_cpu(cc, direct) = CPUBLOCK;
242 cc_migration_time(cc, direct) = 0;
243 cc_migration_prec(cc, direct) = 0;
244 cc_migration_func(cc, direct) = NULL;
245 cc_migration_arg(cc, direct) = NULL;
246 #endif
247 }
248
249 /*
250 * Checks if migration is requested by a specific callout cpu.
251 */
252 static int
253 cc_cce_migrating(struct callout_cpu *cc, int direct)
254 {
255
256 #ifdef SMP
257 return (cc_migration_cpu(cc, direct) != CPUBLOCK);
258 #else
259 return (0);
260 #endif
261 }
262
263 /*
264 * Kernel low level callwheel initialization
265 * called on cpu0 during kernel startup.
266 */
267 static void
268 callout_callwheel_init(void *dummy)
269 {
270 struct callout_cpu *cc;
271
272 /*
273 * Calculate the size of the callout wheel and the preallocated
274 * timeout() structures.
275  * XXX: Clip ncallout to the value the old maxusers-based formula
276  * yields at maxusers = 384. This is still huge, but acceptable.
277 */
278 memset(CC_CPU(0), 0, sizeof(cc_cpu));
279 ncallout = imin(16 + maxproc + maxfiles, 18508);
280 TUNABLE_INT_FETCH("kern.ncallout", &ncallout);
281
282 /*
283  * Calculate the callout wheel size as the next power of two
284  * greater than 'ncallout'.
285 */
286 callwheelsize = 1 << fls(ncallout);
287 callwheelmask = callwheelsize - 1;
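	/*
	 * Worked example of the sizing above: at the 18508 cap,
	 * fls(18508) == 15 (2^14 <= 18508 < 2^15), so callwheelsize
	 * becomes 1 << 15 = 32768 buckets and callwheelmask = 32767.
	 */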
288
289 /*
290 * Fetch whether we're pinning the swi's or not.
291 */
292 TUNABLE_INT_FETCH("kern.pin_default_swi", &pin_default_swi);
293 TUNABLE_INT_FETCH("kern.pin_pcpu_swi", &pin_pcpu_swi);
294
295 /*
296 * Only cpu0 handles timeout(9) and receives a preallocation.
297 *
298 * XXX: Once all timeout(9) consumers are converted this can
299 * be removed.
300 */
301 timeout_cpu = PCPU_GET(cpuid);
302 cc = CC_CPU(timeout_cpu);
303 cc->cc_callout = malloc(ncallout * sizeof(struct callout),
304 M_CALLOUT, M_WAITOK);
305 callout_cpu_init(cc, timeout_cpu);
306 }
307 SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);
308
309 /*
310 * Initialize the per-cpu callout structures.
311 */
312 static void
313 callout_cpu_init(struct callout_cpu *cc, int cpu)
314 {
315 struct callout *c;
316 int i;
317
318 mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
319 SLIST_INIT(&cc->cc_callfree);
320 cc->cc_inited = 1;
321 cc->cc_callwheel = malloc(sizeof(struct callout_list) * callwheelsize,
322 M_CALLOUT, M_WAITOK);
323 for (i = 0; i < callwheelsize; i++)
324 LIST_INIT(&cc->cc_callwheel[i]);
325 TAILQ_INIT(&cc->cc_expireq);
326 cc->cc_firstevent = SBT_MAX;
327 for (i = 0; i < 2; i++)
328 cc_cce_cleanup(cc, i);
329 snprintf(cc->cc_ktr_event_name, sizeof(cc->cc_ktr_event_name),
330 "callwheel cpu %d", cpu);
331 if (cc->cc_callout == NULL) /* Only cpu0 handles timeout(9) */
332 return;
333 for (i = 0; i < ncallout; i++) {
334 c = &cc->cc_callout[i];
335 callout_init(c, 0);
336 c->c_iflags = CALLOUT_LOCAL_ALLOC;
337 SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
338 }
339 }
340
341 #ifdef SMP
342 /*
343 * Switches the cpu tied to a specific callout.
344  * The function expects the incoming callout cpu to be locked and returns
345  * with the outgoing callout cpu locked.
346 */
347 static struct callout_cpu *
348 callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
349 {
350 struct callout_cpu *new_cc;
351
352 MPASS(c != NULL && cc != NULL);
353 CC_LOCK_ASSERT(cc);
354
355 /*
356  * Prevent interrupts and preemption from firing after the callout
357  * cpu is blocked, in order to avoid deadlocks, as the new thread
358  * may try to acquire the callout cpu lock.
359 */
360 c->c_cpu = CPUBLOCK;
361 spinlock_enter();
362 CC_UNLOCK(cc);
363 new_cc = CC_CPU(new_cpu);
364 CC_LOCK(new_cc);
365 spinlock_exit();
366 c->c_cpu = new_cpu;
367 return (new_cc);
368 }
369 #endif
370
371 /*
372 * Start standard softclock thread.
373 */
374 static void
375 start_softclock(void *dummy)
376 {
377 struct callout_cpu *cc;
378 char name[MAXCOMLEN];
379 #ifdef SMP
380 int cpu;
381 struct intr_event *ie;
382 #endif
383
384 cc = CC_CPU(timeout_cpu);
385 snprintf(name, sizeof(name), "clock (%d)", timeout_cpu);
386 if (swi_add(&clk_intr_event, name, softclock, cc, SWI_CLOCK,
387 INTR_MPSAFE, &cc->cc_cookie))
388 panic("died while creating standard software ithreads");
389 if (pin_default_swi &&
390 (intr_event_bind(clk_intr_event, timeout_cpu) != 0)) {
391 printf("%s: timeout clock couldn't be pinned to cpu %d\n",
392 __func__,
393 timeout_cpu);
394 }
395
396 #ifdef SMP
397 CPU_FOREACH(cpu) {
398 if (cpu == timeout_cpu)
399 continue;
400 cc = CC_CPU(cpu);
401 cc->cc_callout = NULL; /* Only cpu0 handles timeout(9). */
402 callout_cpu_init(cc, cpu);
403 snprintf(name, sizeof(name), "clock (%d)", cpu);
404 ie = NULL;
405 if (swi_add(&ie, name, softclock, cc, SWI_CLOCK,
406 INTR_MPSAFE, &cc->cc_cookie))
407 panic("died while creating standard software ithreads");
408 if (pin_pcpu_swi && (intr_event_bind(ie, cpu) != 0)) {
409 printf("%s: per-cpu clock couldn't be pinned to "
410 "cpu %d\n",
411 __func__,
412 cpu);
413 }
414 }
415 #endif
416 }
417 SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);
418
419 #define CC_HASH_SHIFT 8
420
421 static inline u_int
422 callout_hash(sbintime_t sbt)
423 {
424
425 return (sbt >> (32 - CC_HASH_SHIFT));
426 }
427
428 static inline u_int
429 callout_get_bucket(sbintime_t sbt)
430 {
431
432 return (callout_hash(sbt) & callwheelmask);
433 }
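/*
 * Worked example of the hashing above, assuming the standard 32.32 fixed
 * point sbintime_t layout: with CC_HASH_SHIFT == 8, callout_hash() drops
 * the low 24 fractional bits, so the hash advances once per 2^24 sbt units,
 * i.e. every 1/256 of a second.  An illustrative time of 5.5 seconds
 * (5 * SBT_1S + SBT_1S / 2) hashes to 5.5 * 256 = 1408, which
 * callout_get_bucket() then masks with callwheelmask to select a slot.
 */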
434
435 void
436 callout_process(sbintime_t now)
437 {
438 struct callout *tmp, *tmpn;
439 struct callout_cpu *cc;
440 struct callout_list *sc;
441 sbintime_t first, last, max, tmp_max;
442 uint32_t lookahead;
443 u_int firstb, lastb, nowb;
444 #ifdef CALLOUT_PROFILING
445 int depth_dir = 0, mpcalls_dir = 0, lockcalls_dir = 0;
446 #endif
447
448 cc = CC_SELF();
449 mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
450
451 /* Compute the buckets of the last scan and present times. */
452 firstb = callout_hash(cc->cc_lastscan);
453 cc->cc_lastscan = now;
454 nowb = callout_hash(now);
455
456 /* Compute the last bucket and minimum time of the bucket after it. */
457 if (nowb == firstb)
458 lookahead = (SBT_1S / 16);
459 else if (nowb - firstb == 1)
460 lookahead = (SBT_1S / 8);
461 else
462 lookahead = (SBT_1S / 2);
463 first = last = now;
464 first += (lookahead / 2);
465 last += lookahead;
466 last &= (0xffffffffffffffffLLU << (32 - CC_HASH_SHIFT));
467 lastb = callout_hash(last) - 1;
468 max = last;
469
470 /*
471  * Check if we wrapped around the entire wheel since the last scan.
472  * If so, we need to scan the entire wheel for pending callouts.
473 */
474 if (lastb - firstb >= callwheelsize) {
475 lastb = firstb + callwheelsize - 1;
476 if (nowb - firstb >= callwheelsize)
477 nowb = lastb;
478 }
479
480 /* Iterate callwheel from firstb to nowb and then up to lastb. */
481 do {
482 sc = &cc->cc_callwheel[firstb & callwheelmask];
483 tmp = LIST_FIRST(sc);
484 while (tmp != NULL) {
485 			/* Run the callout if its scheduled time has arrived. */
486 if (tmp->c_time <= now) {
487 /*
488 * Consumer told us the callout may be run
489 * directly from hardware interrupt context.
490 */
491 if (tmp->c_iflags & CALLOUT_DIRECT) {
492 #ifdef CALLOUT_PROFILING
493 ++depth_dir;
494 #endif
495 cc_exec_next(cc) =
496 LIST_NEXT(tmp, c_links.le);
497 cc->cc_bucket = firstb & callwheelmask;
498 LIST_REMOVE(tmp, c_links.le);
499 softclock_call_cc(tmp, cc,
500 #ifdef CALLOUT_PROFILING
501 &mpcalls_dir, &lockcalls_dir, NULL,
502 #endif
503 1);
504 tmp = cc_exec_next(cc);
505 cc_exec_next(cc) = NULL;
506 } else {
507 tmpn = LIST_NEXT(tmp, c_links.le);
508 LIST_REMOVE(tmp, c_links.le);
509 TAILQ_INSERT_TAIL(&cc->cc_expireq,
510 tmp, c_links.tqe);
511 tmp->c_iflags |= CALLOUT_PROCESSED;
512 tmp = tmpn;
513 }
514 continue;
515 }
516 			/* Skip events in the distant future. */
517 if (tmp->c_time >= max)
518 goto next;
519 /*
520  * The event's earliest time is later than the present
521  * maximal time, so it cannot be aggregated.
522 */
523 if (tmp->c_time > last) {
524 lastb = nowb;
525 goto next;
526 }
527 /* Update first and last time, respecting this event. */
528 if (tmp->c_time < first)
529 first = tmp->c_time;
530 tmp_max = tmp->c_time + tmp->c_precision;
531 if (tmp_max < last)
532 last = tmp_max;
533 next:
534 tmp = LIST_NEXT(tmp, c_links.le);
535 }
536 /* Proceed with the next bucket. */
537 firstb++;
538 /*
539  * Stop if we have looked past the present time and found
540  * an event we cannot execute now.
541  * Stop if we have looked far enough into the future.
542 */
543 } while (((int)(firstb - lastb)) <= 0);
544 cc->cc_firstevent = last;
545 #ifndef NO_EVENTTIMERS
546 cpu_new_callout(curcpu, last, first);
547 #endif
548 #ifdef CALLOUT_PROFILING
549 avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8;
550 avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8;
551 avg_lockcalls_dir += (lockcalls_dir * 1000 - avg_lockcalls_dir) >> 8;
552 #endif
553 mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
554 /*
555 * swi_sched acquires the thread lock, so we don't want to call it
556 * with cc_lock held; incorrect locking order.
557 */
558 if (!TAILQ_EMPTY(&cc->cc_expireq))
559 swi_sched(cc->cc_cookie, 0);
560 }
561
562 static struct callout_cpu *
563 callout_lock(struct callout *c)
564 {
565 struct callout_cpu *cc;
566 int cpu;
567
568 for (;;) {
569 cpu = c->c_cpu;
570 #ifdef SMP
571 if (cpu == CPUBLOCK) {
572 while (c->c_cpu == CPUBLOCK)
573 cpu_spinwait();
574 continue;
575 }
576 #endif
577 cc = CC_CPU(cpu);
578 CC_LOCK(cc);
579 if (cpu == c->c_cpu)
580 break;
581 CC_UNLOCK(cc);
582 }
583 return (cc);
584 }
585
586 static void
587 callout_cc_add(struct callout *c, struct callout_cpu *cc,
588 sbintime_t sbt, sbintime_t precision, void (*func)(void *),
589 void *arg, int cpu, int flags)
590 {
591 int bucket;
592
593 CC_LOCK_ASSERT(cc);
594 if (sbt < cc->cc_lastscan)
595 sbt = cc->cc_lastscan;
596 c->c_arg = arg;
597 c->c_iflags |= CALLOUT_PENDING;
598 c->c_iflags &= ~CALLOUT_PROCESSED;
599 c->c_flags |= CALLOUT_ACTIVE;
600 if (flags & C_DIRECT_EXEC)
601 c->c_iflags |= CALLOUT_DIRECT;
602 c->c_func = func;
603 c->c_time = sbt;
604 c->c_precision = precision;
605 bucket = callout_get_bucket(c->c_time);
606 CTR3(KTR_CALLOUT, "precision set for %p: %d.%08x",
607 c, (int)(c->c_precision >> 32),
608 (u_int)(c->c_precision & 0xffffffff));
609 LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le);
610 if (cc->cc_bucket == bucket)
611 cc_exec_next(cc) = c;
612 #ifndef NO_EVENTTIMERS
613 /*
614  * Inform the eventtimers(4) subsystem that a new callout
615  * has been inserted, but only if it is really required.
616 */
617 if (SBT_MAX - c->c_time < c->c_precision)
618 c->c_precision = SBT_MAX - c->c_time;
619 sbt = c->c_time + c->c_precision;
620 if (sbt < cc->cc_firstevent) {
621 cc->cc_firstevent = sbt;
622 cpu_new_callout(cpu, sbt, c->c_time);
623 }
624 #endif
625 }
626
627 static void
628 callout_cc_del(struct callout *c, struct callout_cpu *cc)
629 {
630
631 if ((c->c_iflags & CALLOUT_LOCAL_ALLOC) == 0)
632 return;
633 c->c_func = NULL;
634 SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
635 }
636
637 static void
638 softclock_call_cc(struct callout *c, struct callout_cpu *cc,
639 #ifdef CALLOUT_PROFILING
640 int *mpcalls, int *lockcalls, int *gcalls,
641 #endif
642 int direct)
643 {
644 struct rm_priotracker tracker;
645 void (*c_func)(void *);
646 void *c_arg;
647 struct lock_class *class;
648 struct lock_object *c_lock;
649 uintptr_t lock_status;
650 int c_iflags;
651 #ifdef SMP
652 struct callout_cpu *new_cc;
653 void (*new_func)(void *);
654 void *new_arg;
655 int flags, new_cpu;
656 sbintime_t new_prec, new_time;
657 #endif
658 #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
659 sbintime_t sbt1, sbt2;
660 struct timespec ts2;
661 static sbintime_t maxdt = 2 * SBT_1MS; /* 2 msec */
662 static timeout_t *lastfunc;
663 #endif
664
665 KASSERT((c->c_iflags & CALLOUT_PENDING) == CALLOUT_PENDING,
666 ("softclock_call_cc: pend %p %x", c, c->c_iflags));
667 KASSERT((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE,
668 ("softclock_call_cc: act %p %x", c, c->c_flags));
669 class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
670 lock_status = 0;
671 if (c->c_flags & CALLOUT_SHAREDLOCK) {
672 if (class == &lock_class_rm)
673 lock_status = (uintptr_t)&tracker;
674 else
675 lock_status = 1;
676 }
677 c_lock = c->c_lock;
678 c_func = c->c_func;
679 c_arg = c->c_arg;
680 c_iflags = c->c_iflags;
681 if (c->c_iflags & CALLOUT_LOCAL_ALLOC)
682 c->c_iflags = CALLOUT_LOCAL_ALLOC;
683 else
684 c->c_iflags &= ~CALLOUT_PENDING;
685
686 cc_exec_curr(cc, direct) = c;
687 cc_exec_cancel(cc, direct) = false;
688 cc_exec_drain(cc, direct) = NULL;
689 CC_UNLOCK(cc);
690 if (c_lock != NULL) {
691 class->lc_lock(c_lock, lock_status);
692 /*
693 * The callout may have been cancelled
694 * while we switched locks.
695 */
696 if (cc_exec_cancel(cc, direct)) {
697 class->lc_unlock(c_lock);
698 goto skip;
699 }
700 /* The callout cannot be stopped now. */
701 cc_exec_cancel(cc, direct) = true;
702 if (c_lock == &Giant.lock_object) {
703 #ifdef CALLOUT_PROFILING
704 (*gcalls)++;
705 #endif
706 CTR3(KTR_CALLOUT, "callout giant %p func %p arg %p",
707 c, c_func, c_arg);
708 } else {
709 #ifdef CALLOUT_PROFILING
710 (*lockcalls)++;
711 #endif
712 CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
713 c, c_func, c_arg);
714 }
715 } else {
716 #ifdef CALLOUT_PROFILING
717 (*mpcalls)++;
718 #endif
719 CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
720 c, c_func, c_arg);
721 }
722 KTR_STATE3(KTR_SCHED, "callout", cc->cc_ktr_event_name, "running",
723 "func:%p", c_func, "arg:%p", c_arg, "direct:%d", direct);
724 #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
725 sbt1 = sbinuptime();
726 #endif
727 THREAD_NO_SLEEPING();
728 SDT_PROBE1(callout_execute, , , callout__start, c);
729 c_func(c_arg);
730 SDT_PROBE1(callout_execute, , , callout__end, c);
731 THREAD_SLEEPING_OK();
732 #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
733 sbt2 = sbinuptime();
734 sbt2 -= sbt1;
735 if (sbt2 > maxdt) {
736 if (lastfunc != c_func || sbt2 > maxdt * 2) {
737 ts2 = sbttots(sbt2);
738 printf(
739 "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
740 c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
741 }
742 maxdt = sbt2;
743 lastfunc = c_func;
744 }
745 #endif
746 KTR_STATE0(KTR_SCHED, "callout", cc->cc_ktr_event_name, "idle");
747 CTR1(KTR_CALLOUT, "callout %p finished", c);
748 if ((c_iflags & CALLOUT_RETURNUNLOCKED) == 0)
749 class->lc_unlock(c_lock);
750 skip:
751 CC_LOCK(cc);
752 KASSERT(cc_exec_curr(cc, direct) == c, ("mishandled cc_curr"));
753 cc_exec_curr(cc, direct) = NULL;
754 if (cc_exec_drain(cc, direct)) {
755 void (*drain)(void *);
756
757 drain = cc_exec_drain(cc, direct);
758 cc_exec_drain(cc, direct) = NULL;
759 CC_UNLOCK(cc);
760 drain(c_arg);
761 CC_LOCK(cc);
762 }
763 if (cc_exec_waiting(cc, direct)) {
764 /*
765 * There is someone waiting for the
766 * callout to complete.
767 * If the callout was scheduled for
768 * migration just cancel it.
769 */
770 if (cc_cce_migrating(cc, direct)) {
771 cc_cce_cleanup(cc, direct);
772
773 /*
774  * We should assert here that the callout is not
775  * destroyed, but that is not easy.
776 */
777 c->c_iflags &= ~CALLOUT_DFRMIGRATION;
778 }
779 cc_exec_waiting(cc, direct) = false;
780 CC_UNLOCK(cc);
781 wakeup(&cc_exec_waiting(cc, direct));
782 CC_LOCK(cc);
783 } else if (cc_cce_migrating(cc, direct)) {
784 KASSERT((c_iflags & CALLOUT_LOCAL_ALLOC) == 0,
785 ("Migrating legacy callout %p", c));
786 #ifdef SMP
787 /*
788 * If the callout was scheduled for
789 * migration just perform it now.
790 */
791 new_cpu = cc_migration_cpu(cc, direct);
792 new_time = cc_migration_time(cc, direct);
793 new_prec = cc_migration_prec(cc, direct);
794 new_func = cc_migration_func(cc, direct);
795 new_arg = cc_migration_arg(cc, direct);
796 cc_cce_cleanup(cc, direct);
797
798 /*
799  * We should assert here that the callout is not destroyed,
800  * but that is not easy.
801  *
802  * First, handle deferred callout stops.
803 */
804 if (!callout_migrating(c)) {
805 CTR3(KTR_CALLOUT,
806 "deferred cancelled %p func %p arg %p",
807 c, new_func, new_arg);
808 callout_cc_del(c, cc);
809 return;
810 }
811 c->c_iflags &= ~CALLOUT_DFRMIGRATION;
812
813 new_cc = callout_cpu_switch(c, cc, new_cpu);
814 flags = (direct) ? C_DIRECT_EXEC : 0;
815 callout_cc_add(c, new_cc, new_time, new_prec, new_func,
816 new_arg, new_cpu, flags);
817 CC_UNLOCK(new_cc);
818 CC_LOCK(cc);
819 #else
820 panic("migration should not happen");
821 #endif
822 }
823 /*
824 * If the current callout is locally allocated (from
825 * timeout(9)) then put it on the freelist.
826 *
827 * Note: we need to check the cached copy of c_iflags because
828 * if it was not local, then it's not safe to deref the
829 * callout pointer.
830 */
831 KASSERT((c_iflags & CALLOUT_LOCAL_ALLOC) == 0 ||
832 c->c_iflags == CALLOUT_LOCAL_ALLOC,
833 ("corrupted callout"));
834 if (c_iflags & CALLOUT_LOCAL_ALLOC)
835 callout_cc_del(c, cc);
836 }
837
838 /*
839 * The callout mechanism is based on the work of Adam M. Costello and
840 * George Varghese, published in a technical report entitled "Redesigning
841 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
842 * in FreeBSD by Justin T. Gibbs. The original work on the data structures
843 * used in this implementation was published by G. Varghese and T. Lauck in
844 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
845 * the Efficient Implementation of a Timer Facility" in the Proceedings of
846 * the 11th ACM Annual Symposium on Operating Systems Principles,
847 * Austin, Texas Nov 1987.
848 */
849
850 /*
851 * Software (low priority) clock interrupt.
852 * Run periodic events from timeout queue.
853 */
854 void
855 softclock(void *arg)
856 {
857 struct callout_cpu *cc;
858 struct callout *c;
859 #ifdef CALLOUT_PROFILING
860 int depth = 0, gcalls = 0, lockcalls = 0, mpcalls = 0;
861 #endif
862
863 cc = (struct callout_cpu *)arg;
864 CC_LOCK(cc);
865 while ((c = TAILQ_FIRST(&cc->cc_expireq)) != NULL) {
866 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
867 softclock_call_cc(c, cc,
868 #ifdef CALLOUT_PROFILING
869 &mpcalls, &lockcalls, &gcalls,
870 #endif
871 0);
872 #ifdef CALLOUT_PROFILING
873 ++depth;
874 #endif
875 }
876 #ifdef CALLOUT_PROFILING
877 avg_depth += (depth * 1000 - avg_depth) >> 8;
878 avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
879 avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
880 avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
881 #endif
882 CC_UNLOCK(cc);
883 }
884
885 /*
886 * timeout --
887 * Execute a function after a specified length of time.
888 *
889 * untimeout --
890 * Cancel previous timeout function call.
891 *
892 * callout_handle_init --
893 * Initialize a handle so that using it with untimeout is benign.
894 *
895 * See AT&T BCI Driver Reference Manual for specification. This
896 * implementation differs from that one in that although an
897 * identification value is returned from timeout, the original
898 * arguments to timeout as well as the identifier are used to
899 * identify entries for untimeout.
900 */
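/*
 * A minimal usage sketch of the legacy interface above; the handler name
 * and its argument are hypothetical:
 *
 *	static void example_expired(void *arg);		// hypothetical handler
 *	static struct callout_handle example_h;
 *
 *	callout_handle_init(&example_h);	// safe to pass to untimeout()
 *	example_h = timeout(example_expired, arg, hz);	// fire in ~1 second
 *	...
 *	untimeout(example_expired, arg, example_h);	// cancel if still pending
 */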
901 struct callout_handle
902 timeout(timeout_t *ftn, void *arg, int to_ticks)
903 {
904 struct callout_cpu *cc;
905 struct callout *new;
906 struct callout_handle handle;
907
908 cc = CC_CPU(timeout_cpu);
909 CC_LOCK(cc);
910 /* Fill in the next free callout structure. */
911 new = SLIST_FIRST(&cc->cc_callfree);
912 if (new == NULL)
913 /* XXX Attempt to malloc first */
914 panic("timeout table full");
915 SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
916 callout_reset(new, to_ticks, ftn, arg);
917 handle.callout = new;
918 CC_UNLOCK(cc);
919
920 return (handle);
921 }
922
923 void
924 untimeout(timeout_t *ftn, void *arg, struct callout_handle handle)
925 {
926 struct callout_cpu *cc;
927
928 /*
929 * Check for a handle that was initialized
930 * by callout_handle_init, but never used
931 * for a real timeout.
932 */
933 if (handle.callout == NULL)
934 return;
935
936 cc = callout_lock(handle.callout);
937 if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
938 callout_stop(handle.callout);
939 CC_UNLOCK(cc);
940 }
941
942 void
943 callout_handle_init(struct callout_handle *handle)
944 {
945 handle->callout = NULL;
946 }
947
948 void
949 callout_when(sbintime_t sbt, sbintime_t precision, int flags,
950 sbintime_t *res, sbintime_t *prec_res)
951 {
952 sbintime_t to_sbt, to_pr;
953
954 if ((flags & (C_ABSOLUTE | C_PRECALC)) != 0) {
955 *res = sbt;
956 *prec_res = precision;
957 return;
958 }
959 if ((flags & C_HARDCLOCK) != 0 && sbt < tick_sbt)
960 sbt = tick_sbt;
961 if ((flags & C_HARDCLOCK) != 0 ||
962 #ifdef NO_EVENTTIMERS
963 sbt >= sbt_timethreshold) {
964 to_sbt = getsbinuptime();
965
966 /* Add safety belt for the case of hz > 1000. */
967 to_sbt += tc_tick_sbt - tick_sbt;
968 #else
969 sbt >= sbt_tickthreshold) {
970 /*
971 * Obtain the time of the last hardclock() call on
972  * this CPU directly from kern_clocksource.c.
973 * This value is per-CPU, but it is equal for all
974 * active ones.
975 */
976 #ifdef __LP64__
977 to_sbt = DPCPU_GET(hardclocktime);
978 #else
979 spinlock_enter();
980 to_sbt = DPCPU_GET(hardclocktime);
981 spinlock_exit();
982 #endif
983 #endif
984 if (cold && to_sbt == 0)
985 to_sbt = sbinuptime();
986 if ((flags & C_HARDCLOCK) == 0)
987 to_sbt += tick_sbt;
988 } else
989 to_sbt = sbinuptime();
990 if (SBT_MAX - to_sbt < sbt)
991 to_sbt = SBT_MAX;
992 else
993 to_sbt += sbt;
994 *res = to_sbt;
995 to_pr = ((C_PRELGET(flags) < 0) ? sbt >> tc_precexp :
996 sbt >> C_PRELGET(flags));
997 *prec_res = to_pr > precision ? to_pr : precision;
998 }
999
1000 /*
1001 * New interface; clients allocate their own callout structures.
1002 *
1003 * callout_reset() - establish or change a timeout
1004 * callout_stop() - disestablish a timeout
1005 * callout_init() - initialize a callout structure so that it can
1006 * safely be passed to callout_reset() and callout_stop()
1007 *
1008 * <sys/callout.h> defines three convenience macros:
1009 *
1010 * callout_active() - returns truth if callout has not been stopped,
1011 * drained, or deactivated since the last time the callout was
1012 * reset.
1013 * callout_pending() - returns truth if callout is still waiting for timeout
1014 * callout_deactivate() - marks the callout as having been serviced
1015 */
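/*
 * A minimal usage sketch of the interface above, as a consumer might use
 * it; the softc, lock, and handler names are hypothetical:
 *
 *	struct example_softc {
 *		struct mtx	ex_lock;
 *		struct callout	ex_tick;
 *	};
 *
 *	static void
 *	example_tick(void *arg)
 *	{
 *		struct example_softc *sc = arg;	// ex_lock is held on entry
 *
 *		// ... periodic work ...
 *		callout_reset(&sc->ex_tick, hz, example_tick, sc); // rearm, ~1s
 *	}
 *
 *	// Setup:    callout_init_mtx(&sc->ex_tick, &sc->ex_lock, 0);
 *	//           callout_reset(&sc->ex_tick, hz, example_tick, sc);
 *	// Teardown: callout_drain(&sc->ex_tick);	(ex_lock not held)
 */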
1016 int
1017 callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t prec,
1018 void (*ftn)(void *), void *arg, int cpu, int flags)
1019 {
1020 sbintime_t to_sbt, precision;
1021 struct callout_cpu *cc;
1022 int cancelled, direct;
1023 int ignore_cpu=0;
1024
1025 cancelled = 0;
1026 if (cpu == -1) {
1027 ignore_cpu = 1;
1028 } else if ((cpu >= MAXCPU) ||
1029 ((CC_CPU(cpu))->cc_inited == 0)) {
1030 /* Invalid CPU spec */
1031 panic("Invalid CPU in callout %d", cpu);
1032 }
1033 callout_when(sbt, prec, flags, &to_sbt, &precision);
1034
1035 /*
1036  * This flag used to be set by callout_cc_add(), but the
1037  * first time this function is called we could end up with
1038  * the wrong direct flag if it is not set before the add.
1039 */
1040 if (flags & C_DIRECT_EXEC) {
1041 direct = 1;
1042 } else {
1043 direct = 0;
1044 }
1045 KASSERT(!direct || c->c_lock == NULL,
1046 ("%s: direct callout %p has lock", __func__, c));
1047 cc = callout_lock(c);
1048 /*
1049  * Don't allow migration of pre-allocated callouts lest they
1050  * become unbalanced; also handle the case where the user
1051  * does not care which CPU the callout runs on.
1052 */
1053 if ((c->c_iflags & CALLOUT_LOCAL_ALLOC) ||
1054 ignore_cpu) {
1055 cpu = c->c_cpu;
1056 }
1057
1058 if (cc_exec_curr(cc, direct) == c) {
1059 /*
1060 * We're being asked to reschedule a callout which is
1061 * currently in progress. If there is a lock then we
1062 * can cancel the callout if it has not really started.
1063 */
1064 if (c->c_lock != NULL && !cc_exec_cancel(cc, direct))
1065 cancelled = cc_exec_cancel(cc, direct) = true;
1066 if (cc_exec_waiting(cc, direct)) {
1067 /*
1068 * Someone has called callout_drain to kill this
1069 * callout. Don't reschedule.
1070 */
1071 CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
1072 cancelled ? "cancelled" : "failed to cancel",
1073 c, c->c_func, c->c_arg);
1074 CC_UNLOCK(cc);
1075 return (cancelled);
1076 }
1077 #ifdef SMP
1078 if (callout_migrating(c)) {
1079 /*
1080 * This only occurs when a second callout_reset_sbt_on
1081 * is made after a previous one moved it into
1082 * deferred migration (below). Note we do *not* change
1083 * the prev_cpu even though the previous target may
1084 * be different.
1085 */
1086 cc_migration_cpu(cc, direct) = cpu;
1087 cc_migration_time(cc, direct) = to_sbt;
1088 cc_migration_prec(cc, direct) = precision;
1089 cc_migration_func(cc, direct) = ftn;
1090 cc_migration_arg(cc, direct) = arg;
1091 cancelled = 1;
1092 CC_UNLOCK(cc);
1093 return (cancelled);
1094 }
1095 #endif
1096 }
1097 if (c->c_iflags & CALLOUT_PENDING) {
1098 if ((c->c_iflags & CALLOUT_PROCESSED) == 0) {
1099 if (cc_exec_next(cc) == c)
1100 cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
1101 LIST_REMOVE(c, c_links.le);
1102 } else {
1103 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
1104 }
1105 cancelled = 1;
1106 c->c_iflags &= ~ CALLOUT_PENDING;
1107 c->c_flags &= ~ CALLOUT_ACTIVE;
1108 }
1109
1110 #ifdef SMP
1111 /*
1112  * If the callout must migrate, try to perform it immediately.
1113 * If the callout is currently running, just defer the migration
1114 * to a more appropriate moment.
1115 */
1116 if (c->c_cpu != cpu) {
1117 if (cc_exec_curr(cc, direct) == c) {
1118 /*
1119  * Pending will have been removed since we are
1120  * actually executing the callout on another
1121  * CPU.  That callout should be waiting on the
1122  * lock the caller holds.  If we set both the
1123  * active and pending flags, then after we return
1124  * and the lock is dropped, the executing callout
1125  * will proceed, see that pending is true, and return.
1126  * When the actual callout execution returns,
1127  * the migration will occur in softclock_call_cc()
1128  * and this new callout will be placed on the
1129  * new CPU via a call to callout_cpu_switch(), which
1130  * will take the lock on the right CPU, followed
1131  * by a call to callout_cc_add(), which will add it
1132  * there (see above in softclock_call_cc()).
1133 */
1134 cc_migration_cpu(cc, direct) = cpu;
1135 cc_migration_time(cc, direct) = to_sbt;
1136 cc_migration_prec(cc, direct) = precision;
1137 cc_migration_func(cc, direct) = ftn;
1138 cc_migration_arg(cc, direct) = arg;
1139 c->c_iflags |= (CALLOUT_DFRMIGRATION | CALLOUT_PENDING);
1140 c->c_flags |= CALLOUT_ACTIVE;
1141 CTR6(KTR_CALLOUT,
1142 "migration of %p func %p arg %p in %d.%08x to %u deferred",
1143 c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
1144 (u_int)(to_sbt & 0xffffffff), cpu);
1145 CC_UNLOCK(cc);
1146 return (cancelled);
1147 }
1148 cc = callout_cpu_switch(c, cc, cpu);
1149 }
1150 #endif
1151
1152 callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags);
1153 CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x",
1154 cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
1155 (u_int)(to_sbt & 0xffffffff));
1156 CC_UNLOCK(cc);
1157
1158 return (cancelled);
1159 }
1160
1161 /*
1162 * Common idioms that can be optimized in the future.
1163 */
1164 int
1165 callout_schedule_on(struct callout *c, int to_ticks, int cpu)
1166 {
1167 return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
1168 }
1169
1170 int
1171 callout_schedule(struct callout *c, int to_ticks)
1172 {
1173 return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
1174 }
1175
1176 int
1177 _callout_stop_safe(struct callout *c, int flags, void (*drain)(void *))
1178 {
1179 struct callout_cpu *cc, *old_cc;
1180 struct lock_class *class;
1181 int direct, sq_locked, use_lock;
1182 int cancelled, not_on_a_list;
1183
1184 if ((flags & CS_DRAIN) != 0)
1185 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, c->c_lock,
1186 "calling %s", __func__);
1187
1188 /*
1189 * Some old subsystems don't hold Giant while running a callout_stop(),
1190 * so just discard this check for the moment.
1191 */
1192 if ((flags & CS_DRAIN) == 0 && c->c_lock != NULL) {
1193 if (c->c_lock == &Giant.lock_object)
1194 use_lock = mtx_owned(&Giant);
1195 else {
1196 use_lock = 1;
1197 class = LOCK_CLASS(c->c_lock);
1198 class->lc_assert(c->c_lock, LA_XLOCKED);
1199 }
1200 } else
1201 use_lock = 0;
1202 if (c->c_iflags & CALLOUT_DIRECT) {
1203 direct = 1;
1204 } else {
1205 direct = 0;
1206 }
1207 sq_locked = 0;
1208 old_cc = NULL;
1209 again:
1210 cc = callout_lock(c);
1211
1212 if ((c->c_iflags & (CALLOUT_DFRMIGRATION | CALLOUT_PENDING)) ==
1213 (CALLOUT_DFRMIGRATION | CALLOUT_PENDING) &&
1214 ((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE)) {
1215 /*
1216 * Special case where this slipped in while we
1217 * were migrating *as* the callout is about to
1218 * execute. The caller probably holds the lock
1219 * the callout wants.
1220 *
1221 * Get rid of the migration first. Then set
1222 * the flag that tells this code *not* to
1223  * try to remove it from any lists (it's not
1224 * on one yet). When the callout wheel runs,
1225 * it will ignore this callout.
1226 */
1227 c->c_iflags &= ~CALLOUT_PENDING;
1228 c->c_flags &= ~CALLOUT_ACTIVE;
1229 not_on_a_list = 1;
1230 } else {
1231 not_on_a_list = 0;
1232 }
1233
1234 /*
1235 * If the callout was migrating while the callout cpu lock was
1236 * dropped, just drop the sleepqueue lock and check the states
1237 * again.
1238 */
1239 if (sq_locked != 0 && cc != old_cc) {
1240 #ifdef SMP
1241 CC_UNLOCK(cc);
1242 sleepq_release(&cc_exec_waiting(old_cc, direct));
1243 sq_locked = 0;
1244 old_cc = NULL;
1245 goto again;
1246 #else
1247 panic("migration should not happen");
1248 #endif
1249 }
1250
1251 /*
1252 * If the callout is running, try to stop it or drain it.
1253 */
1254 if (cc_exec_curr(cc, direct) == c) {
1255 /*
1256  * Whether we succeed in stopping it or not, we must clear the
1257  * active flag - this is what API users expect. If we're
1258  * draining and the callout is currently executing, first wait
1259  * until it finishes.
1260 */
1261 if ((flags & CS_DRAIN) == 0)
1262 c->c_flags &= ~CALLOUT_ACTIVE;
1263
1264 if ((flags & CS_DRAIN) != 0) {
1265 /*
1266 * The current callout is running (or just
1267 * about to run) and blocking is allowed, so
1268 * just wait for the current invocation to
1269 * finish.
1270 */
1271 while (cc_exec_curr(cc, direct) == c) {
1272 /*
1273 * Use direct calls to sleepqueue interface
1274 * instead of cv/msleep in order to avoid
1275 * a LOR between cc_lock and sleepqueue
1276  * chain spinlocks. This piece of code
1277  * actually emulates a msleep_spin() call.
1278 *
1279 * If we already have the sleepqueue chain
1280 * locked, then we can safely block. If we
1281 * don't already have it locked, however,
1282 * we have to drop the cc_lock to lock
1283 * it. This opens several races, so we
1284 * restart at the beginning once we have
1285 * both locks. If nothing has changed, then
1286 * we will end up back here with sq_locked
1287 * set.
1288 */
1289 if (!sq_locked) {
1290 CC_UNLOCK(cc);
1291 sleepq_lock(
1292 &cc_exec_waiting(cc, direct));
1293 sq_locked = 1;
1294 old_cc = cc;
1295 goto again;
1296 }
1297
1298 /*
1299  * Migration could be cancelled here, but
1300  * since it is still not certain when it
1301  * will be wrapped up, just let softclock()
1302  * take care of it.
1303 */
1304 cc_exec_waiting(cc, direct) = true;
1305 DROP_GIANT();
1306 CC_UNLOCK(cc);
1307 sleepq_add(
1308 &cc_exec_waiting(cc, direct),
1309 &cc->cc_lock.lock_object, "codrain",
1310 SLEEPQ_SLEEP, 0);
1311 sleepq_wait(
1312 &cc_exec_waiting(cc, direct),
1313 0);
1314 sq_locked = 0;
1315 old_cc = NULL;
1316
1317 /* Reacquire locks previously released. */
1318 PICKUP_GIANT();
1319 CC_LOCK(cc);
1320 }
1321 c->c_flags &= ~CALLOUT_ACTIVE;
1322 } else if (use_lock &&
1323 !cc_exec_cancel(cc, direct) && (drain == NULL)) {
1324
1325 /*
1326 * The current callout is waiting for its
1327 * lock which we hold. Cancel the callout
1328 * and return. After our caller drops the
1329 * lock, the callout will be skipped in
1330 * softclock(). This *only* works with a
1331 * callout_stop() *not* callout_drain() or
1332 * callout_async_drain().
1333 */
1334 cc_exec_cancel(cc, direct) = true;
1335 CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
1336 c, c->c_func, c->c_arg);
1337 KASSERT(!cc_cce_migrating(cc, direct),
1338 ("callout wrongly scheduled for migration"));
1339 if (callout_migrating(c)) {
1340 c->c_iflags &= ~CALLOUT_DFRMIGRATION;
1341 #ifdef SMP
1342 cc_migration_cpu(cc, direct) = CPUBLOCK;
1343 cc_migration_time(cc, direct) = 0;
1344 cc_migration_prec(cc, direct) = 0;
1345 cc_migration_func(cc, direct) = NULL;
1346 cc_migration_arg(cc, direct) = NULL;
1347 #endif
1348 }
1349 CC_UNLOCK(cc);
1350 KASSERT(!sq_locked, ("sleepqueue chain locked"));
1351 return (1);
1352 } else if (callout_migrating(c)) {
1353 /*
1354 * The callout is currently being serviced
1355 * and the "next" callout is scheduled at
1356 * its completion with a migration. We remove
1357 * the migration flag so it *won't* get rescheduled,
1358  * but we can't stop the one that's running, so
1359 * we return 0.
1360 */
1361 c->c_iflags &= ~CALLOUT_DFRMIGRATION;
1362 #ifdef SMP
1363 /*
1364 * We can't call cc_cce_cleanup here since
1365 * if we do it will remove .ce_curr and
1366  * it's still running. This will prevent a
1367 * reschedule of the callout when the
1368 * execution completes.
1369 */
1370 cc_migration_cpu(cc, direct) = CPUBLOCK;
1371 cc_migration_time(cc, direct) = 0;
1372 cc_migration_prec(cc, direct) = 0;
1373 cc_migration_func(cc, direct) = NULL;
1374 cc_migration_arg(cc, direct) = NULL;
1375 #endif
1376 CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
1377 c, c->c_func, c->c_arg);
1378 if (drain) {
1379 cc_exec_drain(cc, direct) = drain;
1380 }
1381 CC_UNLOCK(cc);
1382 return ((flags & CS_EXECUTING) != 0);
1383 }
1384 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
1385 c, c->c_func, c->c_arg);
1386 if (drain) {
1387 cc_exec_drain(cc, direct) = drain;
1388 }
1389 KASSERT(!sq_locked, ("sleepqueue chain still locked"));
1390 cancelled = ((flags & CS_EXECUTING) != 0);
1391 } else
1392 cancelled = 1;
1393
1394 if (sq_locked)
1395 sleepq_release(&cc_exec_waiting(cc, direct));
1396
1397 if ((c->c_iflags & CALLOUT_PENDING) == 0) {
1398 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
1399 c, c->c_func, c->c_arg);
1400 /*
1401  * For a callout that is neither scheduled nor executing,
1402  * return a negative value.
1403 */
1404 if (cc_exec_curr(cc, direct) != c)
1405 cancelled = -1;
1406 CC_UNLOCK(cc);
1407 return (cancelled);
1408 }
1409
1410 c->c_iflags &= ~CALLOUT_PENDING;
1411 c->c_flags &= ~CALLOUT_ACTIVE;
1412
1413 CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
1414 c, c->c_func, c->c_arg);
1415 if (not_on_a_list == 0) {
1416 if ((c->c_iflags & CALLOUT_PROCESSED) == 0) {
1417 if (cc_exec_next(cc) == c)
1418 cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
1419 LIST_REMOVE(c, c_links.le);
1420 } else {
1421 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
1422 }
1423 }
1424 callout_cc_del(c, cc);
1425 CC_UNLOCK(cc);
1426 return (cancelled);
1427 }
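/*
 * A sketch of how a caller might interpret the return value convention
 * implemented above, via the callout_stop() wrapper (flags == 0); the
 * surrounding softc is hypothetical:
 *
 *	switch (callout_stop(&sc->ex_tick)) {
 *	case 1:		// was pending and has been removed
 *		break;
 *	case 0:		// currently executing; could not be stopped
 *		break;
 *	case -1:	// neither pending nor executing
 *		break;
 *	}
 */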
1428
1429 void
1430 callout_init(struct callout *c, int mpsafe)
1431 {
1432 bzero(c, sizeof *c);
1433 if (mpsafe) {
1434 c->c_lock = NULL;
1435 c->c_iflags = CALLOUT_RETURNUNLOCKED;
1436 } else {
1437 c->c_lock = &Giant.lock_object;
1438 c->c_iflags = 0;
1439 }
1440 c->c_cpu = timeout_cpu;
1441 }
1442
1443 void
1444 _callout_init_lock(struct callout *c, struct lock_object *lock, int flags)
1445 {
1446 bzero(c, sizeof *c);
1447 c->c_lock = lock;
1448 KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
1449 ("callout_init_lock: bad flags %d", flags));
1450 KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
1451 ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
1452 KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
1453 (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
1454 __func__));
1455 c->c_iflags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
1456 c->c_cpu = timeout_cpu;
1457 }
1458
1459 #ifdef APM_FIXUP_CALLTODO
1460 /*
1461 * Adjust the kernel calltodo timeout list. This routine is used after
1462 * an APM resume to recalculate the calltodo timer list values with the
1463 * number of hz's we have been sleeping. The next hardclock() will detect
1464 * that there are fired timers and run softclock() to execute them.
1465 *
1466 * Please note, I have not done an exhaustive analysis of what code this
1467 * might break. I am motivated to have my select()'s and alarm()'s that
1468 * have expired during suspend firing upon resume so that the applications
1469  * which set the timer can do the maintenance the timer was for as close
1470 * as possible to the originally intended time. Testing this code for a
1471 * week showed that resuming from a suspend resulted in 22 to 25 timers
1472  * firing, which seemed independent of whether the suspend was 2 hours or
1473  * 2 days. Your mileage may vary. - Ken Key <key@cs.utk.edu>
1474 */
1475 void
1476 adjust_timeout_calltodo(struct timeval *time_change)
1477 {
1478 struct callout *p;
1479 unsigned long delta_ticks;
1480
1481 /*
1482 * How many ticks were we asleep?
1483 * (stolen from tvtohz()).
1484 */
1485
1486 /* Don't do anything */
1487 if (time_change->tv_sec < 0)
1488 return;
1489 else if (time_change->tv_sec <= LONG_MAX / 1000000)
1490 delta_ticks = howmany(time_change->tv_sec * 1000000 +
1491 time_change->tv_usec, tick) + 1;
1492 else if (time_change->tv_sec <= LONG_MAX / hz)
1493 delta_ticks = time_change->tv_sec * hz +
1494 howmany(time_change->tv_usec, tick) + 1;
1495 else
1496 delta_ticks = LONG_MAX;
1497
1498 if (delta_ticks > INT_MAX)
1499 delta_ticks = INT_MAX;
1500
1501 /*
1502 * Now rip through the timer calltodo list looking for timers
1503 * to expire.
1504 */
1505
1506 /* don't collide with softclock() */
1507 CC_LOCK(cc);
1508 for (p = calltodo.c_next; p != NULL; p = p->c_next) {
1509 p->c_time -= delta_ticks;
1510
1511 /* Break if the timer had more time on it than delta_ticks */
1512 if (p->c_time > 0)
1513 break;
1514
1515 /* take back the ticks the timer didn't use (p->c_time <= 0) */
1516 delta_ticks = -p->c_time;
1517 }
1518 CC_UNLOCK(cc);
1519
1520 return;
1521 }
1522 #endif /* APM_FIXUP_CALLTODO */
1523
1524 static int
1525 flssbt(sbintime_t sbt)
1526 {
1527
1528 sbt += (uint64_t)sbt >> 1;
1529 if (sizeof(long) >= sizeof(sbintime_t))
1530 return (flsl(sbt));
1531 if (sbt >= SBT_1S)
1532 return (flsl(((uint64_t)sbt) >> 32) + 32);
1533 return (flsl(sbt));
1534 }
1535
1536 /*
1537  * Dump an immediate statistics snapshot of the scheduled callouts.
1538 */
1539 static int
1540 sysctl_kern_callout_stat(SYSCTL_HANDLER_ARGS)
1541 {
1542 struct callout *tmp;
1543 struct callout_cpu *cc;
1544 struct callout_list *sc;
1545 sbintime_t maxpr, maxt, medpr, medt, now, spr, st, t;
1546 int ct[64], cpr[64], ccpbk[32];
1547 int error, val, i, count, tcum, pcum, maxc, c, medc;
1548 #ifdef SMP
1549 int cpu;
1550 #endif
1551
1552 val = 0;
1553 error = sysctl_handle_int(oidp, &val, 0, req);
1554 if (error != 0 || req->newptr == NULL)
1555 return (error);
1556 count = maxc = 0;
1557 st = spr = maxt = maxpr = 0;
1558 bzero(ccpbk, sizeof(ccpbk));
1559 bzero(ct, sizeof(ct));
1560 bzero(cpr, sizeof(cpr));
1561 now = sbinuptime();
1562 #ifdef SMP
1563 CPU_FOREACH(cpu) {
1564 cc = CC_CPU(cpu);
1565 #else
1566 cc = CC_CPU(timeout_cpu);
1567 #endif
1568 CC_LOCK(cc);
1569 for (i = 0; i < callwheelsize; i++) {
1570 sc = &cc->cc_callwheel[i];
1571 c = 0;
1572 LIST_FOREACH(tmp, sc, c_links.le) {
1573 c++;
1574 t = tmp->c_time - now;
1575 if (t < 0)
1576 t = 0;
1577 st += t / SBT_1US;
1578 spr += tmp->c_precision / SBT_1US;
1579 if (t > maxt)
1580 maxt = t;
1581 if (tmp->c_precision > maxpr)
1582 maxpr = tmp->c_precision;
1583 ct[flssbt(t)]++;
1584 cpr[flssbt(tmp->c_precision)]++;
1585 }
1586 if (c > maxc)
1587 maxc = c;
1588 ccpbk[fls(c + c / 2)]++;
1589 count += c;
1590 }
1591 CC_UNLOCK(cc);
1592 #ifdef SMP
1593 }
1594 #endif
1595
1596 for (i = 0, tcum = 0; i < 64 && tcum < count / 2; i++)
1597 tcum += ct[i];
1598 medt = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
1599 for (i = 0, pcum = 0; i < 64 && pcum < count / 2; i++)
1600 pcum += cpr[i];
1601 medpr = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
1602 for (i = 0, c = 0; i < 32 && c < count / 2; i++)
1603 c += ccpbk[i];
1604 medc = (i >= 2) ? (1 << (i - 2)) : 0;
1605
1606 printf("Scheduled callouts statistic snapshot:\n");
1607 printf(" Callouts: %6d Buckets: %6d*%-3d Bucket size: 0.%06ds\n",
1608 count, callwheelsize, mp_ncpus, 1000000 >> CC_HASH_SHIFT);
1609 printf(" C/Bk: med %5d avg %6d.%06jd max %6d\n",
1610 medc,
1611 count / callwheelsize / mp_ncpus,
1612 (uint64_t)count * 1000000 / callwheelsize / mp_ncpus % 1000000,
1613 maxc);
1614 printf(" Time: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
1615 medt / SBT_1S, (medt & 0xffffffff) * 1000000 >> 32,
1616 (st / count) / 1000000, (st / count) % 1000000,
1617 maxt / SBT_1S, (maxt & 0xffffffff) * 1000000 >> 32);
1618 printf(" Prec: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
1619 medpr / SBT_1S, (medpr & 0xffffffff) * 1000000 >> 32,
1620 (spr / count) / 1000000, (spr / count) % 1000000,
1621 maxpr / SBT_1S, (maxpr & 0xffffffff) * 1000000 >> 32);
1622 printf(" Distribution: \tbuckets\t time\t tcum\t"
1623 " prec\t pcum\n");
1624 for (i = 0, tcum = pcum = 0; i < 64; i++) {
1625 if (ct[i] == 0 && cpr[i] == 0)
1626 continue;
1627 t = (i != 0) ? (((sbintime_t)1) << (i - 1)) : 0;
1628 tcum += ct[i];
1629 pcum += cpr[i];
1630 printf(" %10jd.%06jds\t 2**%d\t%7d\t%7d\t%7d\t%7d\n",
1631 t / SBT_1S, (t & 0xffffffff) * 1000000 >> 32,
1632 i - 1 - (32 - CC_HASH_SHIFT),
1633 ct[i], tcum, cpr[i], pcum);
1634 }
1635 return (error);
1636 }
1637 SYSCTL_PROC(_kern, OID_AUTO, callout_stat,
1638 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
1639 0, 0, sysctl_kern_callout_stat, "I",
1640 "Dump immediate statistic snapshot of the scheduled callouts");
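/*
 * The handler above produces output only when a value is written to the
 * sysctl, so a typical way to trigger the dump from userland would be,
 * for example:
 *
 *	# sysctl kern.callout_stat=1
 *
 * The snapshot is printed via printf() to the console/message buffer.
 */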
1641
1642 #ifdef DDB
1643 static void
1644 _show_callout(struct callout *c)
1645 {
1646
1647 db_printf("callout %p\n", c);
1648 #define C_DB_PRINTF(f, e) db_printf(" %s = " f "\n", #e, c->e);
1649 db_printf(" &c_links = %p\n", &(c->c_links));
1650 C_DB_PRINTF("%" PRId64, c_time);
1651 C_DB_PRINTF("%" PRId64, c_precision);
1652 C_DB_PRINTF("%p", c_arg);
1653 C_DB_PRINTF("%p", c_func);
1654 C_DB_PRINTF("%p", c_lock);
1655 C_DB_PRINTF("%#x", c_flags);
1656 C_DB_PRINTF("%#x", c_iflags);
1657 C_DB_PRINTF("%d", c_cpu);
1658 #undef C_DB_PRINTF
1659 }
1660
1661 DB_SHOW_COMMAND(callout, db_show_callout)
1662 {
1663
1664 if (!have_addr) {
1665 db_printf("usage: show callout <struct callout *>\n");
1666 return;
1667 }
1668
1669 _show_callout((struct callout *)addr);
1670 }
1671 #endif /* DDB */