1 /*-
2 * Copyright (c) 1982, 1986, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * From: @(#)kern_clock.c 8.5 (Berkeley) 1/21/94
35 */
36
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD: releng/10.1/sys/kern/kern_timeout.c 260817 2014-01-17 10:58:59Z avg $");
39
40 #include "opt_callout_profiling.h"
41 #include "opt_kdtrace.h"
42 #if defined(__arm__)
43 #include "opt_timer.h"
44 #endif
45
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/bus.h>
49 #include <sys/callout.h>
50 #include <sys/file.h>
51 #include <sys/interrupt.h>
52 #include <sys/kernel.h>
53 #include <sys/ktr.h>
54 #include <sys/lock.h>
55 #include <sys/malloc.h>
56 #include <sys/mutex.h>
57 #include <sys/proc.h>
58 #include <sys/sdt.h>
59 #include <sys/sleepqueue.h>
60 #include <sys/sysctl.h>
61 #include <sys/smp.h>
62
63 #ifdef SMP
64 #include <machine/cpu.h>
65 #endif
66
67 #ifndef NO_EVENTTIMERS
68 DPCPU_DECLARE(sbintime_t, hardclocktime);
69 #endif
70
71 SDT_PROVIDER_DEFINE(callout_execute);
72 SDT_PROBE_DEFINE1(callout_execute, kernel, , callout__start,
73 "struct callout *");
74 SDT_PROBE_DEFINE1(callout_execute, kernel, , callout__end,
75 "struct callout *");
76
77 #ifdef CALLOUT_PROFILING
78 static int avg_depth;
79 SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
80 "Average number of items examined per softclock call. Units = 1/1000");
81 static int avg_gcalls;
82 SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
83 "Average number of Giant callouts made per softclock call. Units = 1/1000");
84 static int avg_lockcalls;
85 SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
86 "Average number of lock callouts made per softclock call. Units = 1/1000");
87 static int avg_mpcalls;
88 SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
89 "Average number of MP callouts made per softclock call. Units = 1/1000");
90 static int avg_depth_dir;
91 SYSCTL_INT(_debug, OID_AUTO, to_avg_depth_dir, CTLFLAG_RD, &avg_depth_dir, 0,
92 "Average number of direct callouts examined per callout_process call. "
93 "Units = 1/1000");
94 static int avg_lockcalls_dir;
95 SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls_dir, CTLFLAG_RD,
96 &avg_lockcalls_dir, 0, "Average number of lock direct callouts made per "
97 "callout_process call. Units = 1/1000");
98 static int avg_mpcalls_dir;
99 SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls_dir, CTLFLAG_RD, &avg_mpcalls_dir,
100 0, "Average number of MP direct callouts made per callout_process call. "
101 "Units = 1/1000");
102 #endif
103
104 static int ncallout;
105 SYSCTL_INT(_kern, OID_AUTO, ncallout, CTLFLAG_RDTUN, &ncallout, 0,
106 "Number of entries in callwheel and size of timeout() preallocation");
107
108 /*
109 * TODO:
110 * allocate more timeout table slots when table overflows.
111 */
112 u_int callwheelsize, callwheelmask;
113
114 /*
115 * The callout cpu exec entities represent the information needed to
116 * describe the state of callouts currently running on the CPU and the
117 * information needed to migrate callouts to a new callout cpu. In
118 * particular, the first entry of the cc_exec_entity array holds the
119 * information for a callout running in SWI thread context, while the
120 * second one holds the information for a callout running directly from
121 * hardware interrupt context. The cached information is essential for
122 * deferring migration when the migrating callout is already running.
123 */
124 struct cc_exec {
125 struct callout *cc_next;
126 struct callout *cc_curr;
127 #ifdef SMP
128 void (*ce_migration_func)(void *);
129 void *ce_migration_arg;
130 int ce_migration_cpu;
131 sbintime_t ce_migration_time;
132 sbintime_t ce_migration_prec;
133 #endif
134 bool cc_cancel;
135 bool cc_waiting;
136 };
137
138 /*
139 * There is one struct callout_cpu per cpu, holding all relevant
140 * state for the callout processing thread on the individual CPU.
141 */
142 struct callout_cpu {
143 struct mtx_padalign cc_lock;
144 struct cc_exec cc_exec_entity[2];
145 struct callout *cc_callout;
146 struct callout_list *cc_callwheel;
147 struct callout_tailq cc_expireq;
148 struct callout_slist cc_callfree;
149 sbintime_t cc_firstevent;
150 sbintime_t cc_lastscan;
151 void *cc_cookie;
152 u_int cc_bucket;
153 };
154
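/*
 * Shorthand accessors: entity [0] tracks the callout running from the
 * softclock SWI thread, while entity [1] (the "_dir" variants below)
 * tracks the one running directly from hardware interrupt context.
 */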
155 #define cc_exec_curr cc_exec_entity[0].cc_curr
156 #define cc_exec_next cc_exec_entity[0].cc_next
157 #define cc_exec_cancel cc_exec_entity[0].cc_cancel
158 #define cc_exec_waiting cc_exec_entity[0].cc_waiting
159 #define cc_exec_curr_dir cc_exec_entity[1].cc_curr
160 #define cc_exec_next_dir cc_exec_entity[1].cc_next
161 #define cc_exec_cancel_dir cc_exec_entity[1].cc_cancel
162 #define cc_exec_waiting_dir cc_exec_entity[1].cc_waiting
163
164 #ifdef SMP
165 #define cc_migration_func cc_exec_entity[0].ce_migration_func
166 #define cc_migration_arg cc_exec_entity[0].ce_migration_arg
167 #define cc_migration_cpu cc_exec_entity[0].ce_migration_cpu
168 #define cc_migration_time cc_exec_entity[0].ce_migration_time
169 #define cc_migration_prec cc_exec_entity[0].ce_migration_prec
170 #define cc_migration_func_dir cc_exec_entity[1].ce_migration_func
171 #define cc_migration_arg_dir cc_exec_entity[1].ce_migration_arg
172 #define cc_migration_cpu_dir cc_exec_entity[1].ce_migration_cpu
173 #define cc_migration_time_dir cc_exec_entity[1].ce_migration_time
174 #define cc_migration_prec_dir cc_exec_entity[1].ce_migration_prec
175
176 struct callout_cpu cc_cpu[MAXCPU];
177 #define CPUBLOCK MAXCPU
178 #define CC_CPU(cpu) (&cc_cpu[(cpu)])
179 #define CC_SELF() CC_CPU(PCPU_GET(cpuid))
180 #else
181 struct callout_cpu cc_cpu;
182 #define CC_CPU(cpu) &cc_cpu
183 #define CC_SELF() &cc_cpu
184 #endif
185 #define CC_LOCK(cc) mtx_lock_spin(&(cc)->cc_lock)
186 #define CC_UNLOCK(cc) mtx_unlock_spin(&(cc)->cc_lock)
187 #define CC_LOCK_ASSERT(cc) mtx_assert(&(cc)->cc_lock, MA_OWNED)
188
189 static int timeout_cpu;
190
191 static void callout_cpu_init(struct callout_cpu *cc);
192 static void softclock_call_cc(struct callout *c, struct callout_cpu *cc,
193 #ifdef CALLOUT_PROFILING
194 int *mpcalls, int *lockcalls, int *gcalls,
195 #endif
196 int direct);
197
198 static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");
199
200 /**
201 * Locked by cc_lock:
202 * cc_curr - If a callout is in progress, it is cc_curr.
203 * If cc_curr is non-NULL, threads waiting in
204 * callout_drain() will be woken up as soon as the
205 * relevant callout completes.
206 * cc_cancel - Setting this to true with both the callout's lock and
207 * cc_lock held guarantees that the current callout will
208 * not run. softclock_call_cc() sets it to false before
209 * dropping cc_lock to acquire the callout's lock, and it
210 * calls the handler only if cc_cancel is still false
211 * after that lock is successfully acquired.
212 * cc_waiting - If a thread is waiting in callout_drain(), then
213 * cc_waiting is true. Set only when
214 * cc_curr is non-NULL.
215 */
216
217 /*
218 * Resets the execution entity tied to a specific callout cpu.
219 */
220 static void
221 cc_cce_cleanup(struct callout_cpu *cc, int direct)
222 {
223
224 cc->cc_exec_entity[direct].cc_curr = NULL;
225 cc->cc_exec_entity[direct].cc_next = NULL;
226 cc->cc_exec_entity[direct].cc_cancel = false;
227 cc->cc_exec_entity[direct].cc_waiting = false;
228 #ifdef SMP
229 cc->cc_exec_entity[direct].ce_migration_cpu = CPUBLOCK;
230 cc->cc_exec_entity[direct].ce_migration_time = 0;
231 cc->cc_exec_entity[direct].ce_migration_prec = 0;
232 cc->cc_exec_entity[direct].ce_migration_func = NULL;
233 cc->cc_exec_entity[direct].ce_migration_arg = NULL;
234 #endif
235 }
236
237 /*
238 * Checks whether a deferred migration is pending for a specific callout cpu exec entity.
239 */
240 static int
241 cc_cce_migrating(struct callout_cpu *cc, int direct)
242 {
243
244 #ifdef SMP
245 return (cc->cc_exec_entity[direct].ce_migration_cpu != CPUBLOCK);
246 #else
247 return (0);
248 #endif
249 }
250
251 /*
252 * Kernel low level callwheel initialization
253 * called on cpu0 during kernel startup.
254 */
255 static void
256 callout_callwheel_init(void *dummy)
257 {
258 struct callout_cpu *cc;
259
260 /*
261 * Calculate the size of the callout wheel and the preallocated
262 * timeout() structures.
263 * XXX: Clip ncallout to what the previous maxusers-based formula
264 * yielded at its maximum of 384. This is still huge, but acceptable.
265 */
266 ncallout = imin(16 + maxproc + maxfiles, 18508);
267 TUNABLE_INT_FETCH("kern.ncallout", &ncallout);
268
269 /*
270 * Calculate the callout wheel size; it should be the next power of
271 * two higher than 'ncallout'.
272 */
273 callwheelsize = 1 << fls(ncallout);
274 callwheelmask = callwheelsize - 1;
275
276 /*
277 * Only cpu0 handles timeout(9) and receives a preallocation.
278 *
279 * XXX: Once all timeout(9) consumers are converted this can
280 * be removed.
281 */
282 timeout_cpu = PCPU_GET(cpuid);
283 cc = CC_CPU(timeout_cpu);
284 cc->cc_callout = malloc(ncallout * sizeof(struct callout),
285 M_CALLOUT, M_WAITOK);
286 callout_cpu_init(cc);
287 }
288 SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);
289
290 /*
291 * Initialize the per-cpu callout structures.
292 */
293 static void
294 callout_cpu_init(struct callout_cpu *cc)
295 {
296 struct callout *c;
297 int i;
298
299 mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
300 SLIST_INIT(&cc->cc_callfree);
301 cc->cc_callwheel = malloc(sizeof(struct callout_list) * callwheelsize,
302 M_CALLOUT, M_WAITOK);
303 for (i = 0; i < callwheelsize; i++)
304 LIST_INIT(&cc->cc_callwheel[i]);
305 TAILQ_INIT(&cc->cc_expireq);
306 cc->cc_firstevent = INT64_MAX;
307 for (i = 0; i < 2; i++)
308 cc_cce_cleanup(cc, i);
309 if (cc->cc_callout == NULL) /* Only cpu0 handles timeout(9) */
310 return;
311 for (i = 0; i < ncallout; i++) {
312 c = &cc->cc_callout[i];
313 callout_init(c, 0);
314 c->c_flags = CALLOUT_LOCAL_ALLOC;
315 SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
316 }
317 }
318
319 #ifdef SMP
320 /*
321 * Switches the cpu tied to a specific callout.
322 * The function expects the incoming callout cpu to be locked and
323 * returns with the outgoing callout cpu locked.
324 */
325 static struct callout_cpu *
326 callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
327 {
328 struct callout_cpu *new_cc;
329
330 MPASS(c != NULL && cc != NULL);
331 CC_LOCK_ASSERT(cc);
332
333 /*
334 * Disable interrupts and preemption while the callout cpu is
335 * blocked in order to avoid deadlocks, as the preempting thread
336 * may try to acquire the callout cpu lock.
337 */
338 c->c_cpu = CPUBLOCK;
339 spinlock_enter();
340 CC_UNLOCK(cc);
341 new_cc = CC_CPU(new_cpu);
342 CC_LOCK(new_cc);
343 spinlock_exit();
344 c->c_cpu = new_cpu;
345 return (new_cc);
346 }
347 #endif
348
349 /*
350 * Start standard softclock thread.
351 */
352 static void
353 start_softclock(void *dummy)
354 {
355 struct callout_cpu *cc;
356 #ifdef SMP
357 int cpu;
358 #endif
359
360 cc = CC_CPU(timeout_cpu);
361 if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK,
362 INTR_MPSAFE, &cc->cc_cookie))
363 panic("died while creating standard software ithreads");
364 #ifdef SMP
365 CPU_FOREACH(cpu) {
366 if (cpu == timeout_cpu)
367 continue;
368 cc = CC_CPU(cpu);
369 cc->cc_callout = NULL; /* Only cpu0 handles timeout(9). */
370 callout_cpu_init(cc);
371 if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
372 INTR_MPSAFE, &cc->cc_cookie))
373 panic("died while creating standard software ithreads");
374 }
375 #endif
376 }
377 SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);
378
379 #define CC_HASH_SHIFT 8
380
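/*
 * The callwheel is indexed by the upper bits of the 32.32 fixed-point
 * sbintime_t: shifting right by (32 - CC_HASH_SHIFT) bits means each
 * bucket spans 1/2^CC_HASH_SHIFT of a second (1/256 s, roughly 4 ms).
 */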
381 static inline u_int
382 callout_hash(sbintime_t sbt)
383 {
384
385 return (sbt >> (32 - CC_HASH_SHIFT));
386 }
387
388 static inline u_int
389 callout_get_bucket(sbintime_t sbt)
390 {
391
392 return (callout_hash(sbt) & callwheelmask);
393 }
394
395 void
396 callout_process(sbintime_t now)
397 {
398 struct callout *tmp, *tmpn;
399 struct callout_cpu *cc;
400 struct callout_list *sc;
401 sbintime_t first, last, max, tmp_max;
402 uint32_t lookahead;
403 u_int firstb, lastb, nowb;
404 #ifdef CALLOUT_PROFILING
405 int depth_dir = 0, mpcalls_dir = 0, lockcalls_dir = 0;
406 #endif
407
408 cc = CC_SELF();
409 mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
410
411 /* Compute the buckets of the last scan and present times. */
412 firstb = callout_hash(cc->cc_lastscan);
413 cc->cc_lastscan = now;
414 nowb = callout_hash(now);
415
416 /* Compute the last bucket and minimum time of the bucket after it. */
417 if (nowb == firstb)
418 lookahead = (SBT_1S / 16);
419 else if (nowb - firstb == 1)
420 lookahead = (SBT_1S / 8);
421 else
422 lookahead = (SBT_1S / 2);
423 first = last = now;
424 first += (lookahead / 2);
425 last += lookahead;
426 last &= (0xffffffffffffffffLLU << (32 - CC_HASH_SHIFT));
427 lastb = callout_hash(last) - 1;
428 max = last;
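/*
 * [first, last] is the window into which nearby future events may be
 * aggregated when scheduling the next timer interrupt; 'max' bounds how
 * far ahead this pass considers events at all.
 */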
429
430 /*
431 * Check if we wrapped around the entire wheel from the last scan.
432 * In that case, we need to scan the entire wheel for pending callouts.
433 */
434 if (lastb - firstb >= callwheelsize) {
435 lastb = firstb + callwheelsize - 1;
436 if (nowb - firstb >= callwheelsize)
437 nowb = lastb;
438 }
439
440 /* Iterate callwheel from firstb to nowb and then up to lastb. */
441 do {
442 sc = &cc->cc_callwheel[firstb & callwheelmask];
443 tmp = LIST_FIRST(sc);
444 while (tmp != NULL) {
445 /* Run the callout if its scheduled time has already passed. */
446 if (tmp->c_time <= now) {
447 /*
448 * Consumer told us the callout may be run
449 * directly from hardware interrupt context.
450 */
451 if (tmp->c_flags & CALLOUT_DIRECT) {
452 #ifdef CALLOUT_PROFILING
453 ++depth_dir;
454 #endif
455 cc->cc_exec_next_dir =
456 LIST_NEXT(tmp, c_links.le);
457 cc->cc_bucket = firstb & callwheelmask;
458 LIST_REMOVE(tmp, c_links.le);
459 softclock_call_cc(tmp, cc,
460 #ifdef CALLOUT_PROFILING
461 &mpcalls_dir, &lockcalls_dir, NULL,
462 #endif
463 1);
464 tmp = cc->cc_exec_next_dir;
465 } else {
466 tmpn = LIST_NEXT(tmp, c_links.le);
467 LIST_REMOVE(tmp, c_links.le);
468 TAILQ_INSERT_TAIL(&cc->cc_expireq,
469 tmp, c_links.tqe);
470 tmp->c_flags |= CALLOUT_PROCESSED;
471 tmp = tmpn;
472 }
473 continue;
474 }
475 /* Skip events from the distant future. */
476 if (tmp->c_time >= max)
477 goto next;
478 /*
479 * The event's earliest time is past the current maximal
480 * time, so it cannot be aggregated with this batch.
481 */
482 if (tmp->c_time > last) {
483 lastb = nowb;
484 goto next;
485 }
486 /* Update first and last time, respecting this event. */
487 if (tmp->c_time < first)
488 first = tmp->c_time;
489 tmp_max = tmp->c_time + tmp->c_precision;
490 if (tmp_max < last)
491 last = tmp_max;
492 next:
493 tmp = LIST_NEXT(tmp, c_links.le);
494 }
495 /* Proceed with the next bucket. */
496 firstb++;
497 /*
498 * Stop if we looked past the present time and found
499 * some event that we cannot execute now, or if we
500 * have looked far enough into the future.
501 */
502 } while (((int)(firstb - lastb)) <= 0);
503 cc->cc_firstevent = last;
504 #ifndef NO_EVENTTIMERS
505 cpu_new_callout(curcpu, last, first);
506 #endif
507 #ifdef CALLOUT_PROFILING
508 avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8;
509 avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8;
510 avg_lockcalls_dir += (lockcalls_dir * 1000 - avg_lockcalls_dir) >> 8;
511 #endif
512 mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
513 /*
514 * swi_sched acquires the thread lock, so we don't want to call it
515 * with cc_lock held; incorrect locking order.
516 */
517 if (!TAILQ_EMPTY(&cc->cc_expireq))
518 swi_sched(cc->cc_cookie, 0);
519 }
520
521 static struct callout_cpu *
522 callout_lock(struct callout *c)
523 {
524 struct callout_cpu *cc;
525 int cpu;
526
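/*
 * Spin while the callout is being migrated (c_cpu == CPUBLOCK), then
 * lock the owning CPU's callout state and re-check that the callout
 * did not move to another CPU while the lock was being acquired.
 */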
527 for (;;) {
528 cpu = c->c_cpu;
529 #ifdef SMP
530 if (cpu == CPUBLOCK) {
531 while (c->c_cpu == CPUBLOCK)
532 cpu_spinwait();
533 continue;
534 }
535 #endif
536 cc = CC_CPU(cpu);
537 CC_LOCK(cc);
538 if (cpu == c->c_cpu)
539 break;
540 CC_UNLOCK(cc);
541 }
542 return (cc);
543 }
544
545 static void
546 callout_cc_add(struct callout *c, struct callout_cpu *cc,
547 sbintime_t sbt, sbintime_t precision, void (*func)(void *),
548 void *arg, int cpu, int flags)
549 {
550 int bucket;
551
552 CC_LOCK_ASSERT(cc);
553 if (sbt < cc->cc_lastscan)
554 sbt = cc->cc_lastscan;
555 c->c_arg = arg;
556 c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
557 if (flags & C_DIRECT_EXEC)
558 c->c_flags |= CALLOUT_DIRECT;
559 c->c_flags &= ~CALLOUT_PROCESSED;
560 c->c_func = func;
561 c->c_time = sbt;
562 c->c_precision = precision;
563 bucket = callout_get_bucket(c->c_time);
564 CTR3(KTR_CALLOUT, "precision set for %p: %d.%08x",
565 c, (int)(c->c_precision >> 32),
566 (u_int)(c->c_precision & 0xffffffff));
567 LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le);
568 if (cc->cc_bucket == bucket)
569 cc->cc_exec_next_dir = c;
570 #ifndef NO_EVENTTIMERS
571 /*
572 * Inform the eventtimers(4) subsystem that a new callout has been
573 * inserted, but only when it becomes the earliest pending event.
574 */
575 if (INT64_MAX - c->c_time < c->c_precision)
576 c->c_precision = INT64_MAX - c->c_time;
577 sbt = c->c_time + c->c_precision;
578 if (sbt < cc->cc_firstevent) {
579 cc->cc_firstevent = sbt;
580 cpu_new_callout(cpu, sbt, c->c_time);
581 }
582 #endif
583 }
584
585 static void
586 callout_cc_del(struct callout *c, struct callout_cpu *cc)
587 {
588
589 if ((c->c_flags & CALLOUT_LOCAL_ALLOC) == 0)
590 return;
591 c->c_func = NULL;
592 SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
593 }
594
595 static void
596 softclock_call_cc(struct callout *c, struct callout_cpu *cc,
597 #ifdef CALLOUT_PROFILING
598 int *mpcalls, int *lockcalls, int *gcalls,
599 #endif
600 int direct)
601 {
602 struct rm_priotracker tracker;
603 void (*c_func)(void *);
604 void *c_arg;
605 struct lock_class *class;
606 struct lock_object *c_lock;
607 uintptr_t lock_status;
608 int c_flags;
609 #ifdef SMP
610 struct callout_cpu *new_cc;
611 void (*new_func)(void *);
612 void *new_arg;
613 int flags, new_cpu;
614 sbintime_t new_prec, new_time;
615 #endif
616 #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
617 sbintime_t sbt1, sbt2;
618 struct timespec ts2;
619 static sbintime_t maxdt = 2 * SBT_1MS; /* 2 msec */
620 static timeout_t *lastfunc;
621 #endif
622
623 KASSERT((c->c_flags & (CALLOUT_PENDING | CALLOUT_ACTIVE)) ==
624 (CALLOUT_PENDING | CALLOUT_ACTIVE),
625 ("softclock_call_cc: pend|act %p %x", c, c->c_flags));
626 class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
627 lock_status = 0;
628 if (c->c_flags & CALLOUT_SHAREDLOCK) {
629 if (class == &lock_class_rm)
630 lock_status = (uintptr_t)&tracker;
631 else
632 lock_status = 1;
633 }
634 c_lock = c->c_lock;
635 c_func = c->c_func;
636 c_arg = c->c_arg;
637 c_flags = c->c_flags;
638 if (c->c_flags & CALLOUT_LOCAL_ALLOC)
639 c->c_flags = CALLOUT_LOCAL_ALLOC;
640 else
641 c->c_flags &= ~CALLOUT_PENDING;
642 cc->cc_exec_entity[direct].cc_curr = c;
643 cc->cc_exec_entity[direct].cc_cancel = false;
644 CC_UNLOCK(cc);
645 if (c_lock != NULL) {
646 class->lc_lock(c_lock, lock_status);
647 /*
648 * The callout may have been cancelled
649 * while we switched locks.
650 */
651 if (cc->cc_exec_entity[direct].cc_cancel) {
652 class->lc_unlock(c_lock);
653 goto skip;
654 }
655 /* The callout cannot be stopped now. */
656 cc->cc_exec_entity[direct].cc_cancel = true;
657 if (c_lock == &Giant.lock_object) {
658 #ifdef CALLOUT_PROFILING
659 (*gcalls)++;
660 #endif
661 CTR3(KTR_CALLOUT, "callout giant %p func %p arg %p",
662 c, c_func, c_arg);
663 } else {
664 #ifdef CALLOUT_PROFILING
665 (*lockcalls)++;
666 #endif
667 CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
668 c, c_func, c_arg);
669 }
670 } else {
671 #ifdef CALLOUT_PROFILING
672 (*mpcalls)++;
673 #endif
674 CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
675 c, c_func, c_arg);
676 }
677 #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
678 sbt1 = sbinuptime();
679 #endif
680 THREAD_NO_SLEEPING();
681 SDT_PROBE(callout_execute, kernel, , callout__start, c, 0, 0, 0, 0);
682 c_func(c_arg);
683 SDT_PROBE(callout_execute, kernel, , callout__end, c, 0, 0, 0, 0);
684 THREAD_SLEEPING_OK();
685 #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
686 sbt2 = sbinuptime();
687 sbt2 -= sbt1;
688 if (sbt2 > maxdt) {
689 if (lastfunc != c_func || sbt2 > maxdt * 2) {
690 ts2 = sbttots(sbt2);
691 printf(
692 "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
693 c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
694 }
695 maxdt = sbt2;
696 lastfunc = c_func;
697 }
698 #endif
699 CTR1(KTR_CALLOUT, "callout %p finished", c);
700 if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
701 class->lc_unlock(c_lock);
702 skip:
703 CC_LOCK(cc);
704 KASSERT(cc->cc_exec_entity[direct].cc_curr == c, ("mishandled cc_curr"));
705 cc->cc_exec_entity[direct].cc_curr = NULL;
706 if (cc->cc_exec_entity[direct].cc_waiting) {
707 /*
708 * There is someone waiting for the
709 * callout to complete.
710 * If the callout was scheduled for
711 * migration just cancel it.
712 */
713 if (cc_cce_migrating(cc, direct)) {
714 cc_cce_cleanup(cc, direct);
715
716 /*
717 * It should be asserted here that the callout is not
718 * destroyed, but that is not easy to do.
719 */
720 c->c_flags &= ~CALLOUT_DFRMIGRATION;
721 }
722 cc->cc_exec_entity[direct].cc_waiting = false;
723 CC_UNLOCK(cc);
724 wakeup(&cc->cc_exec_entity[direct].cc_waiting);
725 CC_LOCK(cc);
726 } else if (cc_cce_migrating(cc, direct)) {
727 KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0,
728 ("Migrating legacy callout %p", c));
729 #ifdef SMP
730 /*
731 * If the callout was scheduled for
732 * migration just perform it now.
733 */
734 new_cpu = cc->cc_exec_entity[direct].ce_migration_cpu;
735 new_time = cc->cc_exec_entity[direct].ce_migration_time;
736 new_prec = cc->cc_exec_entity[direct].ce_migration_prec;
737 new_func = cc->cc_exec_entity[direct].ce_migration_func;
738 new_arg = cc->cc_exec_entity[direct].ce_migration_arg;
739 cc_cce_cleanup(cc, direct);
740
741 /*
742 * It should be asserted here that the callout is not destroyed,
743 * but that is not easy to do.
744 *
745 * First, handle deferred callout stops.
746 */
747 if ((c->c_flags & CALLOUT_DFRMIGRATION) == 0) {
748 CTR3(KTR_CALLOUT,
749 "deferred cancelled %p func %p arg %p",
750 c, new_func, new_arg);
751 callout_cc_del(c, cc);
752 return;
753 }
754 c->c_flags &= ~CALLOUT_DFRMIGRATION;
755
756 new_cc = callout_cpu_switch(c, cc, new_cpu);
757 flags = (direct) ? C_DIRECT_EXEC : 0;
758 callout_cc_add(c, new_cc, new_time, new_prec, new_func,
759 new_arg, new_cpu, flags);
760 CC_UNLOCK(new_cc);
761 CC_LOCK(cc);
762 #else
763 panic("migration should not happen");
764 #endif
765 }
766 /*
767 * If the current callout is locally allocated (from
768 * timeout(9)) then put it on the freelist.
769 *
770 * Note: we need to check the cached copy of c_flags because
771 * if it was not local, then it's not safe to deref the
772 * callout pointer.
773 */
774 KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0 ||
775 c->c_flags == CALLOUT_LOCAL_ALLOC,
776 ("corrupted callout"));
777 if (c_flags & CALLOUT_LOCAL_ALLOC)
778 callout_cc_del(c, cc);
779 }
780
781 /*
782 * The callout mechanism is based on the work of Adam M. Costello and
783 * George Varghese, published in a technical report entitled "Redesigning
784 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
785 * in FreeBSD by Justin T. Gibbs. The original work on the data structures
786 * used in this implementation was published by G. Varghese and T. Lauck in
787 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
788 * the Efficient Implementation of a Timer Facility" in the Proceedings of
789 * the 11th ACM Annual Symposium on Operating Systems Principles,
790 * Austin, Texas Nov 1987.
791 */
792
793 /*
794 * Software (low priority) clock interrupt.
795 * Run periodic events from timeout queue.
796 */
797 void
798 softclock(void *arg)
799 {
800 struct callout_cpu *cc;
801 struct callout *c;
802 #ifdef CALLOUT_PROFILING
803 int depth = 0, gcalls = 0, lockcalls = 0, mpcalls = 0;
804 #endif
805
806 cc = (struct callout_cpu *)arg;
807 CC_LOCK(cc);
808 while ((c = TAILQ_FIRST(&cc->cc_expireq)) != NULL) {
809 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
810 softclock_call_cc(c, cc,
811 #ifdef CALLOUT_PROFILING
812 &mpcalls, &lockcalls, &gcalls,
813 #endif
814 0);
815 #ifdef CALLOUT_PROFILING
816 ++depth;
817 #endif
818 }
819 #ifdef CALLOUT_PROFILING
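/*
 * The counters below are exponential moving averages with weight 1/256,
 * scaled by 1000 to match the "Units = 1/1000" sysctl descriptions.
 */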
820 avg_depth += (depth * 1000 - avg_depth) >> 8;
821 avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
822 avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
823 avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
824 #endif
825 CC_UNLOCK(cc);
826 }
827
828 /*
829 * timeout --
830 * Execute a function after a specified length of time.
831 *
832 * untimeout --
833 * Cancel previous timeout function call.
834 *
835 * callout_handle_init --
836 * Initialize a handle so that using it with untimeout is benign.
837 *
838 * See AT&T BCI Driver Reference Manual for specification. This
839 * implementation differs from that one in that although an
840 * identification value is returned from timeout, the original
841 * arguments to timeout as well as the identifier are used to
842 * identify entries for untimeout.
843 */
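/*
 * Illustrative sketch of the legacy interface; the names my_func and
 * my_arg below are hypothetical:
 *
 *	struct callout_handle h;
 *
 *	callout_handle_init(&h);
 *	h = timeout(my_func, my_arg, 10 * hz);	run my_func(my_arg) in ~10 s
 *	...
 *	untimeout(my_func, my_arg, h);		cancel it if still pending
 */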
844 struct callout_handle
845 timeout(ftn, arg, to_ticks)
846 timeout_t *ftn;
847 void *arg;
848 int to_ticks;
849 {
850 struct callout_cpu *cc;
851 struct callout *new;
852 struct callout_handle handle;
853
854 cc = CC_CPU(timeout_cpu);
855 CC_LOCK(cc);
856 /* Fill in the next free callout structure. */
857 new = SLIST_FIRST(&cc->cc_callfree);
858 if (new == NULL)
859 /* XXX Attempt to malloc first */
860 panic("timeout table full");
861 SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
862 callout_reset(new, to_ticks, ftn, arg);
863 handle.callout = new;
864 CC_UNLOCK(cc);
865
866 return (handle);
867 }
868
869 void
870 untimeout(ftn, arg, handle)
871 timeout_t *ftn;
872 void *arg;
873 struct callout_handle handle;
874 {
875 struct callout_cpu *cc;
876
877 /*
878 * Check for a handle that was initialized
879 * by callout_handle_init, but never used
880 * for a real timeout.
881 */
882 if (handle.callout == NULL)
883 return;
884
885 cc = callout_lock(handle.callout);
886 if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
887 callout_stop(handle.callout);
888 CC_UNLOCK(cc);
889 }
890
891 void
892 callout_handle_init(struct callout_handle *handle)
893 {
894 handle->callout = NULL;
895 }
896
897 /*
898 * New interface; clients allocate their own callout structures.
899 *
900 * callout_reset() - establish or change a timeout
901 * callout_stop() - disestablish a timeout
902 * callout_init() - initialize a callout structure so that it can
903 * safely be passed to callout_reset() and callout_stop()
904 *
905 * <sys/callout.h> defines three convenience macros:
906 *
907 * callout_active() - returns truth if callout has not been stopped,
908 * drained, or deactivated since the last time the callout was
909 * reset.
910 * callout_pending() - returns truth if callout is still waiting for timeout
911 * callout_deactivate() - marks the callout as having been serviced
912 */
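/*
 * Illustrative sketch of typical consumer usage; the names my_timer,
 * my_timeout_fn and my_softc below are hypothetical:
 *
 *	struct callout my_timer;
 *
 *	callout_init(&my_timer, CALLOUT_MPSAFE);
 *	callout_reset(&my_timer, hz, my_timeout_fn, my_softc);
 *	...
 *	callout_stop(&my_timer);	cancels a pending invocation
 *	callout_drain(&my_timer);	additionally waits out a running one
 *
 * Consumers whose handler needs a lock would instead use
 * callout_init_mtx() so that the callout code acquires it for them.
 */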
913 int
914 callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t precision,
915 void (*ftn)(void *), void *arg, int cpu, int flags)
916 {
917 sbintime_t to_sbt, pr;
918 struct callout_cpu *cc;
919 int cancelled, direct;
920
921 cancelled = 0;
922 if (flags & C_ABSOLUTE) {
923 to_sbt = sbt;
924 } else {
925 if ((flags & C_HARDCLOCK) && (sbt < tick_sbt))
926 sbt = tick_sbt;
927 if ((flags & C_HARDCLOCK) ||
928 #ifdef NO_EVENTTIMERS
929 sbt >= sbt_timethreshold) {
930 to_sbt = getsbinuptime();
931
932 /* Add safety belt for the case of hz > 1000. */
933 to_sbt += tc_tick_sbt - tick_sbt;
934 #else
935 sbt >= sbt_tickthreshold) {
936 /*
937 * Obtain the time of the last hardclock() call on
938 * this CPU directly from kern_clocksource.c.
939 * This value is per-CPU, but it is the same for
940 * all active CPUs.
941 */
942 #ifdef __LP64__
943 to_sbt = DPCPU_GET(hardclocktime);
944 #else
945 spinlock_enter();
946 to_sbt = DPCPU_GET(hardclocktime);
947 spinlock_exit();
948 #endif
949 #endif
950 if ((flags & C_HARDCLOCK) == 0)
951 to_sbt += tick_sbt;
952 } else
953 to_sbt = sbinuptime();
954 if (INT64_MAX - to_sbt < sbt)
955 to_sbt = INT64_MAX;
956 else
957 to_sbt += sbt;
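/*
 * Derive a default precision as a fraction of the requested
 * interval: 'sbt' shifted by the per-call C_PREL() value or, if
 * none was given, by the system-wide tc_precexp default. The
 * caller-supplied precision can only be widened, never reduced.
 */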
958 pr = ((C_PRELGET(flags) < 0) ? sbt >> tc_precexp :
959 sbt >> C_PRELGET(flags));
960 if (pr > precision)
961 precision = pr;
962 }
963 /*
964 * Don't allow migration of pre-allocated callouts lest they
965 * become unbalanced.
966 */
967 if (c->c_flags & CALLOUT_LOCAL_ALLOC)
968 cpu = c->c_cpu;
969 direct = (c->c_flags & CALLOUT_DIRECT) != 0;
970 KASSERT(!direct || c->c_lock == NULL,
971 ("%s: direct callout %p has lock", __func__, c));
972 cc = callout_lock(c);
973 if (cc->cc_exec_entity[direct].cc_curr == c) {
974 /*
975 * We're being asked to reschedule a callout which is
976 * currently in progress. If there is a lock then we
977 * can cancel the callout if it has not really started.
978 */
979 if (c->c_lock != NULL && !cc->cc_exec_entity[direct].cc_cancel)
980 cancelled = cc->cc_exec_entity[direct].cc_cancel = true;
981 if (cc->cc_exec_entity[direct].cc_waiting) {
982 /*
983 * Someone has called callout_drain to kill this
984 * callout. Don't reschedule.
985 */
986 CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
987 cancelled ? "cancelled" : "failed to cancel",
988 c, c->c_func, c->c_arg);
989 CC_UNLOCK(cc);
990 return (cancelled);
991 }
992 }
993 if (c->c_flags & CALLOUT_PENDING) {
994 if ((c->c_flags & CALLOUT_PROCESSED) == 0) {
995 if (cc->cc_exec_next_dir == c)
996 cc->cc_exec_next_dir = LIST_NEXT(c, c_links.le);
997 LIST_REMOVE(c, c_links.le);
998 } else
999 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
1000 cancelled = 1;
1001 c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
1002 }
1003
1004 #ifdef SMP
1005 /*
1006 * If the callout must migrate, try to perform the migration immediately.
1007 * If the callout is currently running, just defer the migration
1008 * to a more appropriate moment.
1009 */
1010 if (c->c_cpu != cpu) {
1011 if (cc->cc_exec_entity[direct].cc_curr == c) {
1012 cc->cc_exec_entity[direct].ce_migration_cpu = cpu;
1013 cc->cc_exec_entity[direct].ce_migration_time
1014 = to_sbt;
1015 cc->cc_exec_entity[direct].ce_migration_prec
1016 = precision;
1017 cc->cc_exec_entity[direct].ce_migration_func = ftn;
1018 cc->cc_exec_entity[direct].ce_migration_arg = arg;
1019 c->c_flags |= CALLOUT_DFRMIGRATION;
1020 CTR6(KTR_CALLOUT,
1021 "migration of %p func %p arg %p in %d.%08x to %u deferred",
1022 c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
1023 (u_int)(to_sbt & 0xffffffff), cpu);
1024 CC_UNLOCK(cc);
1025 return (cancelled);
1026 }
1027 cc = callout_cpu_switch(c, cc, cpu);
1028 }
1029 #endif
1030
1031 callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags);
1032 CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x",
1033 cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
1034 (u_int)(to_sbt & 0xffffffff));
1035 CC_UNLOCK(cc);
1036
1037 return (cancelled);
1038 }
1039
1040 /*
1041 * Common idioms that can be optimized in the future.
1042 */
1043 int
1044 callout_schedule_on(struct callout *c, int to_ticks, int cpu)
1045 {
1046 return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
1047 }
1048
1049 int
1050 callout_schedule(struct callout *c, int to_ticks)
1051 {
1052 return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
1053 }
1054
1055 int
1056 _callout_stop_safe(c, safe)
1057 struct callout *c;
1058 int safe;
1059 {
1060 struct callout_cpu *cc, *old_cc;
1061 struct lock_class *class;
1062 int direct, sq_locked, use_lock;
1063
1064 /*
1065 * Some old subsystems don't hold Giant while running a callout_stop(),
1066 * so just discard this check for the moment.
1067 */
1068 if (!safe && c->c_lock != NULL) {
1069 if (c->c_lock == &Giant.lock_object)
1070 use_lock = mtx_owned(&Giant);
1071 else {
1072 use_lock = 1;
1073 class = LOCK_CLASS(c->c_lock);
1074 class->lc_assert(c->c_lock, LA_XLOCKED);
1075 }
1076 } else
1077 use_lock = 0;
1078 direct = (c->c_flags & CALLOUT_DIRECT) != 0;
1079 sq_locked = 0;
1080 old_cc = NULL;
1081 again:
1082 cc = callout_lock(c);
1083
1084 /*
1085 * If the callout was migrating while the callout cpu lock was
1086 * dropped, just drop the sleepqueue lock and check the states
1087 * again.
1088 */
1089 if (sq_locked != 0 && cc != old_cc) {
1090 #ifdef SMP
1091 CC_UNLOCK(cc);
1092 sleepq_release(&old_cc->cc_exec_entity[direct].cc_waiting);
1093 sq_locked = 0;
1094 old_cc = NULL;
1095 goto again;
1096 #else
1097 panic("migration should not happen");
1098 #endif
1099 }
1100
1101 /*
1102 * If the callout isn't pending, it's not on the queue, so
1103 * don't attempt to remove it from the queue. We can try to
1104 * stop it by other means, however.
1105 */
1106 if (!(c->c_flags & CALLOUT_PENDING)) {
1107 c->c_flags &= ~CALLOUT_ACTIVE;
1108
1109 /*
1110 * If it wasn't on the queue and it isn't the current
1111 * callout, then we can't stop it, so just bail.
1112 */
1113 if (cc->cc_exec_entity[direct].cc_curr != c) {
1114 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
1115 c, c->c_func, c->c_arg);
1116 CC_UNLOCK(cc);
1117 if (sq_locked)
1118 sleepq_release(
1119 &cc->cc_exec_entity[direct].cc_waiting);
1120 return (0);
1121 }
1122
1123 if (safe) {
1124 /*
1125 * The current callout is running (or just
1126 * about to run) and blocking is allowed, so
1127 * just wait for the current invocation to
1128 * finish.
1129 */
1130 while (cc->cc_exec_entity[direct].cc_curr == c) {
1131 /*
1132 * Use direct calls to sleepqueue interface
1133 * instead of cv/msleep in order to avoid
1134 * a LOR between cc_lock and sleepqueue
1135 * chain spinlocks. This piece of code
1136 * effectively emulates a msleep_spin() call.
1137 *
1138 * If we already have the sleepqueue chain
1139 * locked, then we can safely block. If we
1140 * don't already have it locked, however,
1141 * we have to drop the cc_lock to lock
1142 * it. This opens several races, so we
1143 * restart at the beginning once we have
1144 * both locks. If nothing has changed, then
1145 * we will end up back here with sq_locked
1146 * set.
1147 */
1148 if (!sq_locked) {
1149 CC_UNLOCK(cc);
1150 sleepq_lock(
1151 &cc->cc_exec_entity[direct].cc_waiting);
1152 sq_locked = 1;
1153 old_cc = cc;
1154 goto again;
1155 }
1156
1157 /*
1158 * Migration could be cancelled here, but
1159 * since it is not clear when that would
1160 * actually be handled, just let softclock()
1161 * take care of it.
1162 */
1163 cc->cc_exec_entity[direct].cc_waiting = true;
1164 DROP_GIANT();
1165 CC_UNLOCK(cc);
1166 sleepq_add(
1167 &cc->cc_exec_entity[direct].cc_waiting,
1168 &cc->cc_lock.lock_object, "codrain",
1169 SLEEPQ_SLEEP, 0);
1170 sleepq_wait(
1171 &cc->cc_exec_entity[direct].cc_waiting,
1172 0);
1173 sq_locked = 0;
1174 old_cc = NULL;
1175
1176 /* Reacquire locks previously released. */
1177 PICKUP_GIANT();
1178 CC_LOCK(cc);
1179 }
1180 } else if (use_lock &&
1181 !cc->cc_exec_entity[direct].cc_cancel) {
1182 /*
1183 * The current callout is waiting for its
1184 * lock which we hold. Cancel the callout
1185 * and return. After our caller drops the
1186 * lock, the callout will be skipped in
1187 * softclock().
1188 */
1189 cc->cc_exec_entity[direct].cc_cancel = true;
1190 CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
1191 c, c->c_func, c->c_arg);
1192 KASSERT(!cc_cce_migrating(cc, direct),
1193 ("callout wrongly scheduled for migration"));
1194 CC_UNLOCK(cc);
1195 KASSERT(!sq_locked, ("sleepqueue chain locked"));
1196 return (1);
1197 } else if ((c->c_flags & CALLOUT_DFRMIGRATION) != 0) {
1198 c->c_flags &= ~CALLOUT_DFRMIGRATION;
1199 CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
1200 c, c->c_func, c->c_arg);
1201 CC_UNLOCK(cc);
1202 return (1);
1203 }
1204 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
1205 c, c->c_func, c->c_arg);
1206 CC_UNLOCK(cc);
1207 KASSERT(!sq_locked, ("sleepqueue chain still locked"));
1208 return (0);
1209 }
1210 if (sq_locked)
1211 sleepq_release(&cc->cc_exec_entity[direct].cc_waiting);
1212
1213 c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
1214
1215 CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
1216 c, c->c_func, c->c_arg);
1217 if ((c->c_flags & CALLOUT_PROCESSED) == 0) {
1218 if (cc->cc_exec_next_dir == c)
1219 cc->cc_exec_next_dir = LIST_NEXT(c, c_links.le);
1220 LIST_REMOVE(c, c_links.le);
1221 } else
1222 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
1223 callout_cc_del(c, cc);
1224
1225 CC_UNLOCK(cc);
1226 return (1);
1227 }
1228
1229 void
1230 callout_init(c, mpsafe)
1231 struct callout *c;
1232 int mpsafe;
1233 {
1234 bzero(c, sizeof *c);
1235 if (mpsafe) {
1236 c->c_lock = NULL;
1237 c->c_flags = CALLOUT_RETURNUNLOCKED;
1238 } else {
1239 c->c_lock = &Giant.lock_object;
1240 c->c_flags = 0;
1241 }
1242 c->c_cpu = timeout_cpu;
1243 }
1244
1245 void
1246 _callout_init_lock(c, lock, flags)
1247 struct callout *c;
1248 struct lock_object *lock;
1249 int flags;
1250 {
1251 bzero(c, sizeof *c);
1252 c->c_lock = lock;
1253 KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
1254 ("callout_init_lock: bad flags %d", flags));
1255 KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
1256 ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
1257 KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
1258 (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
1259 __func__));
1260 c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
1261 c->c_cpu = timeout_cpu;
1262 }
1263
1264 #ifdef APM_FIXUP_CALLTODO
1265 /*
1266 * Adjust the kernel calltodo timeout list. This routine is used after
1267 * an APM resume to recalculate the calltodo timer list values with the
1268 * number of hz's we have been sleeping. The next hardclock() will detect
1269 * that there are fired timers and run softclock() to execute them.
1270 *
1271 * Please note, I have not done an exhaustive analysis of what code this
1272 * might break. I am motivated to have my select()'s and alarm()'s that
1273 * have expired during suspend fire upon resume so that the applications
1274 * which set the timer can do the maintenance the timer was for as close
1275 * as possible to the originally intended time. Testing this code for a
1276 * week showed that resuming from a suspend resulted in 22 to 25 timers
1277 * firing, which seemed independent of whether the suspend was 2 hours or
1278 * 2 days. Your mileage may vary. - Ken Key <key@cs.utk.edu>
1279 */
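/*
 * Note: this routine still walks the pre-callwheel 'calltodo' tick list,
 * which no longer exists in this file, so the APM_FIXUP_CALLTODO code is
 * retained for reference only and will not build against the current
 * structures.
 */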
1280 void
1281 adjust_timeout_calltodo(time_change)
1282 struct timeval *time_change;
1283 {
1284 register struct callout *p;
1285 unsigned long delta_ticks;
1286
1287 /*
1288 * How many ticks were we asleep?
1289 * (stolen from tvtohz()).
1290 */
1291
1292 /* Don't do anything */
1293 if (time_change->tv_sec < 0)
1294 return;
1295 else if (time_change->tv_sec <= LONG_MAX / 1000000)
1296 delta_ticks = (time_change->tv_sec * 1000000 +
1297 time_change->tv_usec + (tick - 1)) / tick + 1;
1298 else if (time_change->tv_sec <= LONG_MAX / hz)
1299 delta_ticks = time_change->tv_sec * hz +
1300 (time_change->tv_usec + (tick - 1)) / tick + 1;
1301 else
1302 delta_ticks = LONG_MAX;
1303
1304 if (delta_ticks > INT_MAX)
1305 delta_ticks = INT_MAX;
1306
1307 /*
1308 * Now rip through the timer calltodo list looking for timers
1309 * to expire.
1310 */
1311
1312 /* don't collide with softclock() */
1313 CC_LOCK(cc);
1314 for (p = calltodo.c_next; p != NULL; p = p->c_next) {
1315 p->c_time -= delta_ticks;
1316
1317 /* Break if the timer had more time on it than delta_ticks */
1318 if (p->c_time > 0)
1319 break;
1320
1321 /* take back the ticks the timer didn't use (p->c_time <= 0) */
1322 delta_ticks = -p->c_time;
1323 }
1324 CC_UNLOCK(cc);
1325
1326 return;
1327 }
1328 #endif /* APM_FIXUP_CALLTODO */
1329
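/*
 * Find the index of the most significant bit of an sbintime_t, rounding
 * to the nearest power of two first (the "sbt >> 1" addition below).
 * Used by the sysctl handler that follows to bucket times and precisions
 * into power-of-two histogram bins.
 */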
1330 static int
1331 flssbt(sbintime_t sbt)
1332 {
1333
1334 sbt += (uint64_t)sbt >> 1;
1335 if (sizeof(long) >= sizeof(sbintime_t))
1336 return (flsl(sbt));
1337 if (sbt >= SBT_1S)
1338 return (flsl(((uint64_t)sbt) >> 32) + 32);
1339 return (flsl(sbt));
1340 }
1341
1342 /*
1343 * Dump an immediate statistics snapshot of the scheduled callouts.
1344 */
1345 static int
1346 sysctl_kern_callout_stat(SYSCTL_HANDLER_ARGS)
1347 {
1348 struct callout *tmp;
1349 struct callout_cpu *cc;
1350 struct callout_list *sc;
1351 sbintime_t maxpr, maxt, medpr, medt, now, spr, st, t;
1352 int ct[64], cpr[64], ccpbk[32];
1353 int error, val, i, count, tcum, pcum, maxc, c, medc;
1354 #ifdef SMP
1355 int cpu;
1356 #endif
1357
1358 val = 0;
1359 error = sysctl_handle_int(oidp, &val, 0, req);
1360 if (error != 0 || req->newptr == NULL)
1361 return (error);
1362 count = maxc = 0;
1363 st = spr = maxt = maxpr = 0;
1364 bzero(ccpbk, sizeof(ccpbk));
1365 bzero(ct, sizeof(ct));
1366 bzero(cpr, sizeof(cpr));
1367 now = sbinuptime();
1368 #ifdef SMP
1369 CPU_FOREACH(cpu) {
1370 cc = CC_CPU(cpu);
1371 #else
1372 cc = CC_CPU(timeout_cpu);
1373 #endif
1374 CC_LOCK(cc);
1375 for (i = 0; i < callwheelsize; i++) {
1376 sc = &cc->cc_callwheel[i];
1377 c = 0;
1378 LIST_FOREACH(tmp, sc, c_links.le) {
1379 c++;
1380 t = tmp->c_time - now;
1381 if (t < 0)
1382 t = 0;
1383 st += t / SBT_1US;
1384 spr += tmp->c_precision / SBT_1US;
1385 if (t > maxt)
1386 maxt = t;
1387 if (tmp->c_precision > maxpr)
1388 maxpr = tmp->c_precision;
1389 ct[flssbt(t)]++;
1390 cpr[flssbt(tmp->c_precision)]++;
1391 }
1392 if (c > maxc)
1393 maxc = c;
1394 ccpbk[fls(c + c / 2)]++;
1395 count += c;
1396 }
1397 CC_UNLOCK(cc);
1398 #ifdef SMP
1399 }
1400 #endif
1401
1402 for (i = 0, tcum = 0; i < 64 && tcum < count / 2; i++)
1403 tcum += ct[i];
1404 medt = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
1405 for (i = 0, pcum = 0; i < 64 && pcum < count / 2; i++)
1406 pcum += cpr[i];
1407 medpr = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
1408 for (i = 0, c = 0; i < 32 && c < count / 2; i++)
1409 c += ccpbk[i];
1410 medc = (i >= 2) ? (1 << (i - 2)) : 0;
1411
1412 printf("Scheduled callouts statistic snapshot:\n");
1413 printf(" Callouts: %6d Buckets: %6d*%-3d Bucket size: 0.%06ds\n",
1414 count, callwheelsize, mp_ncpus, 1000000 >> CC_HASH_SHIFT);
1415 printf(" C/Bk: med %5d avg %6d.%06jd max %6d\n",
1416 medc,
1417 count / callwheelsize / mp_ncpus,
1418 (uint64_t)count * 1000000 / callwheelsize / mp_ncpus % 1000000,
1419 maxc);
1420 printf(" Time: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
1421 medt / SBT_1S, (medt & 0xffffffff) * 1000000 >> 32,
1422 (st / count) / 1000000, (st / count) % 1000000,
1423 maxt / SBT_1S, (maxt & 0xffffffff) * 1000000 >> 32);
1424 printf(" Prec: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
1425 medpr / SBT_1S, (medpr & 0xffffffff) * 1000000 >> 32,
1426 (spr / count) / 1000000, (spr / count) % 1000000,
1427 maxpr / SBT_1S, (maxpr & 0xffffffff) * 1000000 >> 32);
1428 printf(" Distribution: \tbuckets\t time\t tcum\t"
1429 " prec\t pcum\n");
1430 for (i = 0, tcum = pcum = 0; i < 64; i++) {
1431 if (ct[i] == 0 && cpr[i] == 0)
1432 continue;
1433 t = (i != 0) ? (((sbintime_t)1) << (i - 1)) : 0;
1434 tcum += ct[i];
1435 pcum += cpr[i];
1436 printf(" %10jd.%06jds\t 2**%d\t%7d\t%7d\t%7d\t%7d\n",
1437 t / SBT_1S, (t & 0xffffffff) * 1000000 >> 32,
1438 i - 1 - (32 - CC_HASH_SHIFT),
1439 ct[i], tcum, cpr[i], pcum);
1440 }
1441 return (error);
1442 }
1443 SYSCTL_PROC(_kern, OID_AUTO, callout_stat,
1444 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
1445 0, 0, sysctl_kern_callout_stat, "I",
1446 "Dump immediate statistic snapshot of the scheduled callouts");