1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1982, 1986, 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * From: @(#)kern_clock.c 8.5 (Berkeley) 1/21/94
37 */
38
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD: releng/12.0/sys/kern/kern_timeout.c 326218 2017-11-25 23:41:05Z nwhitehorn $");
41
42 #include "opt_callout_profiling.h"
43 #include "opt_ddb.h"
44 #if defined(__arm__)
45 #include "opt_timer.h"
46 #endif
47 #include "opt_rss.h"
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/bus.h>
52 #include <sys/callout.h>
53 #include <sys/file.h>
54 #include <sys/interrupt.h>
55 #include <sys/kernel.h>
56 #include <sys/ktr.h>
57 #include <sys/lock.h>
58 #include <sys/malloc.h>
59 #include <sys/mutex.h>
60 #include <sys/proc.h>
61 #include <sys/sdt.h>
62 #include <sys/sleepqueue.h>
63 #include <sys/sysctl.h>
64 #include <sys/smp.h>
65
66 #ifdef DDB
67 #include <ddb/ddb.h>
68 #include <machine/_inttypes.h>
69 #endif
70
71 #ifdef SMP
72 #include <machine/cpu.h>
73 #endif
74
75 #ifndef NO_EVENTTIMERS
76 DPCPU_DECLARE(sbintime_t, hardclocktime);
77 #endif
78
79 SDT_PROVIDER_DEFINE(callout_execute);
80 SDT_PROBE_DEFINE1(callout_execute, , , callout__start, "struct callout *");
81 SDT_PROBE_DEFINE1(callout_execute, , , callout__end, "struct callout *");
82
83 #ifdef CALLOUT_PROFILING
84 static int avg_depth;
85 SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
86 "Average number of items examined per softclock call. Units = 1/1000");
87 static int avg_gcalls;
88 SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
89 "Average number of Giant callouts made per softclock call. Units = 1/1000");
90 static int avg_lockcalls;
91 SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
92 "Average number of lock callouts made per softclock call. Units = 1/1000");
93 static int avg_mpcalls;
94 SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
95 "Average number of MP callouts made per softclock call. Units = 1/1000");
96 static int avg_depth_dir;
97 SYSCTL_INT(_debug, OID_AUTO, to_avg_depth_dir, CTLFLAG_RD, &avg_depth_dir, 0,
98 "Average number of direct callouts examined per callout_process call. "
99 "Units = 1/1000");
100 static int avg_lockcalls_dir;
101 SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls_dir, CTLFLAG_RD,
102 &avg_lockcalls_dir, 0, "Average number of lock direct callouts made per "
103 "callout_process call. Units = 1/1000");
104 static int avg_mpcalls_dir;
105 SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls_dir, CTLFLAG_RD, &avg_mpcalls_dir,
106 0, "Average number of MP direct callouts made per callout_process call. "
107 "Units = 1/1000");
108 #endif
109
110 static int ncallout;
111 SYSCTL_INT(_kern, OID_AUTO, ncallout, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &ncallout, 0,
112 "Number of entries in callwheel and size of timeout() preallocation");
113
114 #ifdef RSS
115 static int pin_default_swi = 1;
116 static int pin_pcpu_swi = 1;
117 #else
118 static int pin_default_swi = 0;
119 static int pin_pcpu_swi = 0;
120 #endif
121
122 SYSCTL_INT(_kern, OID_AUTO, pin_default_swi, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pin_default_swi,
123 0, "Pin the default (non-per-cpu) swi (shared with PCPU 0 swi)");
124 SYSCTL_INT(_kern, OID_AUTO, pin_pcpu_swi, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pin_pcpu_swi,
125 0, "Pin the per-CPU swis (except PCPU 0, which is also default");
126
127 /*
128 * TODO:
129 * allocate more timeout table slots when table overflows.
130 */
131 u_int callwheelsize, callwheelmask;
132
133 /*
134 * The callout cpu exec entities represent the information necessary to
135 * describe the state of callouts currently running on the CPU and the
136 * information needed for migrating callouts to a new callout cpu. In
137 * particular, the first entry of the cc_exec_entity array holds the
138 * information for a callout running in SWI thread context, while the
139 * second one holds the information for a callout running directly from
140 * hardware interrupt context. The cached information is essential for
141 * deferring migration when the migrating callout is already running.
142 */
143 struct cc_exec {
144 struct callout *cc_curr;
145 void (*cc_drain)(void *);
146 #ifdef SMP
147 void (*ce_migration_func)(void *);
148 void *ce_migration_arg;
149 int ce_migration_cpu;
150 sbintime_t ce_migration_time;
151 sbintime_t ce_migration_prec;
152 #endif
153 bool cc_cancel;
154 bool cc_waiting;
155 };
156
157 /*
158 * There is one struct callout_cpu per cpu, holding all relevant
159 * state for the callout processing thread on the individual CPU.
160 */
161 struct callout_cpu {
162 struct mtx_padalign cc_lock;
163 struct cc_exec cc_exec_entity[2];
164 struct callout *cc_next;
165 struct callout *cc_callout;
166 struct callout_list *cc_callwheel;
167 struct callout_tailq cc_expireq;
168 struct callout_slist cc_callfree;
169 sbintime_t cc_firstevent;
170 sbintime_t cc_lastscan;
171 void *cc_cookie;
172 u_int cc_bucket;
173 u_int cc_inited;
174 char cc_ktr_event_name[20];
175 };
176
177 #define callout_migrating(c) ((c)->c_iflags & CALLOUT_DFRMIGRATION)
178
179 #define cc_exec_curr(cc, dir) cc->cc_exec_entity[dir].cc_curr
180 #define cc_exec_drain(cc, dir) cc->cc_exec_entity[dir].cc_drain
181 #define cc_exec_next(cc) cc->cc_next
182 #define cc_exec_cancel(cc, dir) cc->cc_exec_entity[dir].cc_cancel
183 #define cc_exec_waiting(cc, dir) cc->cc_exec_entity[dir].cc_waiting
184 #ifdef SMP
185 #define cc_migration_func(cc, dir) cc->cc_exec_entity[dir].ce_migration_func
186 #define cc_migration_arg(cc, dir) cc->cc_exec_entity[dir].ce_migration_arg
187 #define cc_migration_cpu(cc, dir) cc->cc_exec_entity[dir].ce_migration_cpu
188 #define cc_migration_time(cc, dir) cc->cc_exec_entity[dir].ce_migration_time
189 #define cc_migration_prec(cc, dir) cc->cc_exec_entity[dir].ce_migration_prec
190
191 struct callout_cpu cc_cpu[MAXCPU];
192 #define CPUBLOCK MAXCPU
193 #define CC_CPU(cpu) (&cc_cpu[(cpu)])
194 #define CC_SELF() CC_CPU(PCPU_GET(cpuid))
195 #else
196 struct callout_cpu cc_cpu;
197 #define CC_CPU(cpu) &cc_cpu
198 #define CC_SELF() &cc_cpu
199 #endif
200 #define CC_LOCK(cc) mtx_lock_spin(&(cc)->cc_lock)
201 #define CC_UNLOCK(cc) mtx_unlock_spin(&(cc)->cc_lock)
202 #define CC_LOCK_ASSERT(cc) mtx_assert(&(cc)->cc_lock, MA_OWNED)
203
204 static int timeout_cpu;
205
206 static void callout_cpu_init(struct callout_cpu *cc, int cpu);
207 static void softclock_call_cc(struct callout *c, struct callout_cpu *cc,
208 #ifdef CALLOUT_PROFILING
209 int *mpcalls, int *lockcalls, int *gcalls,
210 #endif
211 int direct);
212
213 static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");
214
215 /**
216 * Locked by cc_lock:
217 * cc_curr - If a callout is in progress, it is cc_curr.
218 * If cc_curr is non-NULL, threads waiting in
219 * callout_drain() will be woken up as soon as the
220 * relevant callout completes.
221 * cc_cancel - Changing to 1 with both the callout's lock and cc_lock held
222 * guarantees that the current callout will not run.
223 * The softclock() function sets this to 0 before it
224 * drops cc_lock to acquire the callout's lock, and it calls
225 * the handler only if cc_cancel is still 0 after
226 * the callout's lock is successfully acquired.
227 * cc_waiting - If a thread is waiting in callout_drain(), then
228 * cc_waiting is nonzero. Set only when
229 * cc_curr is non-NULL.
230 */
231
232 /*
233 * Resets the execution entity tied to a specific callout cpu.
234 */
235 static void
236 cc_cce_cleanup(struct callout_cpu *cc, int direct)
237 {
238
239 cc_exec_curr(cc, direct) = NULL;
240 cc_exec_cancel(cc, direct) = false;
241 cc_exec_waiting(cc, direct) = false;
242 #ifdef SMP
243 cc_migration_cpu(cc, direct) = CPUBLOCK;
244 cc_migration_time(cc, direct) = 0;
245 cc_migration_prec(cc, direct) = 0;
246 cc_migration_func(cc, direct) = NULL;
247 cc_migration_arg(cc, direct) = NULL;
248 #endif
249 }
250
251 /*
252 * Checks if migration is requested by a specific callout cpu.
253 */
254 static int
255 cc_cce_migrating(struct callout_cpu *cc, int direct)
256 {
257
258 #ifdef SMP
259 return (cc_migration_cpu(cc, direct) != CPUBLOCK);
260 #else
261 return (0);
262 #endif
263 }
264
265 /*
266 * Kernel low level callwheel initialization
267 * called on the BSP during kernel startup.
268 */
269 static void
270 callout_callwheel_init(void *dummy)
271 {
272 struct callout_cpu *cc;
273
274 /*
275 * Calculate the size of the callout wheel and the preallocated
276 * timeout() structures.
277 * XXX: Clip callout to result of previous function of maxusers
278 * maximum 384. This is still huge, but acceptable.
279 */
280 memset(CC_CPU(curcpu), 0, sizeof(cc_cpu));
281 ncallout = imin(16 + maxproc + maxfiles, 18508);
282 TUNABLE_INT_FETCH("kern.ncallout", &ncallout);
283
284 /*
285 * Calculate the callout wheel size; it should be the next power of
286 * two higher than 'ncallout'.
287 */
288 callwheelsize = 1 << fls(ncallout);
289 callwheelmask = callwheelsize - 1;
290
291 /*
292 * Fetch whether we're pinning the swi's or not.
293 */
294 TUNABLE_INT_FETCH("kern.pin_default_swi", &pin_default_swi);
295 TUNABLE_INT_FETCH("kern.pin_pcpu_swi", &pin_pcpu_swi);
296
297 /*
298 * Only BSP handles timeout(9) and receives a preallocation.
299 *
300 * XXX: Once all timeout(9) consumers are converted this can
301 * be removed.
302 */
303 timeout_cpu = PCPU_GET(cpuid);
304 cc = CC_CPU(timeout_cpu);
305 cc->cc_callout = malloc(ncallout * sizeof(struct callout),
306 M_CALLOUT, M_WAITOK);
307 callout_cpu_init(cc, timeout_cpu);
308 }
309 SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);
310
311 /*
312 * Initialize the per-cpu callout structures.
313 */
314 static void
315 callout_cpu_init(struct callout_cpu *cc, int cpu)
316 {
317 struct callout *c;
318 int i;
319
320 mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
321 SLIST_INIT(&cc->cc_callfree);
322 cc->cc_inited = 1;
323 cc->cc_callwheel = malloc(sizeof(struct callout_list) * callwheelsize,
324 M_CALLOUT, M_WAITOK);
325 for (i = 0; i < callwheelsize; i++)
326 LIST_INIT(&cc->cc_callwheel[i]);
327 TAILQ_INIT(&cc->cc_expireq);
328 cc->cc_firstevent = SBT_MAX;
329 for (i = 0; i < 2; i++)
330 cc_cce_cleanup(cc, i);
331 snprintf(cc->cc_ktr_event_name, sizeof(cc->cc_ktr_event_name),
332 "callwheel cpu %d", cpu);
333 if (cc->cc_callout == NULL) /* Only BSP handles timeout(9) */
334 return;
335 for (i = 0; i < ncallout; i++) {
336 c = &cc->cc_callout[i];
337 callout_init(c, 0);
338 c->c_iflags = CALLOUT_LOCAL_ALLOC;
339 SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
340 }
341 }
342
343 #ifdef SMP
344 /*
345 * Switches the cpu tied to a specific callout.
346 * The function expects the incoming callout cpu to be locked and returns
347 * with the outgoing callout cpu locked.
348 */
349 static struct callout_cpu *
350 callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
351 {
352 struct callout_cpu *new_cc;
353
354 MPASS(c != NULL && cc != NULL);
355 CC_LOCK_ASSERT(cc);
356
357 /*
358 * Avoid interrupts and preemption firing after the callout cpu
359 * is blocked in order to avoid deadlocks, as the new thread
360 * may need to acquire the callout cpu lock.
361 */
362 c->c_cpu = CPUBLOCK;
363 spinlock_enter();
364 CC_UNLOCK(cc);
365 new_cc = CC_CPU(new_cpu);
366 CC_LOCK(new_cc);
367 spinlock_exit();
368 c->c_cpu = new_cpu;
369 return (new_cc);
370 }
371 #endif
372
373 /*
374 * Start standard softclock thread.
375 */
376 static void
377 start_softclock(void *dummy)
378 {
379 struct callout_cpu *cc;
380 char name[MAXCOMLEN];
381 #ifdef SMP
382 int cpu;
383 struct intr_event *ie;
384 #endif
385
386 cc = CC_CPU(timeout_cpu);
387 snprintf(name, sizeof(name), "clock (%d)", timeout_cpu);
388 if (swi_add(&clk_intr_event, name, softclock, cc, SWI_CLOCK,
389 INTR_MPSAFE, &cc->cc_cookie))
390 panic("died while creating standard software ithreads");
391 if (pin_default_swi &&
392 (intr_event_bind(clk_intr_event, timeout_cpu) != 0)) {
393 printf("%s: timeout clock couldn't be pinned to cpu %d\n",
394 __func__,
395 timeout_cpu);
396 }
397
398 #ifdef SMP
399 CPU_FOREACH(cpu) {
400 if (cpu == timeout_cpu)
401 continue;
402 cc = CC_CPU(cpu);
403 cc->cc_callout = NULL; /* Only BSP handles timeout(9). */
404 callout_cpu_init(cc, cpu);
405 snprintf(name, sizeof(name), "clock (%d)", cpu);
406 ie = NULL;
407 if (swi_add(&ie, name, softclock, cc, SWI_CLOCK,
408 INTR_MPSAFE, &cc->cc_cookie))
409 panic("died while creating standard software ithreads");
410 if (pin_pcpu_swi && (intr_event_bind(ie, cpu) != 0)) {
411 printf("%s: per-cpu clock couldn't be pinned to "
412 "cpu %d\n",
413 __func__,
414 cpu);
415 }
416 }
417 #endif
418 }
419 SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);
420
421 #define CC_HASH_SHIFT 8
422
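/*
 * The callwheel is hashed on the absolute sbintime_t of an event: the
 * seconds part together with the top CC_HASH_SHIFT bits of the fraction
 * select a bucket, so with CC_HASH_SHIFT == 8 each bucket covers 1/256 of
 * a second and consecutive buckets cover consecutive time windows.
 */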
423 static inline u_int
424 callout_hash(sbintime_t sbt)
425 {
426
427 return (sbt >> (32 - CC_HASH_SHIFT));
428 }
429
430 static inline u_int
431 callout_get_bucket(sbintime_t sbt)
432 {
433
434 return (callout_hash(sbt) & callwheelmask);
435 }
436
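/*
 * Scan the callwheel for callouts that are due as of 'now'.  This is
 * called from the per-CPU event timer interrupt path (see
 * kern_clocksource.c): callouts marked C_DIRECT_EXEC are run inline here,
 * the rest are queued on cc_expireq for the softclock() SWI, and the time
 * of the next required event timer interrupt is computed as a side effect.
 */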
437 void
438 callout_process(sbintime_t now)
439 {
440 struct callout *tmp, *tmpn;
441 struct callout_cpu *cc;
442 struct callout_list *sc;
443 sbintime_t first, last, max, tmp_max;
444 uint32_t lookahead;
445 u_int firstb, lastb, nowb;
446 #ifdef CALLOUT_PROFILING
447 int depth_dir = 0, mpcalls_dir = 0, lockcalls_dir = 0;
448 #endif
449
450 cc = CC_SELF();
451 mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
452
453 /* Compute the buckets of the last scan and present times. */
454 firstb = callout_hash(cc->cc_lastscan);
455 cc->cc_lastscan = now;
456 nowb = callout_hash(now);
457
458 /* Compute the last bucket and minimum time of the bucket after it. */
459 if (nowb == firstb)
460 lookahead = (SBT_1S / 16);
461 else if (nowb - firstb == 1)
462 lookahead = (SBT_1S / 8);
463 else
464 lookahead = (SBT_1S / 2);
465 first = last = now;
466 first += (lookahead / 2);
467 last += lookahead;
468 last &= (0xffffffffffffffffLLU << (32 - CC_HASH_SHIFT));
469 lastb = callout_hash(last) - 1;
470 max = last;
471
472 /*
473 * Check if we wrapped around the entire wheel from the last scan.
474 * If so, we need to scan the entire wheel for pending callouts.
475 */
476 if (lastb - firstb >= callwheelsize) {
477 lastb = firstb + callwheelsize - 1;
478 if (nowb - firstb >= callwheelsize)
479 nowb = lastb;
480 }
481
482 /* Iterate callwheel from firstb to nowb and then up to lastb. */
483 do {
484 sc = &cc->cc_callwheel[firstb & callwheelmask];
485 tmp = LIST_FIRST(sc);
486 while (tmp != NULL) {
487 /* Run the callout if its scheduled time has been reached. */
488 if (tmp->c_time <= now) {
489 /*
490 * Consumer told us the callout may be run
491 * directly from hardware interrupt context.
492 */
493 if (tmp->c_iflags & CALLOUT_DIRECT) {
494 #ifdef CALLOUT_PROFILING
495 ++depth_dir;
496 #endif
497 cc_exec_next(cc) =
498 LIST_NEXT(tmp, c_links.le);
499 cc->cc_bucket = firstb & callwheelmask;
500 LIST_REMOVE(tmp, c_links.le);
501 softclock_call_cc(tmp, cc,
502 #ifdef CALLOUT_PROFILING
503 &mpcalls_dir, &lockcalls_dir, NULL,
504 #endif
505 1);
506 tmp = cc_exec_next(cc);
507 cc_exec_next(cc) = NULL;
508 } else {
509 tmpn = LIST_NEXT(tmp, c_links.le);
510 LIST_REMOVE(tmp, c_links.le);
511 TAILQ_INSERT_TAIL(&cc->cc_expireq,
512 tmp, c_links.tqe);
513 tmp->c_iflags |= CALLOUT_PROCESSED;
514 tmp = tmpn;
515 }
516 continue;
517 }
518 /* Skip events in the distant future. */
519 if (tmp->c_time >= max)
520 goto next;
521 /*
522 * The event's minimal time is bigger than the present maximal
523 * time, so it cannot be aggregated.
524 */
525 if (tmp->c_time > last) {
526 lastb = nowb;
527 goto next;
528 }
529 /* Update first and last time, respecting this event. */
530 if (tmp->c_time < first)
531 first = tmp->c_time;
532 tmp_max = tmp->c_time + tmp->c_precision;
533 if (tmp_max < last)
534 last = tmp_max;
535 next:
536 tmp = LIST_NEXT(tmp, c_links.le);
537 }
538 /* Proceed with the next bucket. */
539 firstb++;
540 /*
541 * Stop if we have looked past the present time and found
542 * some event we cannot execute now, or if we have
543 * looked far enough into the future.
544 */
545 } while (((int)(firstb - lastb)) <= 0);
546 cc->cc_firstevent = last;
547 #ifndef NO_EVENTTIMERS
548 cpu_new_callout(curcpu, last, first);
549 #endif
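/*
 * These running averages are fixed-point exponential moving averages:
 * each new sample is scaled by 1000 (hence "Units = 1/1000" in the sysctl
 * descriptions) and blended in with a weight of 1/256 per call.
 */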
550 #ifdef CALLOUT_PROFILING
551 avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8;
552 avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8;
553 avg_lockcalls_dir += (lockcalls_dir * 1000 - avg_lockcalls_dir) >> 8;
554 #endif
555 mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
556 /*
557 * swi_sched acquires the thread lock, so we don't want to call it
558 * with cc_lock held; incorrect locking order.
559 */
560 if (!TAILQ_EMPTY(&cc->cc_expireq))
561 swi_sched(cc->cc_cookie, 0);
562 }
563
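/*
 * Lock and return the callout_cpu the callout is currently bound to,
 * spinning while the binding is transiently set to CPUBLOCK by an
 * in-progress migration.
 */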
564 static struct callout_cpu *
565 callout_lock(struct callout *c)
566 {
567 struct callout_cpu *cc;
568 int cpu;
569
570 for (;;) {
571 cpu = c->c_cpu;
572 #ifdef SMP
573 if (cpu == CPUBLOCK) {
574 while (c->c_cpu == CPUBLOCK)
575 cpu_spinwait();
576 continue;
577 }
578 #endif
579 cc = CC_CPU(cpu);
580 CC_LOCK(cc);
581 if (cpu == c->c_cpu)
582 break;
583 CC_UNLOCK(cc);
584 }
585 return (cc);
586 }
587
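/*
 * Insert a callout into the callwheel bucket matching its expiration time
 * and, when event timers are in use, notify eventtimers(4) if it becomes
 * the earliest pending event.  Called with the callout_cpu lock held.
 */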
588 static void
589 callout_cc_add(struct callout *c, struct callout_cpu *cc,
590 sbintime_t sbt, sbintime_t precision, void (*func)(void *),
591 void *arg, int cpu, int flags)
592 {
593 int bucket;
594
595 CC_LOCK_ASSERT(cc);
596 if (sbt < cc->cc_lastscan)
597 sbt = cc->cc_lastscan;
598 c->c_arg = arg;
599 c->c_iflags |= CALLOUT_PENDING;
600 c->c_iflags &= ~CALLOUT_PROCESSED;
601 c->c_flags |= CALLOUT_ACTIVE;
602 if (flags & C_DIRECT_EXEC)
603 c->c_iflags |= CALLOUT_DIRECT;
604 c->c_func = func;
605 c->c_time = sbt;
606 c->c_precision = precision;
607 bucket = callout_get_bucket(c->c_time);
608 CTR3(KTR_CALLOUT, "precision set for %p: %d.%08x",
609 c, (int)(c->c_precision >> 32),
610 (u_int)(c->c_precision & 0xffffffff));
611 LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le);
612 if (cc->cc_bucket == bucket)
613 cc_exec_next(cc) = c;
614 #ifndef NO_EVENTTIMERS
615 /*
616 * Inform the eventtimers(4) subsystem there's a new callout
617 * that has been inserted, but only if really required.
618 */
619 if (SBT_MAX - c->c_time < c->c_precision)
620 c->c_precision = SBT_MAX - c->c_time;
621 sbt = c->c_time + c->c_precision;
622 if (sbt < cc->cc_firstevent) {
623 cc->cc_firstevent = sbt;
624 cpu_new_callout(cpu, sbt, c->c_time);
625 }
626 #endif
627 }
628
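/*
 * Return a timeout(9)-preallocated callout to the per-CPU free list;
 * caller-supplied callouts are left untouched.
 */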
629 static void
630 callout_cc_del(struct callout *c, struct callout_cpu *cc)
631 {
632
633 if ((c->c_iflags & CALLOUT_LOCAL_ALLOC) == 0)
634 return;
635 c->c_func = NULL;
636 SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
637 }
638
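/*
 * Run a single callout: record it as the currently executing callout for
 * this callout_cpu, drop the cc lock, take the callout's own lock (if
 * any), invoke the handler, and afterwards service any drain, wakeup or
 * deferred migration request posted while the handler was running.
 */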
639 static void
640 softclock_call_cc(struct callout *c, struct callout_cpu *cc,
641 #ifdef CALLOUT_PROFILING
642 int *mpcalls, int *lockcalls, int *gcalls,
643 #endif
644 int direct)
645 {
646 struct rm_priotracker tracker;
647 void (*c_func)(void *);
648 void *c_arg;
649 struct lock_class *class;
650 struct lock_object *c_lock;
651 uintptr_t lock_status;
652 int c_iflags;
653 #ifdef SMP
654 struct callout_cpu *new_cc;
655 void (*new_func)(void *);
656 void *new_arg;
657 int flags, new_cpu;
658 sbintime_t new_prec, new_time;
659 #endif
660 #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
661 sbintime_t sbt1, sbt2;
662 struct timespec ts2;
663 static sbintime_t maxdt = 2 * SBT_1MS; /* 2 msec */
664 static timeout_t *lastfunc;
665 #endif
666
667 KASSERT((c->c_iflags & CALLOUT_PENDING) == CALLOUT_PENDING,
668 ("softclock_call_cc: pend %p %x", c, c->c_iflags));
669 KASSERT((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE,
670 ("softclock_call_cc: act %p %x", c, c->c_flags));
671 class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
672 lock_status = 0;
673 if (c->c_flags & CALLOUT_SHAREDLOCK) {
674 if (class == &lock_class_rm)
675 lock_status = (uintptr_t)&tracker;
676 else
677 lock_status = 1;
678 }
679 c_lock = c->c_lock;
680 c_func = c->c_func;
681 c_arg = c->c_arg;
682 c_iflags = c->c_iflags;
683 if (c->c_iflags & CALLOUT_LOCAL_ALLOC)
684 c->c_iflags = CALLOUT_LOCAL_ALLOC;
685 else
686 c->c_iflags &= ~CALLOUT_PENDING;
687
688 cc_exec_curr(cc, direct) = c;
689 cc_exec_cancel(cc, direct) = false;
690 cc_exec_drain(cc, direct) = NULL;
691 CC_UNLOCK(cc);
692 if (c_lock != NULL) {
693 class->lc_lock(c_lock, lock_status);
694 /*
695 * The callout may have been cancelled
696 * while we switched locks.
697 */
698 if (cc_exec_cancel(cc, direct)) {
699 class->lc_unlock(c_lock);
700 goto skip;
701 }
702 /* The callout cannot be stopped now. */
703 cc_exec_cancel(cc, direct) = true;
704 if (c_lock == &Giant.lock_object) {
705 #ifdef CALLOUT_PROFILING
706 (*gcalls)++;
707 #endif
708 CTR3(KTR_CALLOUT, "callout giant %p func %p arg %p",
709 c, c_func, c_arg);
710 } else {
711 #ifdef CALLOUT_PROFILING
712 (*lockcalls)++;
713 #endif
714 CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
715 c, c_func, c_arg);
716 }
717 } else {
718 #ifdef CALLOUT_PROFILING
719 (*mpcalls)++;
720 #endif
721 CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
722 c, c_func, c_arg);
723 }
724 KTR_STATE3(KTR_SCHED, "callout", cc->cc_ktr_event_name, "running",
725 "func:%p", c_func, "arg:%p", c_arg, "direct:%d", direct);
726 #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
727 sbt1 = sbinuptime();
728 #endif
729 THREAD_NO_SLEEPING();
730 SDT_PROBE1(callout_execute, , , callout__start, c);
731 c_func(c_arg);
732 SDT_PROBE1(callout_execute, , , callout__end, c);
733 THREAD_SLEEPING_OK();
734 #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
735 sbt2 = sbinuptime();
736 sbt2 -= sbt1;
737 if (sbt2 > maxdt) {
738 if (lastfunc != c_func || sbt2 > maxdt * 2) {
739 ts2 = sbttots(sbt2);
740 printf(
741 "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
742 c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
743 }
744 maxdt = sbt2;
745 lastfunc = c_func;
746 }
747 #endif
748 KTR_STATE0(KTR_SCHED, "callout", cc->cc_ktr_event_name, "idle");
749 CTR1(KTR_CALLOUT, "callout %p finished", c);
750 if ((c_iflags & CALLOUT_RETURNUNLOCKED) == 0)
751 class->lc_unlock(c_lock);
752 skip:
753 CC_LOCK(cc);
754 KASSERT(cc_exec_curr(cc, direct) == c, ("mishandled cc_curr"));
755 cc_exec_curr(cc, direct) = NULL;
756 if (cc_exec_drain(cc, direct)) {
757 void (*drain)(void *);
758
759 drain = cc_exec_drain(cc, direct);
760 cc_exec_drain(cc, direct) = NULL;
761 CC_UNLOCK(cc);
762 drain(c_arg);
763 CC_LOCK(cc);
764 }
765 if (cc_exec_waiting(cc, direct)) {
766 /*
767 * There is someone waiting for the
768 * callout to complete.
769 * If the callout was scheduled for
770 * migration, just cancel it.
771 */
772 if (cc_cce_migrating(cc, direct)) {
773 cc_cce_cleanup(cc, direct);
774
775 /*
776 * Ideally we would assert here that the callout is not
777 * destroyed, but that is not easy.
778 */
779 c->c_iflags &= ~CALLOUT_DFRMIGRATION;
780 }
781 cc_exec_waiting(cc, direct) = false;
782 CC_UNLOCK(cc);
783 wakeup(&cc_exec_waiting(cc, direct));
784 CC_LOCK(cc);
785 } else if (cc_cce_migrating(cc, direct)) {
786 KASSERT((c_iflags & CALLOUT_LOCAL_ALLOC) == 0,
787 ("Migrating legacy callout %p", c));
788 #ifdef SMP
789 /*
790 * If the callout was scheduled for
791 * migration, just perform it now.
792 */
793 new_cpu = cc_migration_cpu(cc, direct);
794 new_time = cc_migration_time(cc, direct);
795 new_prec = cc_migration_prec(cc, direct);
796 new_func = cc_migration_func(cc, direct);
797 new_arg = cc_migration_arg(cc, direct);
798 cc_cce_cleanup(cc, direct);
799
800 /*
801 * Ideally we would assert here that the callout is not destroyed,
802 * but that is not easy.
803 *
804 * First, handle deferred callout stops.
805 */
806 if (!callout_migrating(c)) {
807 CTR3(KTR_CALLOUT,
808 "deferred cancelled %p func %p arg %p",
809 c, new_func, new_arg);
810 callout_cc_del(c, cc);
811 return;
812 }
813 c->c_iflags &= ~CALLOUT_DFRMIGRATION;
814
815 new_cc = callout_cpu_switch(c, cc, new_cpu);
816 flags = (direct) ? C_DIRECT_EXEC : 0;
817 callout_cc_add(c, new_cc, new_time, new_prec, new_func,
818 new_arg, new_cpu, flags);
819 CC_UNLOCK(new_cc);
820 CC_LOCK(cc);
821 #else
822 panic("migration should not happen");
823 #endif
824 }
825 /*
826 * If the current callout is locally allocated (from
827 * timeout(9)) then put it on the freelist.
828 *
829 * Note: we need to check the cached copy of c_iflags because
830 * if it was not local, then it's not safe to deref the
831 * callout pointer.
832 */
833 KASSERT((c_iflags & CALLOUT_LOCAL_ALLOC) == 0 ||
834 c->c_iflags == CALLOUT_LOCAL_ALLOC,
835 ("corrupted callout"));
836 if (c_iflags & CALLOUT_LOCAL_ALLOC)
837 callout_cc_del(c, cc);
838 }
839
840 /*
841 * The callout mechanism is based on the work of Adam M. Costello and
842 * George Varghese, published in a technical report entitled "Redesigning
843 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
844 * in FreeBSD by Justin T. Gibbs. The original work on the data structures
845 * used in this implementation was published by G. Varghese and T. Lauck in
846 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
847 * the Efficient Implementation of a Timer Facility" in the Proceedings of
848 * the 11th ACM Annual Symposium on Operating Systems Principles,
849 * Austin, Texas Nov 1987.
850 */
851
852 /*
853 * Software (low priority) clock interrupt.
854 * Run periodic events from timeout queue.
855 */
856 void
857 softclock(void *arg)
858 {
859 struct callout_cpu *cc;
860 struct callout *c;
861 #ifdef CALLOUT_PROFILING
862 int depth = 0, gcalls = 0, lockcalls = 0, mpcalls = 0;
863 #endif
864
865 cc = (struct callout_cpu *)arg;
866 CC_LOCK(cc);
867 while ((c = TAILQ_FIRST(&cc->cc_expireq)) != NULL) {
868 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
869 softclock_call_cc(c, cc,
870 #ifdef CALLOUT_PROFILING
871 &mpcalls, &lockcalls, &gcalls,
872 #endif
873 0);
874 #ifdef CALLOUT_PROFILING
875 ++depth;
876 #endif
877 }
878 #ifdef CALLOUT_PROFILING
879 avg_depth += (depth * 1000 - avg_depth) >> 8;
880 avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
881 avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
882 avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
883 #endif
884 CC_UNLOCK(cc);
885 }
886
887 /*
888 * timeout --
889 * Execute a function after a specified length of time.
890 *
891 * untimeout --
892 * Cancel previous timeout function call.
893 *
894 * callout_handle_init --
895 * Initialize a handle so that using it with untimeout is benign.
896 *
897 * See AT&T BCI Driver Reference Manual for specification. This
898 * implementation differs from that one in that although an
899 * identification value is returned from timeout, the original
900 * arguments to timeout as well as the identifier are used to
901 * identify entries for untimeout.
902 */
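/*
 * Illustrative sketch of the legacy interface (the names "foo_tick" and
 * "foo_softc" are placeholders, not part of this file):
 *
 *	struct callout_handle ch;
 *
 *	callout_handle_init(&ch);
 *	ch = timeout(foo_tick, foo_softc, hz / 10);	(fires in ~1/10 s)
 *	...
 *	untimeout(foo_tick, foo_softc, ch);
 */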
903 struct callout_handle
904 timeout(timeout_t *ftn, void *arg, int to_ticks)
905 {
906 struct callout_cpu *cc;
907 struct callout *new;
908 struct callout_handle handle;
909
910 cc = CC_CPU(timeout_cpu);
911 CC_LOCK(cc);
912 /* Fill in the next free callout structure. */
913 new = SLIST_FIRST(&cc->cc_callfree);
914 if (new == NULL)
915 /* XXX Attempt to malloc first */
916 panic("timeout table full");
917 SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
918 callout_reset(new, to_ticks, ftn, arg);
919 handle.callout = new;
920 CC_UNLOCK(cc);
921
922 return (handle);
923 }
924
925 void
926 untimeout(timeout_t *ftn, void *arg, struct callout_handle handle)
927 {
928 struct callout_cpu *cc;
929
930 /*
931 * Check for a handle that was initialized
932 * by callout_handle_init, but never used
933 * for a real timeout.
934 */
935 if (handle.callout == NULL)
936 return;
937
938 cc = callout_lock(handle.callout);
939 if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
940 callout_stop(handle.callout);
941 CC_UNLOCK(cc);
942 }
943
944 void
945 callout_handle_init(struct callout_handle *handle)
946 {
947 handle->callout = NULL;
948 }
949
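/*
 * Convert the caller-supplied (sbt, precision, flags) triple into the
 * absolute expiration time and precision used by the callwheel.  Unless
 * C_ABSOLUTE or C_PRECALC is given, 'sbt' is taken relative to the current
 * time (or to the last hardclock() tick when C_HARDCLOCK is set), and the
 * effective precision is the larger of the caller's 'precision' and the
 * tolerance requested via C_PREL() (or the system default, tc_precexp).
 * As an illustrative example, a relative request of 500 ms with C_PREL(2)
 * allows roughly 125 ms (sbt >> 2) of slack unless the explicit precision
 * argument is larger.
 */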
950 void
951 callout_when(sbintime_t sbt, sbintime_t precision, int flags,
952 sbintime_t *res, sbintime_t *prec_res)
953 {
954 sbintime_t to_sbt, to_pr;
955
956 if ((flags & (C_ABSOLUTE | C_PRECALC)) != 0) {
957 *res = sbt;
958 *prec_res = precision;
959 return;
960 }
961 if ((flags & C_HARDCLOCK) != 0 && sbt < tick_sbt)
962 sbt = tick_sbt;
963 if ((flags & C_HARDCLOCK) != 0 ||
964 #ifdef NO_EVENTTIMERS
965 sbt >= sbt_timethreshold) {
966 to_sbt = getsbinuptime();
967
968 /* Add safety belt for the case of hz > 1000. */
969 to_sbt += tc_tick_sbt - tick_sbt;
970 #else
971 sbt >= sbt_tickthreshold) {
972 /*
973 * Obtain the time of the last hardclock() call on
974 * this CPU directly from kern_clocksource.c.
975 * This value is per-CPU, but it is equal for all
976 * active ones.
977 */
978 #ifdef __LP64__
979 to_sbt = DPCPU_GET(hardclocktime);
980 #else
981 spinlock_enter();
982 to_sbt = DPCPU_GET(hardclocktime);
983 spinlock_exit();
984 #endif
985 #endif
986 if (cold && to_sbt == 0)
987 to_sbt = sbinuptime();
988 if ((flags & C_HARDCLOCK) == 0)
989 to_sbt += tick_sbt;
990 } else
991 to_sbt = sbinuptime();
992 if (SBT_MAX - to_sbt < sbt)
993 to_sbt = SBT_MAX;
994 else
995 to_sbt += sbt;
996 *res = to_sbt;
997 to_pr = ((C_PRELGET(flags) < 0) ? sbt >> tc_precexp :
998 sbt >> C_PRELGET(flags));
999 *prec_res = to_pr > precision ? to_pr : precision;
1000 }
1001
1002 /*
1003 * New interface; clients allocate their own callout structures.
1004 *
1005 * callout_reset() - establish or change a timeout
1006 * callout_stop() - disestablish a timeout
1007 * callout_init() - initialize a callout structure so that it can
1008 * safely be passed to callout_reset() and callout_stop()
1009 *
1010 * <sys/callout.h> defines three convenience macros:
1011 *
1012 * callout_active() - returns truth if callout has not been stopped,
1013 * drained, or deactivated since the last time the callout was
1014 * reset.
1015 * callout_pending() - returns truth if callout is still waiting for timeout
1016 * callout_deactivate() - marks the callout as having been serviced
1017 */
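/*
 * Illustrative consumer sketch (the names "sc", "sc_mtx", "sc_timer" and
 * "foo_timer_fn" are placeholders, not part of this file).  A driver
 * protecting its callout with its own mutex might do:
 *
 *	callout_init_mtx(&sc->sc_timer, &sc->sc_mtx, 0);
 *	...
 *	mtx_lock(&sc->sc_mtx);
 *	callout_reset(&sc->sc_timer, hz, foo_timer_fn, sc);
 *	mtx_unlock(&sc->sc_mtx);
 *	...
 *	callout_drain(&sc->sc_timer);	(on detach, with sc_mtx not held)
 */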
1018 int
1019 callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t prec,
1020 void (*ftn)(void *), void *arg, int cpu, int flags)
1021 {
1022 sbintime_t to_sbt, precision;
1023 struct callout_cpu *cc;
1024 int cancelled, direct;
1025 int ignore_cpu=0;
1026
1027 cancelled = 0;
1028 if (cpu == -1) {
1029 ignore_cpu = 1;
1030 } else if ((cpu >= MAXCPU) ||
1031 ((CC_CPU(cpu))->cc_inited == 0)) {
1032 /* Invalid CPU spec */
1033 panic("Invalid CPU in callout %d", cpu);
1034 }
1035 callout_when(sbt, prec, flags, &to_sbt, &precision);
1036
1037 /*
1038 * This flag used to be added by callout_cc_add, but the
1039 * first time this function is called we could end up with the
1040 * wrong direct flag if we don't set it before the callout is added.
1041 */
1042 if (flags & C_DIRECT_EXEC) {
1043 direct = 1;
1044 } else {
1045 direct = 0;
1046 }
1047 KASSERT(!direct || c->c_lock == NULL,
1048 ("%s: direct callout %p has lock", __func__, c));
1049 cc = callout_lock(c);
1050 /*
1051 * Don't allow migration of pre-allocated callouts lest they
1052 * become unbalanced; also handle the case where the user does
1053 * not care which CPU the callout runs on.
1054 */
1055 if ((c->c_iflags & CALLOUT_LOCAL_ALLOC) ||
1056 ignore_cpu) {
1057 cpu = c->c_cpu;
1058 }
1059
1060 if (cc_exec_curr(cc, direct) == c) {
1061 /*
1062 * We're being asked to reschedule a callout which is
1063 * currently in progress. If there is a lock then we
1064 * can cancel the callout if it has not really started.
1065 */
1066 if (c->c_lock != NULL && !cc_exec_cancel(cc, direct))
1067 cancelled = cc_exec_cancel(cc, direct) = true;
1068 if (cc_exec_waiting(cc, direct) || cc_exec_drain(cc, direct)) {
1069 /*
1070 * Someone has called callout_drain to kill this
1071 * callout. Don't reschedule.
1072 */
1073 CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
1074 cancelled ? "cancelled" : "failed to cancel",
1075 c, c->c_func, c->c_arg);
1076 CC_UNLOCK(cc);
1077 return (cancelled);
1078 }
1079 #ifdef SMP
1080 if (callout_migrating(c)) {
1081 /*
1082 * This only occurs when a second callout_reset_sbt_on
1083 * is made after a previous one moved it into
1084 * deferred migration (below). Note we do *not* change
1085 * the prev_cpu even though the previous target may
1086 * be different.
1087 */
1088 cc_migration_cpu(cc, direct) = cpu;
1089 cc_migration_time(cc, direct) = to_sbt;
1090 cc_migration_prec(cc, direct) = precision;
1091 cc_migration_func(cc, direct) = ftn;
1092 cc_migration_arg(cc, direct) = arg;
1093 cancelled = 1;
1094 CC_UNLOCK(cc);
1095 return (cancelled);
1096 }
1097 #endif
1098 }
1099 if (c->c_iflags & CALLOUT_PENDING) {
1100 if ((c->c_iflags & CALLOUT_PROCESSED) == 0) {
1101 if (cc_exec_next(cc) == c)
1102 cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
1103 LIST_REMOVE(c, c_links.le);
1104 } else {
1105 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
1106 }
1107 cancelled = 1;
1108 c->c_iflags &= ~ CALLOUT_PENDING;
1109 c->c_flags &= ~ CALLOUT_ACTIVE;
1110 }
1111
1112 #ifdef SMP
1113 /*
1114 * If the callout must migrate, try to perform the migration immediately.
1115 * If the callout is currently running, just defer the migration
1116 * to a more appropriate moment.
1117 */
1118 if (c->c_cpu != cpu) {
1119 if (cc_exec_curr(cc, direct) == c) {
1120 /*
1121 * The pending flag will have been removed since we are
1122 * actually executing the callout on another
1123 * CPU. That callout should be waiting on the
1124 * lock the caller holds. Since we set both the
1125 * active and pending flags again before returning,
1126 * the executing callout will see, once it obtains
1127 * that lock, that pending is true and return.
1128 * When the actual callout execution returns,
1129 * the migration will occur in softclock_call_cc
1130 * and this new callout will be placed on the
1131 * new CPU via a call to callout_cpu_switch(), which
1132 * will take the lock on the right CPU, followed
1133 * by a call to callout_cc_add(), which will add it there
1134 * (see above in softclock_call_cc()).
1135 */
1136 cc_migration_cpu(cc, direct) = cpu;
1137 cc_migration_time(cc, direct) = to_sbt;
1138 cc_migration_prec(cc, direct) = precision;
1139 cc_migration_func(cc, direct) = ftn;
1140 cc_migration_arg(cc, direct) = arg;
1141 c->c_iflags |= (CALLOUT_DFRMIGRATION | CALLOUT_PENDING);
1142 c->c_flags |= CALLOUT_ACTIVE;
1143 CTR6(KTR_CALLOUT,
1144 "migration of %p func %p arg %p in %d.%08x to %u deferred",
1145 c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
1146 (u_int)(to_sbt & 0xffffffff), cpu);
1147 CC_UNLOCK(cc);
1148 return (cancelled);
1149 }
1150 cc = callout_cpu_switch(c, cc, cpu);
1151 }
1152 #endif
1153
1154 callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags);
1155 CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x",
1156 cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
1157 (u_int)(to_sbt & 0xffffffff));
1158 CC_UNLOCK(cc);
1159
1160 return (cancelled);
1161 }
1162
1163 /*
1164 * Common idioms that can be optimized in the future.
1165 */
1166 int
1167 callout_schedule_on(struct callout *c, int to_ticks, int cpu)
1168 {
1169 return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
1170 }
1171
1172 int
1173 callout_schedule(struct callout *c, int to_ticks)
1174 {
1175 return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
1176 }
1177
1178 int
1179 _callout_stop_safe(struct callout *c, int flags, void (*drain)(void *))
1180 {
1181 struct callout_cpu *cc, *old_cc;
1182 struct lock_class *class;
1183 int direct, sq_locked, use_lock;
1184 int cancelled, not_on_a_list;
1185
1186 if ((flags & CS_DRAIN) != 0)
1187 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, c->c_lock,
1188 "calling %s", __func__);
1189
1190 /*
1191 * Some old subsystems don't hold Giant while running a callout_stop(),
1192 * so just discard this check for the moment.
1193 */
1194 if ((flags & CS_DRAIN) == 0 && c->c_lock != NULL) {
1195 if (c->c_lock == &Giant.lock_object)
1196 use_lock = mtx_owned(&Giant);
1197 else {
1198 use_lock = 1;
1199 class = LOCK_CLASS(c->c_lock);
1200 class->lc_assert(c->c_lock, LA_XLOCKED);
1201 }
1202 } else
1203 use_lock = 0;
1204 if (c->c_iflags & CALLOUT_DIRECT) {
1205 direct = 1;
1206 } else {
1207 direct = 0;
1208 }
1209 sq_locked = 0;
1210 old_cc = NULL;
1211 again:
1212 cc = callout_lock(c);
1213
1214 if ((c->c_iflags & (CALLOUT_DFRMIGRATION | CALLOUT_PENDING)) ==
1215 (CALLOUT_DFRMIGRATION | CALLOUT_PENDING) &&
1216 ((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE)) {
1217 /*
1218 * Special case where this slipped in while we
1219 * were migrating *as* the callout is about to
1220 * execute. The caller probably holds the lock
1221 * the callout wants.
1222 *
1223 * Get rid of the migration first. Then set
1224 * the flag that tells this code *not* to
1225 * try to remove it from any lists (it's not
1226 * on one yet). When the callout wheel runs,
1227 * it will ignore this callout.
1228 */
1229 c->c_iflags &= ~CALLOUT_PENDING;
1230 c->c_flags &= ~CALLOUT_ACTIVE;
1231 not_on_a_list = 1;
1232 } else {
1233 not_on_a_list = 0;
1234 }
1235
1236 /*
1237 * If the callout was migrating while the callout cpu lock was
1238 * dropped, just drop the sleepqueue lock and check the states
1239 * again.
1240 */
1241 if (sq_locked != 0 && cc != old_cc) {
1242 #ifdef SMP
1243 CC_UNLOCK(cc);
1244 sleepq_release(&cc_exec_waiting(old_cc, direct));
1245 sq_locked = 0;
1246 old_cc = NULL;
1247 goto again;
1248 #else
1249 panic("migration should not happen");
1250 #endif
1251 }
1252
1253 /*
1254 * If the callout is running, try to stop it or drain it.
1255 */
1256 if (cc_exec_curr(cc, direct) == c) {
1257 /*
1258 * Whether we succeed in stopping it or not, we must clear the
1259 * active flag - this is what API users expect. If we're
1260 * draining and the callout is currently executing, first wait
1261 * until it finishes.
1262 */
1263 if ((flags & CS_DRAIN) == 0)
1264 c->c_flags &= ~CALLOUT_ACTIVE;
1265
1266 if ((flags & CS_DRAIN) != 0) {
1267 /*
1268 * The current callout is running (or just
1269 * about to run) and blocking is allowed, so
1270 * just wait for the current invocation to
1271 * finish.
1272 */
1273 while (cc_exec_curr(cc, direct) == c) {
1274 /*
1275 * Use direct calls to sleepqueue interface
1276 * instead of cv/msleep in order to avoid
1277 * a LOR between cc_lock and sleepqueue
1278 * chain spinlocks. This piece of code
1279 * emulates a msleep_spin() call actually.
1280 *
1281 * If we already have the sleepqueue chain
1282 * locked, then we can safely block. If we
1283 * don't already have it locked, however,
1284 * we have to drop the cc_lock to lock
1285 * it. This opens several races, so we
1286 * restart at the beginning once we have
1287 * both locks. If nothing has changed, then
1288 * we will end up back here with sq_locked
1289 * set.
1290 */
1291 if (!sq_locked) {
1292 CC_UNLOCK(cc);
1293 sleepq_lock(
1294 &cc_exec_waiting(cc, direct));
1295 sq_locked = 1;
1296 old_cc = cc;
1297 goto again;
1298 }
1299
1300 /*
1301 * Migration could be cancelled here, but
1302 * as long as it is still not certain when it
1303 * will be wrapped up, just let softclock()
1304 * take care of it.
1305 */
1306 cc_exec_waiting(cc, direct) = true;
1307 DROP_GIANT();
1308 CC_UNLOCK(cc);
1309 sleepq_add(
1310 &cc_exec_waiting(cc, direct),
1311 &cc->cc_lock.lock_object, "codrain",
1312 SLEEPQ_SLEEP, 0);
1313 sleepq_wait(
1314 &cc_exec_waiting(cc, direct),
1315 0);
1316 sq_locked = 0;
1317 old_cc = NULL;
1318
1319 /* Reacquire locks previously released. */
1320 PICKUP_GIANT();
1321 CC_LOCK(cc);
1322 }
1323 c->c_flags &= ~CALLOUT_ACTIVE;
1324 } else if (use_lock &&
1325 !cc_exec_cancel(cc, direct) && (drain == NULL)) {
1326
1327 /*
1328 * The current callout is waiting for its
1329 * lock which we hold. Cancel the callout
1330 * and return. After our caller drops the
1331 * lock, the callout will be skipped in
1332 * softclock(). This *only* works with
1333 * callout_stop(), *not* callout_drain() or
1334 * callout_async_drain().
1335 */
1336 cc_exec_cancel(cc, direct) = true;
1337 CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
1338 c, c->c_func, c->c_arg);
1339 KASSERT(!cc_cce_migrating(cc, direct),
1340 ("callout wrongly scheduled for migration"));
1341 if (callout_migrating(c)) {
1342 c->c_iflags &= ~CALLOUT_DFRMIGRATION;
1343 #ifdef SMP
1344 cc_migration_cpu(cc, direct) = CPUBLOCK;
1345 cc_migration_time(cc, direct) = 0;
1346 cc_migration_prec(cc, direct) = 0;
1347 cc_migration_func(cc, direct) = NULL;
1348 cc_migration_arg(cc, direct) = NULL;
1349 #endif
1350 }
1351 CC_UNLOCK(cc);
1352 KASSERT(!sq_locked, ("sleepqueue chain locked"));
1353 return (1);
1354 } else if (callout_migrating(c)) {
1355 /*
1356 * The callout is currently being serviced
1357 * and the "next" callout is scheduled at
1358 * its completion with a migration. We remove
1359 * the migration flag so it *won't* get rescheduled,
1360 * but we can't stop the one that's running, so
1361 * we return 0.
1362 */
1363 c->c_iflags &= ~CALLOUT_DFRMIGRATION;
1364 #ifdef SMP
1365 /*
1366 * We can't call cc_cce_cleanup here since
1367 * if we do it will clear cc_curr while the
1368 * callout is still running. That would prevent a
1369 * reschedule of the callout when the
1370 * execution completes.
1371 */
1372 cc_migration_cpu(cc, direct) = CPUBLOCK;
1373 cc_migration_time(cc, direct) = 0;
1374 cc_migration_prec(cc, direct) = 0;
1375 cc_migration_func(cc, direct) = NULL;
1376 cc_migration_arg(cc, direct) = NULL;
1377 #endif
1378 CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
1379 c, c->c_func, c->c_arg);
1380 if (drain) {
1381 cc_exec_drain(cc, direct) = drain;
1382 }
1383 CC_UNLOCK(cc);
1384 return ((flags & CS_EXECUTING) != 0);
1385 }
1386 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
1387 c, c->c_func, c->c_arg);
1388 if (drain) {
1389 cc_exec_drain(cc, direct) = drain;
1390 }
1391 KASSERT(!sq_locked, ("sleepqueue chain still locked"));
1392 cancelled = ((flags & CS_EXECUTING) != 0);
1393 } else
1394 cancelled = 1;
1395
1396 if (sq_locked)
1397 sleepq_release(&cc_exec_waiting(cc, direct));
1398
1399 if ((c->c_iflags & CALLOUT_PENDING) == 0) {
1400 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
1401 c, c->c_func, c->c_arg);
1402 /*
1403 * For a callout that is neither scheduled nor executing,
1404 * return a negative value.
1405 */
1406 if (cc_exec_curr(cc, direct) != c)
1407 cancelled = -1;
1408 CC_UNLOCK(cc);
1409 return (cancelled);
1410 }
1411
1412 c->c_iflags &= ~CALLOUT_PENDING;
1413 c->c_flags &= ~CALLOUT_ACTIVE;
1414
1415 CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
1416 c, c->c_func, c->c_arg);
1417 if (not_on_a_list == 0) {
1418 if ((c->c_iflags & CALLOUT_PROCESSED) == 0) {
1419 if (cc_exec_next(cc) == c)
1420 cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
1421 LIST_REMOVE(c, c_links.le);
1422 } else {
1423 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
1424 }
1425 }
1426 callout_cc_del(c, cc);
1427 CC_UNLOCK(cc);
1428 return (cancelled);
1429 }
1430
1431 void
1432 callout_init(struct callout *c, int mpsafe)
1433 {
1434 bzero(c, sizeof *c);
1435 if (mpsafe) {
1436 c->c_lock = NULL;
1437 c->c_iflags = CALLOUT_RETURNUNLOCKED;
1438 } else {
1439 c->c_lock = &Giant.lock_object;
1440 c->c_iflags = 0;
1441 }
1442 c->c_cpu = timeout_cpu;
1443 }
1444
1445 void
1446 _callout_init_lock(struct callout *c, struct lock_object *lock, int flags)
1447 {
1448 bzero(c, sizeof *c);
1449 c->c_lock = lock;
1450 KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
1451 ("callout_init_lock: bad flags %d", flags));
1452 KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
1453 ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
1454 KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
1455 (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
1456 __func__));
1457 c->c_iflags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
1458 c->c_cpu = timeout_cpu;
1459 }
1460
1461 #ifdef APM_FIXUP_CALLTODO
1462 /*
1463 * Adjust the kernel calltodo timeout list. This routine is used after
1464 * an APM resume to recalculate the calltodo timer list values with the
1465 * number of hz's we have been sleeping. The next hardclock() will detect
1466 * that there are fired timers and run softclock() to execute them.
1467 *
1468 * Please note, I have not done an exhaustive analysis of what code this
1469 * might break. I am motivated to have my select()'s and alarm()'s that
1470 * have expired during suspend firing upon resume so that the applications
1471 * which set the timer can do the maintenance the timer was for as close
1472 * as possible to the originally intended time. Testing this code for a
1473 * week showed that resuming from a suspend resulted in 22 to 25 timers
1474 * firing, which seemed independent of whether the suspend was 2 hours or
1475 * 2 days. Your mileage may vary. - Ken Key <key@cs.utk.edu>
1476 */
1477 void
1478 adjust_timeout_calltodo(struct timeval *time_change)
1479 {
1480 struct callout *p;
1481 unsigned long delta_ticks;
1482
1483 /*
1484 * How many ticks were we asleep?
1485 * (stolen from tvtohz()).
1486 */
1487
1488 /* Don't do anything */
1489 if (time_change->tv_sec < 0)
1490 return;
1491 else if (time_change->tv_sec <= LONG_MAX / 1000000)
1492 delta_ticks = howmany(time_change->tv_sec * 1000000 +
1493 time_change->tv_usec, tick) + 1;
1494 else if (time_change->tv_sec <= LONG_MAX / hz)
1495 delta_ticks = time_change->tv_sec * hz +
1496 howmany(time_change->tv_usec, tick) + 1;
1497 else
1498 delta_ticks = LONG_MAX;
1499
1500 if (delta_ticks > INT_MAX)
1501 delta_ticks = INT_MAX;
1502
1503 /*
1504 * Now rip through the timer calltodo list looking for timers
1505 * to expire.
1506 */
1507
1508 /* don't collide with softclock() */
1509 CC_LOCK(cc);
1510 for (p = calltodo.c_next; p != NULL; p = p->c_next) {
1511 p->c_time -= delta_ticks;
1512
1513 /* Break if the timer had more time on it than delta_ticks */
1514 if (p->c_time > 0)
1515 break;
1516
1517 /* take back the ticks the timer didn't use (p->c_time <= 0) */
1518 delta_ticks = -p->c_time;
1519 }
1520 CC_UNLOCK(cc);
1521
1522 return;
1523 }
1524 #endif /* APM_FIXUP_CALLTODO */
1525
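/*
 * Find-last-set for an sbintime_t, used to build the logarithmic
 * histograms below; adding half of the value first makes the result round
 * to the nearest power of two rather than truncate.
 */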
1526 static int
1527 flssbt(sbintime_t sbt)
1528 {
1529
1530 sbt += (uint64_t)sbt >> 1;
1531 if (sizeof(long) >= sizeof(sbintime_t))
1532 return (flsl(sbt));
1533 if (sbt >= SBT_1S)
1534 return (flsl(((uint64_t)sbt) >> 32) + 32);
1535 return (flsl(sbt));
1536 }
1537
1538 /*
1539 * Dump an immediate statistics snapshot of the scheduled callouts.
1540 */
1541 static int
1542 sysctl_kern_callout_stat(SYSCTL_HANDLER_ARGS)
1543 {
1544 struct callout *tmp;
1545 struct callout_cpu *cc;
1546 struct callout_list *sc;
1547 sbintime_t maxpr, maxt, medpr, medt, now, spr, st, t;
1548 int ct[64], cpr[64], ccpbk[32];
1549 int error, val, i, count, tcum, pcum, maxc, c, medc;
1550 #ifdef SMP
1551 int cpu;
1552 #endif
1553
1554 val = 0;
1555 error = sysctl_handle_int(oidp, &val, 0, req);
1556 if (error != 0 || req->newptr == NULL)
1557 return (error);
1558 count = maxc = 0;
1559 st = spr = maxt = maxpr = 0;
1560 bzero(ccpbk, sizeof(ccpbk));
1561 bzero(ct, sizeof(ct));
1562 bzero(cpr, sizeof(cpr));
1563 now = sbinuptime();
1564 #ifdef SMP
1565 CPU_FOREACH(cpu) {
1566 cc = CC_CPU(cpu);
1567 #else
1568 cc = CC_CPU(timeout_cpu);
1569 #endif
1570 CC_LOCK(cc);
1571 for (i = 0; i < callwheelsize; i++) {
1572 sc = &cc->cc_callwheel[i];
1573 c = 0;
1574 LIST_FOREACH(tmp, sc, c_links.le) {
1575 c++;
1576 t = tmp->c_time - now;
1577 if (t < 0)
1578 t = 0;
1579 st += t / SBT_1US;
1580 spr += tmp->c_precision / SBT_1US;
1581 if (t > maxt)
1582 maxt = t;
1583 if (tmp->c_precision > maxpr)
1584 maxpr = tmp->c_precision;
1585 ct[flssbt(t)]++;
1586 cpr[flssbt(tmp->c_precision)]++;
1587 }
1588 if (c > maxc)
1589 maxc = c;
1590 ccpbk[fls(c + c / 2)]++;
1591 count += c;
1592 }
1593 CC_UNLOCK(cc);
1594 #ifdef SMP
1595 }
1596 #endif
1597
1598 for (i = 0, tcum = 0; i < 64 && tcum < count / 2; i++)
1599 tcum += ct[i];
1600 medt = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
1601 for (i = 0, pcum = 0; i < 64 && pcum < count / 2; i++)
1602 pcum += cpr[i];
1603 medpr = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
1604 for (i = 0, c = 0; i < 32 && c < count / 2; i++)
1605 c += ccpbk[i];
1606 medc = (i >= 2) ? (1 << (i - 2)) : 0;
1607
1608 printf("Scheduled callouts statistic snapshot:\n");
1609 printf(" Callouts: %6d Buckets: %6d*%-3d Bucket size: 0.%06ds\n",
1610 count, callwheelsize, mp_ncpus, 1000000 >> CC_HASH_SHIFT);
1611 printf(" C/Bk: med %5d avg %6d.%06jd max %6d\n",
1612 medc,
1613 count / callwheelsize / mp_ncpus,
1614 (uint64_t)count * 1000000 / callwheelsize / mp_ncpus % 1000000,
1615 maxc);
1616 printf(" Time: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
1617 medt / SBT_1S, (medt & 0xffffffff) * 1000000 >> 32,
1618 (st / count) / 1000000, (st / count) % 1000000,
1619 maxt / SBT_1S, (maxt & 0xffffffff) * 1000000 >> 32);
1620 printf(" Prec: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
1621 medpr / SBT_1S, (medpr & 0xffffffff) * 1000000 >> 32,
1622 (spr / count) / 1000000, (spr / count) % 1000000,
1623 maxpr / SBT_1S, (maxpr & 0xffffffff) * 1000000 >> 32);
1624 printf(" Distribution: \tbuckets\t time\t tcum\t"
1625 " prec\t pcum\n");
1626 for (i = 0, tcum = pcum = 0; i < 64; i++) {
1627 if (ct[i] == 0 && cpr[i] == 0)
1628 continue;
1629 t = (i != 0) ? (((sbintime_t)1) << (i - 1)) : 0;
1630 tcum += ct[i];
1631 pcum += cpr[i];
1632 printf(" %10jd.%06jds\t 2**%d\t%7d\t%7d\t%7d\t%7d\n",
1633 t / SBT_1S, (t & 0xffffffff) * 1000000 >> 32,
1634 i - 1 - (32 - CC_HASH_SHIFT),
1635 ct[i], tcum, cpr[i], pcum);
1636 }
1637 return (error);
1638 }
1639 SYSCTL_PROC(_kern, OID_AUTO, callout_stat,
1640 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
1641 0, 0, sysctl_kern_callout_stat, "I",
1642 "Dump immediate statistic snapshot of the scheduled callouts");
1643
1644 #ifdef DDB
1645 static void
1646 _show_callout(struct callout *c)
1647 {
1648
1649 db_printf("callout %p\n", c);
1650 #define C_DB_PRINTF(f, e) db_printf(" %s = " f "\n", #e, c->e);
1651 db_printf(" &c_links = %p\n", &(c->c_links));
1652 C_DB_PRINTF("%" PRId64, c_time);
1653 C_DB_PRINTF("%" PRId64, c_precision);
1654 C_DB_PRINTF("%p", c_arg);
1655 C_DB_PRINTF("%p", c_func);
1656 C_DB_PRINTF("%p", c_lock);
1657 C_DB_PRINTF("%#x", c_flags);
1658 C_DB_PRINTF("%#x", c_iflags);
1659 C_DB_PRINTF("%d", c_cpu);
1660 #undef C_DB_PRINTF
1661 }
1662
1663 DB_SHOW_COMMAND(callout, db_show_callout)
1664 {
1665
1666 if (!have_addr) {
1667 db_printf("usage: show callout <struct callout *>\n");
1668 return;
1669 }
1670
1671 _show_callout((struct callout *)addr);
1672 }
1673 #endif /* DDB */