/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.3/sys/kern/kern_timeout.c 225288 2011-08-31 09:14:56Z attilio $");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
#include <sys/smp.h>

#ifdef SMP
#include <machine/cpu.h>
#endif

SDT_PROVIDER_DEFINE(callout_execute);
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_start, callout-start);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_start, 0,
    "struct callout *");
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_end, callout-end);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_end, 0,
    "struct callout *");

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_lockcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
    "Average number of lock callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */
int callwheelsize, callwheelbits, callwheelmask;

/*
 * The callout cpu migration entity represents the information needed to
 * describe a callout that is migrating to a new callout cpu.
 * The cached information is very important for deferring migration when
 * the migrating callout is already running.
 */
struct cc_mig_ent {
#ifdef SMP
	void	(*ce_migration_func)(void *);
	void	*ce_migration_arg;
	int	ce_migration_cpu;
	int	ce_migration_ticks;
#endif
};

/*
 * There is one struct callout_cpu per cpu, holding all relevant
 * state for the callout processing thread on the individual CPU.
 * In particular:
 *	cc_ticks is incremented once per tick in callout_tick().
 *	It tracks the global 'ticks' but in a way that the individual
 *	threads should not worry about races in the order in which
 *	hardclock() and hardclock_cpu() run on the various CPUs.
 *	cc_softticks is advanced in callout_tick() to point to the
 *	first entry in cc_callwheel that may need handling.  In turn,
 *	a softclock() is scheduled so it can serve the various entries i
 *	such that cc_softticks <= i <= cc_ticks.
 *	XXX maybe cc_softticks and cc_ticks should be volatile ?
 *
 *	cc_ticks is also used in callout_reset_on() to determine
 *	when the callout should be served.
 */
struct callout_cpu {
	struct cc_mig_ent	cc_migrating_entity;
	struct mtx		cc_lock;
	struct callout		*cc_callout;
	struct callout_tailq	*cc_callwheel;
	struct callout_list	cc_callfree;
	struct callout		*cc_next;
	struct callout		*cc_curr;
	void			*cc_cookie;
	int			cc_ticks;
	int			cc_softticks;
	int			cc_cancel;
	int			cc_waiting;
};

#ifdef SMP
#define	cc_migration_func	cc_migrating_entity.ce_migration_func
#define	cc_migration_arg	cc_migrating_entity.ce_migration_arg
#define	cc_migration_cpu	cc_migrating_entity.ce_migration_cpu
#define	cc_migration_ticks	cc_migrating_entity.ce_migration_ticks

struct callout_cpu cc_cpu[MAXCPU];
#define	CPUBLOCK	MAXCPU
#define	CC_CPU(cpu)	(&cc_cpu[(cpu)])
#define	CC_SELF()	CC_CPU(PCPU_GET(cpuid))
#else
struct callout_cpu cc_cpu;
#define	CC_CPU(cpu)	&cc_cpu
#define	CC_SELF()	&cc_cpu
#endif
#define	CC_LOCK(cc)	mtx_lock_spin(&(cc)->cc_lock)
#define	CC_UNLOCK(cc)	mtx_unlock_spin(&(cc)->cc_lock)
#define	CC_LOCK_ASSERT(cc)	mtx_assert(&(cc)->cc_lock, MA_OWNED)

static int timeout_cpu;

MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");

/**
 * Locked by cc_lock:
 *   cc_curr - If a callout is in progress, it is cc_curr.
 *             If cc_curr is non-NULL, threads waiting in
 *             callout_drain() will be woken up as soon as the
 *             relevant callout completes.
 *   cc_cancel - Changing to 1 with both cc_lock and c_lock held
 *             guarantees that the current callout will not run.
 *             The softclock() function sets this to 0 before it
 *             drops cc_lock to acquire c_lock, and it calls
 *             the handler only if cc_cancel is still 0 after
 *             c_lock is successfully acquired.
 *   cc_waiting - If a thread is waiting in callout_drain(), then
 *             cc_waiting is nonzero.  Set only when
 *             cc_curr is non-NULL.
 */

/*
 * Resets the migration entity tied to a specific callout cpu.
 */
static void
cc_cme_cleanup(struct callout_cpu *cc)
{

#ifdef SMP
	cc->cc_migration_cpu = CPUBLOCK;
	cc->cc_migration_ticks = 0;
	cc->cc_migration_func = NULL;
	cc->cc_migration_arg = NULL;
#endif
}

/*
 * Checks if migration is requested by a specific callout cpu.
 */
static int
cc_cme_migrating(struct callout_cpu *cc)
{

#ifdef SMP
	return (cc->cc_migration_cpu != CPUBLOCK);
#else
	return (0);
#endif
}

/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 * This code is called very early in the kernel initialization sequence,
 * and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
	struct callout_cpu *cc;

	timeout_cpu = PCPU_GET(cpuid);
	cc = CC_CPU(timeout_cpu);
	/*
	 * Calculate callout wheel size
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;
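
	/*
	 * Worked example (values illustrative only; ncallout is sized
	 * elsewhere from maxproc and maxfiles): if ncallout were 1300,
	 * the loop above would round it up to the next power of two,
	 * leaving callwheelsize = 2048, callwheelbits = 11 and
	 * callwheelmask = 0x7ff, so a bucket index can be computed
	 * cheaply as (time & callwheelmask).
	 */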

	cc->cc_callout = (struct callout *)v;
	v = (caddr_t)(cc->cc_callout + ncallout);
	cc->cc_callwheel = (struct callout_tailq *)v;
	v = (caddr_t)(cc->cc_callwheel + callwheelsize);
	return(v);
}

static void
callout_cpu_init(struct callout_cpu *cc)
{
	struct callout *c;
	int i;

	mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
	SLIST_INIT(&cc->cc_callfree);
	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&cc->cc_callwheel[i]);
	}
	cc_cme_cleanup(cc);
	if (cc->cc_callout == NULL)
		return;
	for (i = 0; i < ncallout; i++) {
		c = &cc->cc_callout[i];
		callout_init(c, 0);
		c->c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
}

#ifdef SMP
/*
 * Switches the cpu tied to a specific callout.
 * The function expects a locked incoming callout cpu and returns with the
 * outgoing callout cpu locked.
 */
static struct callout_cpu *
callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
{
	struct callout_cpu *new_cc;

	MPASS(c != NULL && cc != NULL);
	CC_LOCK_ASSERT(cc);

	/*
	 * Avoid interrupts and preemption firing after the callout cpu
	 * is blocked in order to avoid deadlocks as the new thread
	 * may try to acquire the callout cpu lock.
	 */
	c->c_cpu = CPUBLOCK;
	spinlock_enter();
	CC_UNLOCK(cc);
	new_cc = CC_CPU(new_cpu);
	CC_LOCK(new_cc);
	spinlock_exit();
	c->c_cpu = new_cpu;
	return (new_cc);
}
#endif

/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 * This code is called just once, after the space reserved for the
 * callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
	callout_cpu_init(CC_CPU(timeout_cpu));
}

/*
 * Start standard softclock thread.
 */
void *softclock_ih;

static void
start_softclock(void *dummy)
{
	struct callout_cpu *cc;
#ifdef SMP
	int cpu;
#endif

	cc = CC_CPU(timeout_cpu);
	if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK,
	    INTR_MPSAFE, &softclock_ih))
		panic("died while creating standard software ithreads");
	cc->cc_cookie = softclock_ih;
#ifdef SMP
	CPU_FOREACH(cpu) {
		if (cpu == timeout_cpu)
			continue;
		cc = CC_CPU(cpu);
		if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
		    INTR_MPSAFE, &cc->cc_cookie))
			panic("died while creating standard software ithreads");
		cc->cc_callout = NULL;	/* Only cpu0 handles timeout(). */
		cc->cc_callwheel = malloc(
		    sizeof(struct callout_tailq) * callwheelsize, M_CALLOUT,
		    M_WAITOK);
		callout_cpu_init(cc);
	}
#endif
}

SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);

void
callout_tick(void)
{
	struct callout_cpu *cc;
	int need_softclock;
	int bucket;

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	need_softclock = 0;
	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
	cc->cc_ticks++;
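	/*
	 * Note on the loop condition below: comparing via the signed
	 * difference (cc_softticks - cc_ticks) rather than with a plain
	 * '<=' keeps the scan correct even when the tick counters wrap
	 * past INT_MAX.
	 */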
	for (; (cc->cc_softticks - cc->cc_ticks) <= 0; cc->cc_softticks++) {
		bucket = cc->cc_softticks & callwheelmask;
		if (!TAILQ_EMPTY(&cc->cc_callwheel[bucket])) {
			need_softclock = 1;
			break;
		}
	}
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	/*
	 * swi_sched acquires the thread lock, so we don't want to call it
	 * with cc_lock held; incorrect locking order.
	 */
	if (need_softclock)
		swi_sched(cc->cc_cookie, 0);
}

static struct callout_cpu *
callout_lock(struct callout *c)
{
	struct callout_cpu *cc;
	int cpu;

	for (;;) {
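		/*
		 * c_cpu is set to CPUBLOCK transiently while the callout
		 * migrates between cpus (see callout_cpu_switch()); spin
		 * until a stable owner is visible, then take that cpu's
		 * lock and re-check that the owner did not change in the
		 * meantime.
		 */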
		cpu = c->c_cpu;
#ifdef SMP
		if (cpu == CPUBLOCK) {
			while (c->c_cpu == CPUBLOCK)
				cpu_spinwait();
			continue;
		}
#endif
		cc = CC_CPU(cpu);
		CC_LOCK(cc);
		if (cpu == c->c_cpu)
			break;
		CC_UNLOCK(cc);
	}
	return (cc);
}

static void
callout_cc_add(struct callout *c, struct callout_cpu *cc, int to_ticks,
    void (*func)(void *), void *arg, int cpu)
{

	CC_LOCK_ASSERT(cc);

	if (to_ticks <= 0)
		to_ticks = 1;
	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = func;
	c->c_time = cc->cc_ticks + to_ticks;
	TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
	    c, c_links.tqe);
}
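
/*
 * Illustrative note (the numbers are an assumption, not computed here):
 * with a 2048-slot wheel, a callout due at tick 3000 and one due at tick
 * 5048 both hash to bucket (3000 & 0x7ff) == 952.  The wheel therefore
 * only sorts callouts coarsely; softclock() below still compares c_time
 * against the current tick before running an entry, so entries from a
 * "future lap" of the wheel are skipped rather than fired early.
 */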

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *arg)
{
	struct callout_cpu *cc;
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int lockcalls;
	int gcalls;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
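	/*
	 * Note (arithmetic, for the reader): struct bintime fractions
	 * count in units of 1/2^64 seconds, so the 2 msec threshold
	 * above is 0.002 * 2^64 ~= 36893488147419102.
	 */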
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define	MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	lockcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	cc = (struct callout_cpu *)arg;
	CC_LOCK(cc);
	while (cc->cc_softticks - 1 != cc->cc_ticks) {
		/*
		 * cc_softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = cc->cc_softticks;
		cc->cc_softticks++;
		bucket = &cc->cc_callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					cc->cc_next = c;
					/* Give interrupts a chance. */
					CC_UNLOCK(cc);
					;	/* nothing */
					CC_LOCK(cc);
					c = cc->cc_next;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				struct lock_class *class;
				struct lock_object *c_lock;
				int c_flags, sharedlock;

				cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				class = (c->c_lock != NULL) ?
				    LOCK_CLASS(c->c_lock) : NULL;
				sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ?
				    0 : 1;
				c_lock = c->c_lock;
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				cc->cc_curr = c;
				cc->cc_cancel = 0;
				CC_UNLOCK(cc);
				if (c_lock != NULL) {
					class->lc_lock(c_lock, sharedlock);
					/*
					 * The callout may have been cancelled
					 * while we switched locks.
					 */
					if (cc->cc_cancel) {
						class->lc_unlock(c_lock);
						goto skip;
					}
					/* The callout cannot be stopped now. */
					cc->cc_cancel = 1;

					if (c_lock == &Giant.lock_object) {
						gcalls++;
						CTR3(KTR_CALLOUT,
						    "callout %p func %p arg %p",
						    c, c_func, c_arg);
					} else {
						lockcalls++;
						CTR3(KTR_CALLOUT, "callout lock"
						    " %p func %p arg %p",
						    c, c_func, c_arg);
					}
				} else {
					mpcalls++;
					CTR3(KTR_CALLOUT,
					    "callout mpsafe %p func %p arg %p",
					    c, c_func, c_arg);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
#endif
				THREAD_NO_SLEEPING();
				SDT_PROBE(callout_execute, kernel, ,
				    callout_start, c, 0, 0, 0, 0);
				c_func(c_arg);
				SDT_PROBE(callout_execute, kernel, ,
				    callout_end, c, 0, 0, 0, 0);
				THREAD_SLEEPING_OK();
#ifdef DIAGNOSTIC
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				CTR1(KTR_CALLOUT, "callout %p finished", c);
				if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
					class->lc_unlock(c_lock);
			skip:
				CC_LOCK(cc);
				/*
				 * If the current callout is locally
				 * allocated (from timeout(9))
				 * then put it on the freelist.
				 *
				 * Note: we need to check the cached
				 * copy of c_flags because if it was not
				 * local, then it's not safe to deref the
				 * callout pointer.
				 */
				if (c_flags & CALLOUT_LOCAL_ALLOC) {
					KASSERT(c->c_flags ==
					    CALLOUT_LOCAL_ALLOC,
					    ("corrupted callout"));
					c->c_func = NULL;
					SLIST_INSERT_HEAD(&cc->cc_callfree, c,
					    c_links.sle);
				}
				cc->cc_curr = NULL;
				if (cc->cc_waiting) {

					/*
					 * There is someone waiting for the
					 * callout to complete.
					 * If the callout was scheduled for
					 * migration just cancel it.
					 */
					if (cc_cme_migrating(cc))
						cc_cme_cleanup(cc);
					cc->cc_waiting = 0;
					CC_UNLOCK(cc);
					wakeup(&cc->cc_waiting);
					CC_LOCK(cc);
				} else if (cc_cme_migrating(cc)) {
#ifdef SMP
					struct callout_cpu *new_cc;
					void (*new_func)(void *);
					void *new_arg;
					int new_cpu, new_ticks;

					/*
					 * If the callout was scheduled for
					 * migration just perform it now.
					 */
					new_cpu = cc->cc_migration_cpu;
					new_ticks = cc->cc_migration_ticks;
					new_func = cc->cc_migration_func;
					new_arg = cc->cc_migration_arg;
					cc_cme_cleanup(cc);

					/*
					 * It should be asserted here that
					 * the callout is not destroyed but
					 * that is not easy.
					 */
					new_cc = callout_cpu_switch(c, cc,
					    new_cpu);
					callout_cc_add(c, new_cc, new_ticks,
					    new_func, new_arg, new_cpu);
					CC_UNLOCK(new_cc);
					CC_LOCK(cc);
#else
					panic("migration should not happen");
#endif
				}
				steps = 0;
				c = cc->cc_next;
			}
		}
	}
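	/*
	 * Update the sysctl statistics.  Each is a fixed-point (units of
	 * 1/1000) exponential moving average that folds in the new sample
	 * with a weight of 1/256, matching the "Units = 1/1000" note on
	 * the debug.to_avg_* sysctls above.
	 */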
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	cc->cc_next = NULL;
	CC_UNLOCK(cc);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
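
/*
 * Illustrative sketch (not part of this file): a typical legacy consumer,
 * where 'sc' and 'my_func' are hypothetical driver names:
 *
 *	struct callout_handle h;
 *
 *	callout_handle_init(&h);
 *	h = timeout(my_func, sc, hz);	-- run my_func(sc) in ~1 second
 *	untimeout(my_func, sc, h);	-- cancel it if still pending
 */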
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	struct callout_cpu *cc;
	struct callout *new;
	struct callout_handle handle;

	cc = CC_CPU(timeout_cpu);
	CC_LOCK(cc);
	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&cc->cc_callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
	callout_reset(new, to_ticks, ftn, arg);
	handle.callout = new;
	CC_UNLOCK(cc);

	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{
	struct callout_cpu *cc;

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	cc = callout_lock(handle.callout);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	CC_UNLOCK(cc);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
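
/*
 * Illustrative sketch (not part of this file): the usual pattern for this
 * interface, with the callout protected by a hypothetical driver mutex:
 *
 *	callout_init_mtx(&sc->sc_callout, &sc->sc_mtx, 0);
 *	callout_reset(&sc->sc_callout, hz, my_tick, sc);
 *	...
 *	mtx_lock(&sc->sc_mtx);
 *	callout_stop(&sc->sc_callout);
 *	mtx_unlock(&sc->sc_mtx);
 *
 * callout_reset() and callout_init_mtx() are wrapper macros from
 * <sys/callout.h> that end up in callout_reset_on() below and in
 * _callout_init_lock() respectively.
 */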
int
callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *),
    void *arg, int cpu)
{
	struct callout_cpu *cc;
	int cancelled = 0;

	/*
	 * Don't allow migration of pre-allocated callouts lest they
	 * become unbalanced.
	 */
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
		cpu = c->c_cpu;
	cc = callout_lock(c);
	if (cc->cc_curr == c) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress.  If there is a lock then we
		 * can cancel the callout if it has not really started.
		 */
		if (c->c_lock != NULL && !cc->cc_cancel)
			cancelled = cc->cc_cancel = 1;
		if (cc->cc_waiting) {
			/*
			 * Someone has called callout_drain to kill this
			 * callout.  Don't reschedule.
			 */
			CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
			    cancelled ? "cancelled" : "failed to cancel",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			return (cancelled);
		}
	}
	if (c->c_flags & CALLOUT_PENDING) {
		if (cc->cc_next == c) {
			cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
		}
		TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
		    c_links.tqe);

		cancelled = 1;
		c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
	}

#ifdef SMP
	/*
	 * If the callout must migrate try to perform it immediately.
	 * If the callout is currently running, just defer the migration
	 * to a more appropriate moment.
	 */
	if (c->c_cpu != cpu) {
		if (cc->cc_curr == c) {
			cc->cc_migration_cpu = cpu;
			cc->cc_migration_ticks = to_ticks;
			cc->cc_migration_func = ftn;
			cc->cc_migration_arg = arg;
			CTR5(KTR_CALLOUT,
			    "migration of %p func %p arg %p in %d to %u deferred",
			    c, c->c_func, c->c_arg, to_ticks, cpu);
			CC_UNLOCK(cc);
			return (cancelled);
		}
		cc = callout_cpu_switch(c, cc, cpu);
	}
#endif

	callout_cc_add(c, cc, to_ticks, ftn, arg, cpu);
	CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
	    cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
	CC_UNLOCK(cc);

	return (cancelled);
}

/*
 * Common idioms that can be optimized in the future.
 */
int
callout_schedule_on(struct callout *c, int to_ticks, int cpu)
{
	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
}

int
callout_schedule(struct callout *c, int to_ticks)
{
	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
}

int
_callout_stop_safe(c, safe)
	struct	callout *c;
	int	safe;
{
	struct callout_cpu *cc, *old_cc;
	struct lock_class *class;
	int use_lock, sq_locked;

	/*
	 * Some old subsystems don't hold Giant while running a callout_stop(),
	 * so just discard this check for the moment.
	 */
	if (!safe && c->c_lock != NULL) {
		if (c->c_lock == &Giant.lock_object)
			use_lock = mtx_owned(&Giant);
		else {
			use_lock = 1;
			class = LOCK_CLASS(c->c_lock);
			class->lc_assert(c->c_lock, LA_XLOCKED);
		}
	} else
		use_lock = 0;

	sq_locked = 0;
	old_cc = NULL;
again:
	cc = callout_lock(c);

	/*
	 * If the callout was migrating while the callout cpu lock was
	 * dropped, just drop the sleepqueue lock and check the states
	 * again.
	 */
	if (sq_locked != 0 && cc != old_cc) {
#ifdef SMP
		CC_UNLOCK(cc);
		sleepq_release(&old_cc->cc_waiting);
		sq_locked = 0;
		old_cc = NULL;
		goto again;
#else
		panic("migration should not happen");
#endif
	}

	/*
	 * If the callout isn't pending, it's not on the queue, so
	 * don't attempt to remove it from the queue.  We can try to
	 * stop it by other means however.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;

		/*
		 * If it wasn't on the queue and it isn't the current
		 * callout, then we can't stop it, so just bail.
		 */
		if (cc->cc_curr != c) {
			CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			if (sq_locked)
				sleepq_release(&cc->cc_waiting);
			return (0);
		}

		if (safe) {
			/*
			 * The current callout is running (or just
			 * about to run) and blocking is allowed, so
			 * just wait for the current invocation to
			 * finish.
			 */
			while (cc->cc_curr == c) {

				/*
				 * Use direct calls to sleepqueue interface
				 * instead of cv/msleep in order to avoid
				 * a LOR between cc_lock and sleepqueue
				 * chain spinlocks.  This piece of code
				 * emulates a msleep_spin() call actually.
				 *
				 * If we already have the sleepqueue chain
				 * locked, then we can safely block.  If we
				 * don't already have it locked, however,
				 * we have to drop the cc_lock to lock
				 * it.  This opens several races, so we
				 * restart at the beginning once we have
				 * both locks.  If nothing has changed, then
				 * we will end up back here with sq_locked
				 * set.
				 */
				if (!sq_locked) {
					CC_UNLOCK(cc);
					sleepq_lock(&cc->cc_waiting);
					sq_locked = 1;
					old_cc = cc;
					goto again;
				}

				/*
				 * Migration could be cancelled here, but
				 * as long as it is not certain when it
				 * would actually be performed, just let
				 * softclock() take care of it.
				 */
				cc->cc_waiting = 1;
				DROP_GIANT();
				CC_UNLOCK(cc);
				sleepq_add(&cc->cc_waiting,
				    &cc->cc_lock.lock_object, "codrain",
				    SLEEPQ_SLEEP, 0);
				sleepq_wait(&cc->cc_waiting, 0);
				sq_locked = 0;
				old_cc = NULL;

				/* Reacquire locks previously released. */
				PICKUP_GIANT();
				CC_LOCK(cc);
			}
		} else if (use_lock && !cc->cc_cancel) {
			/*
			 * The current callout is waiting for its
			 * lock which we hold.  Cancel the callout
			 * and return.  After our caller drops the
			 * lock, the callout will be skipped in
			 * softclock().
			 */
			cc->cc_cancel = 1;
			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			KASSERT(!cc_cme_migrating(cc),
			    ("callout wrongly scheduled for migration"));
			CC_UNLOCK(cc);
			KASSERT(!sq_locked, ("sleepqueue chain locked"));
			return (1);
		}
		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
		    c, c->c_func, c->c_arg);
		CC_UNLOCK(cc);
		KASSERT(!sq_locked, ("sleepqueue chain still locked"));
		return (0);
	}
	if (sq_locked)
		sleepq_release(&cc->cc_waiting);

	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (cc->cc_next == c) {
		cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
	}
	TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
	    c_links.tqe);

	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
	    c, c->c_func, c->c_arg);

	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		c->c_func = NULL;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
	CC_UNLOCK(cc);
	return (1);
}
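
/*
 * Note for the reader: callers normally reach the function above through
 * the <sys/callout.h> wrappers, where callout_stop(c) expands to
 * _callout_stop_safe(c, 0) (may not block) and callout_drain(c) expands
 * to _callout_stop_safe(c, 1) (may sleep until a running handler finishes).
 */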

void
callout_init(c, mpsafe)
	struct	callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe) {
		c->c_lock = NULL;
		c->c_flags = CALLOUT_RETURNUNLOCKED;
	} else {
		c->c_lock = &Giant.lock_object;
		c->c_flags = 0;
	}
	c->c_cpu = timeout_cpu;
}

void
_callout_init_lock(c, lock, flags)
	struct	callout *c;
	struct	lock_object *lock;
	int flags;
{
	bzero(c, sizeof *c);
	c->c_lock = lock;
	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
	    ("callout_init_lock: bad flags %d", flags));
	KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
	    (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
	    __func__));
	c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
	c->c_cpu = timeout_cpu;
}
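
/*
 * Note for the reader: drivers rarely call _callout_init_lock() directly;
 * the callout_init_mtx() and callout_init_rw() macros in <sys/callout.h>
 * pass the embedded lock_object of a mutex or rwlock here, which is what
 * the LC_SPINLOCK/LC_SLEEPABLE assertion above is guarding against.
 */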

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.  - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
	struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	CC_LOCK(cc);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	CC_UNLOCK(cc);

	return;
}
#endif /* APM_FIXUP_CALLTODO */