/*	$NetBSD: kern_timeout.c,v 1.43 2008/10/10 11:42:58 ad Exp $	*/

/*-
 * Copyright (c) 2003, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Thomas Nordin <nordin@openbsd.org>
 * Copyright (c) 2000-2001 Artur Grabowski <art@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL
 * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_timeout.c,v 1.43 2008/10/10 11:42:58 ad Exp $");
/*
 * Timeouts are kept in a hierarchical timing wheel.  The c_time is the
 * value of c_cpu->cc_ticks when the timeout should be called.  There are
 * four levels with 256 buckets each.  See 'Scheme 7' in "Hashed and
 * Hierarchical Timing Wheels: Efficient Data Structures for Implementing
 * a Timer Facility" by George Varghese and Tony Lauck.
 *
 * Some of the "math" in here is a bit tricky.  We have to beware of
 * wrapping ints.
 *
 * We rely on the fact that a callout is always scheduled with a
 * non-negative relative time, so no callout `c' on the queue can be due
 * more than INT_MAX ticks in the future.  c->c_time itself may wrap and
 * be positive or negative, so comparing it directly against anything is
 * dangerous.  The only predictable use of c->c_time is the relative
 * difference "c->c_time - c->c_cpu->cc_ticks", which tells how far in
 * the future `c' will time out: the result is always positive for
 * future timeouts and zero or negative for due timeouts.
 */
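
/*
 * Editor's illustration (not part of the original source): on a
 * two's-complement machine the subtraction above stays meaningful
 * across tick counter wrap.  With hypothetical 32-bit values:
 *
 *	int cc_ticks = 0x7ffffffe;	(near INT_MAX)
 *	int c_time = cc_ticks + 10;	(wraps to a negative value)
 *	int delta = c_time - cc_ticks;	(still 10: due 10 ticks from now)
 *
 * A direct comparison such as "c_time > cc_ticks" would wrongly report
 * this callout as due; only the difference is usable.
 */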

#define _CALLOUT_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/callout.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepq.h>
#include <sys/syncobj.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/cpu.h>
#include <sys/kmem.h>

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_access.h>
#include <ddb/db_sym.h>
#include <ddb/db_output.h>
#endif

#define BUCKETS		1024
#define WHEELSIZE	256
#define WHEELMASK	255
#define WHEELBITS	8

#define MASKWHEEL(wheel, time)	(((time) >> ((wheel)*WHEELBITS)) & WHEELMASK)

#define BUCKET(cc, rel, abs)						\
    (((rel) <= (1 << (2*WHEELBITS)))					\
	? (((rel) <= (1 << WHEELBITS))					\
	    ? &(cc)->cc_wheel[MASKWHEEL(0, (abs))]			\
	    : &(cc)->cc_wheel[MASKWHEEL(1, (abs)) + WHEELSIZE])		\
	: (((rel) <= (1 << (3*WHEELBITS)))				\
	    ? &(cc)->cc_wheel[MASKWHEEL(2, (abs)) + 2*WHEELSIZE]	\
	    : &(cc)->cc_wheel[MASKWHEEL(3, (abs)) + 3*WHEELSIZE]))

#define MOVEBUCKET(cc, wheel, time)					\
    CIRCQ_APPEND(&(cc)->cc_todo,					\
	&(cc)->cc_wheel[MASKWHEEL((wheel), (time)) + (wheel)*WHEELSIZE])
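
/*
 * Worked example (editor's illustration): with WHEELBITS == 8, a
 * callout due in rel == 5 ticks satisfies rel <= (1 << WHEELBITS) and
 * selects wheel 0, bucket (abs & 255).  One due in rel == 70000 ticks
 * (between 2^16 and 2^24) selects wheel 2, bucket
 * ((abs >> 16) & 255) + 2*WHEELSIZE.  Higher wheels hold coarser
 * buckets; callout_hardclock() below cascades their contents toward
 * wheel 0 as the tick counter advances.
 */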

/*
 * Circular queue definitions.
 */

#define CIRCQ_INIT(list)						\
do {									\
	(list)->cq_next_l = (list);					\
	(list)->cq_prev_l = (list);					\
} while (/*CONSTCOND*/0)

#define CIRCQ_INSERT(elem, list)					\
do {									\
	(elem)->cq_prev_e = (list)->cq_prev_e;				\
	(elem)->cq_next_l = (list);					\
	(list)->cq_prev_l->cq_next_l = (elem);				\
	(list)->cq_prev_l = (elem);					\
} while (/*CONSTCOND*/0)

#define CIRCQ_APPEND(fst, snd)						\
do {									\
	if (!CIRCQ_EMPTY(snd)) {					\
		(fst)->cq_prev_l->cq_next_l = (snd)->cq_next_l;		\
		(snd)->cq_next_l->cq_prev_l = (fst)->cq_prev_l;		\
		(snd)->cq_prev_l->cq_next_l = (fst);			\
		(fst)->cq_prev_l = (snd)->cq_prev_l;			\
		CIRCQ_INIT(snd);					\
	}								\
} while (/*CONSTCOND*/0)

#define CIRCQ_REMOVE(elem)						\
do {									\
	(elem)->cq_next_l->cq_prev_e = (elem)->cq_prev_e;		\
	(elem)->cq_prev_l->cq_next_e = (elem)->cq_next_e;		\
} while (/*CONSTCOND*/0)

#define CIRCQ_FIRST(list)	((list)->cq_next_e)
#define CIRCQ_NEXT(elem)	((elem)->cq_next_e)
#define CIRCQ_LAST(elem, list)	((elem)->cq_next_l == (list))
#define CIRCQ_EMPTY(list)	((list)->cq_next_l == (list))
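
/*
 * Editor's note: the _l/_e accessors above name the two members of a
 * union in struct callout_circq (sys/callout.h, visible under
 * _CALLOUT_PRIVATE), so the same node can be viewed either as a list
 * head or as an embedded element.  The macros mix the two views
 * deliberately; both union members have identical layout.
 */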

static void	callout_softclock(void *);

struct callout_cpu {
	kmutex_t	cc_lock;
	sleepq_t	cc_sleepq;
	u_int		cc_nwait;
	u_int		cc_ticks;
	lwp_t		*cc_lwp;
	callout_impl_t	*cc_active;
	callout_impl_t	*cc_cancel;
	struct evcnt	cc_ev_late;
	struct evcnt	cc_ev_block;
	struct callout_circq cc_todo;		/* Worklist */
	struct callout_circq cc_wheel[BUCKETS];	/* Queues of timeouts */
	char		cc_name1[12];
	char		cc_name2[12];
};

static struct callout_cpu callout_cpu0;
static void *callout_sih;

static inline kmutex_t *
callout_lock(callout_impl_t *c)
{
	kmutex_t *lock;

	/*
	 * The callout can migrate to another CPU while we are waiting
	 * for the lock, so verify that c_cpu is unchanged once the
	 * lock is held, and retry if it is not.
	 */
	for (;;) {
		lock = &c->c_cpu->cc_lock;
		mutex_spin_enter(lock);
		if (__predict_true(lock == &c->c_cpu->cc_lock))
			return lock;
		mutex_spin_exit(lock);
	}
}

/*
 * callout_startup:
 *
 *	Initialize the callout facility, called at system startup time.
 *	Do just enough to allow callouts to be safely registered.
 */
void
callout_startup(void)
{
	struct callout_cpu *cc;
	int b;

	KASSERT(curcpu()->ci_data.cpu_callout == NULL);

	cc = &callout_cpu0;
	mutex_init(&cc->cc_lock, MUTEX_DEFAULT, IPL_SCHED);
	CIRCQ_INIT(&cc->cc_todo);
	for (b = 0; b < BUCKETS; b++)
		CIRCQ_INIT(&cc->cc_wheel[b]);
	curcpu()->ci_data.cpu_callout = cc;
}

/*
 * callout_init_cpu:
 *
 *	Per-CPU initialization.
 */
void
callout_init_cpu(struct cpu_info *ci)
{
	struct callout_cpu *cc;
	int b;

	CTASSERT(sizeof(callout_impl_t) <= sizeof(callout_t));

	if ((cc = ci->ci_data.cpu_callout) == NULL) {
		cc = kmem_zalloc(sizeof(*cc), KM_SLEEP);
		if (cc == NULL)
			panic("callout_init_cpu (1)");
		mutex_init(&cc->cc_lock, MUTEX_DEFAULT, IPL_SCHED);
		CIRCQ_INIT(&cc->cc_todo);
		for (b = 0; b < BUCKETS; b++)
			CIRCQ_INIT(&cc->cc_wheel[b]);
	} else {
		/* Boot CPU, one time only. */
		callout_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE,
		    callout_softclock, NULL);
		if (callout_sih == NULL)
			panic("callout_init_cpu (2)");
	}

	sleepq_init(&cc->cc_sleepq);

	snprintf(cc->cc_name1, sizeof(cc->cc_name1), "late/%u",
	    cpu_index(ci));
	evcnt_attach_dynamic(&cc->cc_ev_late, EVCNT_TYPE_MISC,
	    NULL, "callout", cc->cc_name1);

	snprintf(cc->cc_name2, sizeof(cc->cc_name2), "wait/%u",
	    cpu_index(ci));
	evcnt_attach_dynamic(&cc->cc_ev_block, EVCNT_TYPE_MISC,
	    NULL, "callout", cc->cc_name2);

	ci->ci_data.cpu_callout = cc;
}

/*
 * callout_init:
 *
 *	Initialize a callout structure.  This must be quick, so we fill
 *	only the minimum number of fields.
 */
void
callout_init(callout_t *cs, u_int flags)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;

	KASSERT((flags & ~CALLOUT_FLAGMASK) == 0);

	cc = curcpu()->ci_data.cpu_callout;
	c->c_func = NULL;
	c->c_magic = CALLOUT_MAGIC;
	if (__predict_true((flags & CALLOUT_MPSAFE) != 0 && cc != NULL)) {
		c->c_flags = flags;
		c->c_cpu = cc;
		return;
	}
	c->c_flags = flags | CALLOUT_BOUND;
	c->c_cpu = &callout_cpu0;
}
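
/*
 * Typical usage sketch (editor's example; foo_softc, foo_attach and
 * foo_tick are hypothetical):
 *
 *	struct foo_softc {
 *		callout_t	sc_tick;
 *	};
 *
 *	static void
 *	foo_tick(void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		... periodic work ...
 *		callout_schedule(&sc->sc_tick, hz);
 *	}
 *
 *	static void
 *	foo_attach(struct foo_softc *sc)
 *	{
 *		callout_init(&sc->sc_tick, CALLOUT_MPSAFE);
 *		callout_setfunc(&sc->sc_tick, foo_tick, sc);
 *		callout_schedule(&sc->sc_tick, hz);
 *	}
 */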

/*
 * callout_destroy:
 *
 *	Destroy a callout structure.  The callout must be stopped.
 */
void
callout_destroy(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;

	/*
	 * It's not necessary to lock in order to see the correct value
	 * of c->c_flags.  If the callout could potentially have been
	 * running, the current thread should have stopped it.
	 */
	KASSERT((c->c_flags & CALLOUT_PENDING) == 0);
	KASSERT(c->c_cpu->cc_lwp == curlwp || c->c_cpu->cc_active != c);
	KASSERT(c->c_magic == CALLOUT_MAGIC);
	c->c_magic = 0;
}

/*
 * callout_schedule_locked:
 *
 *	Schedule a callout to run.  The function and argument must
 *	already be set in the callout structure.  Must be called with
 *	callout_lock.
 */
static void
callout_schedule_locked(callout_impl_t *c, kmutex_t *lock, int to_ticks)
{
	struct callout_cpu *cc, *occ;
	int old_time;

	KASSERT(to_ticks >= 0);
	KASSERT(c->c_func != NULL);

	/* Note the callout's current home CPU; clear stale dispatch state. */
	occ = c->c_cpu;
	c->c_flags &= ~(CALLOUT_FIRED | CALLOUT_INVOKING);

	/*
	 * If this timeout is already scheduled and is now being moved
	 * earlier, requeue it on the worklist now.  Otherwise leave it
	 * in place; it will be sorted into the right bucket when the
	 * wheel next cascades.
	 */
	if ((c->c_flags & CALLOUT_PENDING) != 0) {
		/* Leave on existing CPU. */
		old_time = c->c_time;
		c->c_time = to_ticks + occ->cc_ticks;
		if (c->c_time - old_time < 0) {
			CIRCQ_REMOVE(&c->c_list);
			CIRCQ_INSERT(&c->c_list, &occ->cc_todo);
		}
		mutex_spin_exit(lock);
		return;
	}

	cc = curcpu()->ci_data.cpu_callout;
	if ((c->c_flags & CALLOUT_BOUND) != 0 || cc == occ ||
	    !mutex_tryenter(&cc->cc_lock)) {
		/* Leave on existing CPU. */
		c->c_time = to_ticks + occ->cc_ticks;
		c->c_flags |= CALLOUT_PENDING;
		CIRCQ_INSERT(&c->c_list, &occ->cc_todo);
	} else {
		/* Move to this CPU. */
		c->c_cpu = cc;
		c->c_time = to_ticks + cc->cc_ticks;
		c->c_flags |= CALLOUT_PENDING;
		CIRCQ_INSERT(&c->c_list, &cc->cc_todo);
		mutex_spin_exit(&cc->cc_lock);
	}
	mutex_spin_exit(lock);
}

/*
 * callout_reset:
 *
 *	Reset a callout structure with a new function and argument, and
 *	schedule it to run.
 */
void
callout_reset(callout_t *cs, int to_ticks, void (*func)(void *), void *arg)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT(func != NULL);

	lock = callout_lock(c);
	c->c_func = func;
	c->c_arg = arg;
	callout_schedule_locked(c, lock, to_ticks);
}

/*
 * callout_schedule:
 *
 *	Schedule a callout to run.  The function and argument must
 *	already be set in the callout structure.
 */
void
callout_schedule(callout_t *cs, int to_ticks)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	callout_schedule_locked(c, lock, to_ticks);
}

/*
 * callout_stop:
 *
 *	Try to cancel a pending callout.  It may be too late: the callout
 *	could be running on another CPU.  If called from interrupt context,
 *	the callout could already be in progress at a lower priority.
 */
bool
callout_stop(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;
	kmutex_t *lock;
	bool expired;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);

	if ((c->c_flags & CALLOUT_PENDING) != 0)
		CIRCQ_REMOVE(&c->c_list);
	expired = ((c->c_flags & CALLOUT_FIRED) != 0);
	c->c_flags &= ~(CALLOUT_PENDING|CALLOUT_FIRED);

	cc = c->c_cpu;
	if (cc->cc_active == c) {
		/*
		 * This is for non-MPSAFE callouts only.  To synchronize
		 * effectively we must be called with kernel_lock held.
		 * It's also taken in callout_softclock.
		 */
		cc->cc_cancel = c;
	}

	mutex_spin_exit(lock);

	return expired;
}

/*
 * callout_halt:
 *
 *	Cancel a pending callout.  If in-flight, block until it completes.
 *	May not be called from a hard interrupt handler.  If the callout
 *	can take locks, the caller of callout_halt() must not hold any of
 *	those locks, otherwise the two could deadlock.  If 'interlock' is
 *	non-NULL and we must wait for the callout to complete, it will be
 *	released and re-acquired before returning.
 */
bool
callout_halt(callout_t *cs, void *interlock)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;
	struct lwp *l;
	kmutex_t *lock, *relock;
	bool expired;

	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT(!cpu_intr_p());

	lock = callout_lock(c);
	relock = NULL;

	expired = ((c->c_flags & CALLOUT_FIRED) != 0);
	if ((c->c_flags & CALLOUT_PENDING) != 0)
		CIRCQ_REMOVE(&c->c_list);
	c->c_flags &= ~(CALLOUT_PENDING|CALLOUT_FIRED);

	l = curlwp;
	for (;;) {
		cc = c->c_cpu;
		if (__predict_true(cc->cc_active != c || cc->cc_lwp == l))
			break;
		if (interlock != NULL) {
			/*
			 * Avoid potential scheduler lock order problems by
			 * dropping the interlock without the callout lock
			 * held.
			 */
			mutex_spin_exit(lock);
			mutex_exit(interlock);
			relock = interlock;
			interlock = NULL;
		} else {
			/* XXX Better to do priority inheritance. */
			KASSERT(l->l_wchan == NULL);
			cc->cc_nwait++;
			cc->cc_ev_block.ev_count++;
			l->l_kpriority = true;
			sleepq_enter(&cc->cc_sleepq, l, &cc->cc_lock);
			sleepq_enqueue(&cc->cc_sleepq, cc, "callout",
			    &sleep_syncobj);
			sleepq_block(0, false);
		}
		lock = callout_lock(c);
	}

	mutex_spin_exit(lock);
	if (__predict_false(relock != NULL))
		mutex_enter(relock);

	return expired;
}
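
/*
 * Usage sketch for the interlock argument (editor's example; sc_lock,
 * sc_tick and foo_detach are hypothetical).  If the callout handler
 * takes sc_lock, pass it as the interlock so that callout_halt() can
 * drop it while waiting for a running handler instead of deadlocking:
 *
 *	static void
 *	foo_detach(struct foo_softc *sc)
 *	{
 *		mutex_enter(&sc->sc_lock);
 *		... mark the softc as dying ...
 *		callout_halt(&sc->sc_tick, &sc->sc_lock);
 *		mutex_exit(&sc->sc_lock);
 *	}
 *
 * On return the callout is neither pending nor running, and sc_lock
 * has been re-acquired if it had to be dropped.
 */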

#ifdef notyet
/*
 * callout_bind:
 *
 *	Bind a callout so that it will only execute on one CPU.
 *	The callout must be stopped, and must be MPSAFE.
 *
 *	XXX Disabled for now until it is decided how to handle
 *	offlined CPUs.  We may want weak+strong binding.
 */
void
callout_bind(callout_t *cs, struct cpu_info *ci)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;
	kmutex_t *lock;

	KASSERT((c->c_flags & CALLOUT_PENDING) == 0);
	KASSERT(c->c_cpu->cc_active != c);
	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT((c->c_flags & CALLOUT_MPSAFE) != 0);

	lock = callout_lock(c);
	cc = ci->ci_data.cpu_callout;
	c->c_flags |= CALLOUT_BOUND;
	if (c->c_cpu != cc) {
		/*
		 * Assigning c_cpu effectively unlocks the callout
		 * structure, as we don't hold the new CPU's lock.
		 * Issue memory barrier to prevent accesses being
		 * reordered.
		 */
		membar_exit();
		c->c_cpu = cc;
	}
	mutex_spin_exit(lock);
}
#endif

void
callout_setfunc(callout_t *cs, void (*func)(void *), void *arg)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT(func != NULL);

	lock = callout_lock(c);
	c->c_func = func;
	c->c_arg = arg;
	mutex_spin_exit(lock);
}

bool
callout_expired(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	rv = ((c->c_flags & CALLOUT_FIRED) != 0);
	mutex_spin_exit(lock);

	return rv;
}

bool
callout_active(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	rv = ((c->c_flags & (CALLOUT_PENDING|CALLOUT_FIRED)) != 0);
	mutex_spin_exit(lock);

	return rv;
}

bool
callout_pending(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	rv = ((c->c_flags & CALLOUT_PENDING) != 0);
	mutex_spin_exit(lock);

	return rv;
}

bool
callout_invoking(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	rv = ((c->c_flags & CALLOUT_INVOKING) != 0);
	mutex_spin_exit(lock);

	return rv;
}

void
callout_ack(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	c->c_flags &= ~CALLOUT_INVOKING;
	mutex_spin_exit(lock);
}
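
/*
 * Editor's note: CALLOUT_INVOKING is set when a callout is dispatched
 * (see callout_softclock() below) and cleared only by callout_ack().
 * A self-rescheduling handler can acknowledge the dispatch up front,
 * letting other code use callout_invoking() to detect a dispatch that
 * raced with a callout_stop().  Hypothetical sketch:
 *
 *	static void
 *	foo_tick(void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		callout_ack(&sc->sc_tick);
 *		... periodic work ...
 *		callout_schedule(&sc->sc_tick, hz);
 *	}
 */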

/*
 * callout_hardclock:
 *
 *	Called from hardclock() once every tick.  We schedule a soft
 *	interrupt if there is work to be done.
 */
void
callout_hardclock(void)
{
	struct callout_cpu *cc;
	int needsoftclock, ticks;

	cc = curcpu()->ci_data.cpu_callout;
	mutex_spin_enter(&cc->cc_lock);

	ticks = ++cc->cc_ticks;

	MOVEBUCKET(cc, 0, ticks);
	if (MASKWHEEL(0, ticks) == 0) {
		MOVEBUCKET(cc, 1, ticks);
		if (MASKWHEEL(1, ticks) == 0) {
			MOVEBUCKET(cc, 2, ticks);
			if (MASKWHEEL(2, ticks) == 0)
				MOVEBUCKET(cc, 3, ticks);
		}
	}

	needsoftclock = !CIRCQ_EMPTY(&cc->cc_todo);
	mutex_spin_exit(&cc->cc_lock);

	if (needsoftclock)
		softint_schedule(callout_sih);
}
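
/*
 * Worked example (editor's illustration): when cc_ticks advances to
 * 256, MASKWHEEL(0, 256) == 0, so the wheel 1 bucket indexed by
 * MASKWHEEL(1, 256) == 1 is emptied onto the worklist, and
 * callout_softclock() re-sorts its entries into wheel 0 buckets or
 * runs them.  The cascade reaches wheel 2 every 65536 ticks and
 * wheel 3 every 16777216 ticks.
 */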

/*
 * callout_softclock:
 *
 *	Soft interrupt handler, scheduled above if there is work to
 *	be done.  Callouts are made in soft interrupt context.
 */
static void
callout_softclock(void *v)
{
	callout_impl_t *c;
	struct callout_cpu *cc;
	void (*func)(void *);
	void *arg;
	int mpsafe, count, ticks, delta;
	lwp_t *l;

	l = curlwp;
	KASSERT(l->l_cpu == curcpu());
	cc = l->l_cpu->ci_data.cpu_callout;

	mutex_spin_enter(&cc->cc_lock);
	cc->cc_lwp = l;
	while (!CIRCQ_EMPTY(&cc->cc_todo)) {
		c = CIRCQ_FIRST(&cc->cc_todo);
		KASSERT(c->c_magic == CALLOUT_MAGIC);
		KASSERT(c->c_func != NULL);
		KASSERT(c->c_cpu == cc);
		KASSERT((c->c_flags & CALLOUT_PENDING) != 0);
		KASSERT((c->c_flags & CALLOUT_FIRED) == 0);
		CIRCQ_REMOVE(&c->c_list);

		/* If due, run it; otherwise insert it into the right bucket. */
		ticks = cc->cc_ticks;
		delta = c->c_time - ticks;
		if (delta > 0) {
			CIRCQ_INSERT(&c->c_list, BUCKET(cc, delta, c->c_time));
			continue;
		}
		if (delta < 0)
			cc->cc_ev_late.ev_count++;

		c->c_flags = (c->c_flags & ~CALLOUT_PENDING) |
		    (CALLOUT_FIRED | CALLOUT_INVOKING);
		mpsafe = (c->c_flags & CALLOUT_MPSAFE);
		func = c->c_func;
		arg = c->c_arg;
		cc->cc_active = c;

		mutex_spin_exit(&cc->cc_lock);
		KASSERT(func != NULL);
		if (!mpsafe) {
			KERNEL_LOCK(1, NULL);
			(*func)(arg);
			KERNEL_UNLOCK_ONE(NULL);
		} else
			(*func)(arg);
		mutex_spin_enter(&cc->cc_lock);

		/*
		 * We can't touch 'c' here because it might be
		 * freed already.  If there are LWPs waiting for
		 * the callout to complete, awaken them.
		 */
		cc->cc_active = NULL;
		if ((count = cc->cc_nwait) != 0) {
			cc->cc_nwait = 0;
			/* sleepq_wake() drops the lock. */
			sleepq_wake(&cc->cc_sleepq, cc, count, &cc->cc_lock);
			mutex_spin_enter(&cc->cc_lock);
		}
	}
	cc->cc_lwp = NULL;
	mutex_spin_exit(&cc->cc_lock);
}

#ifdef DDB
static void
db_show_callout_bucket(struct callout_cpu *cc, struct callout_circq *bucket)
{
	callout_impl_t *c;
	db_expr_t offset;
	const char *name;
	static char question[] = "?";
	int b;

	if (CIRCQ_EMPTY(bucket))
		return;

	for (c = CIRCQ_FIRST(bucket); /*nothing*/; c = CIRCQ_NEXT(&c->c_list)) {
		db_find_sym_and_offset((db_addr_t)(intptr_t)c->c_func, &name,
		    &offset);
		name = name ? name : question;
		b = (bucket - cc->cc_wheel);
		if (b < 0)
			b = -WHEELSIZE;
		db_printf("%9d %2d/%-4d %16lx %s\n",
		    c->c_time - cc->cc_ticks, b / WHEELSIZE, b,
		    (u_long)c->c_arg, name);
		if (CIRCQ_LAST(&c->c_list, bucket))
			break;
	}
}

void
db_show_callout(db_expr_t addr, bool haddr, db_expr_t count, const char *modif)
{
	CPU_INFO_ITERATOR cii;
	struct callout_cpu *cc;
	struct cpu_info *ci;
	int b;

	db_printf("hardclock_ticks now: %d\n", hardclock_ticks);
	db_printf("    ticks  wheel              arg func\n");

	/*
	 * Don't lock the callwheel; all the other CPUs are paused
	 * anyhow, and we might be called in a circumstance where
	 * some other CPU was paused while holding the lock.
	 */
	for (CPU_INFO_FOREACH(cii, ci)) {
		cc = ci->ci_data.cpu_callout;
		db_show_callout_bucket(cc, &cc->cc_todo);
	}
	for (b = 0; b < BUCKETS; b++) {
		for (CPU_INFO_FOREACH(cii, ci)) {
			cc = ci->ci_data.cpu_callout;
			db_show_callout_bucket(cc, &cc->cc_wheel[b]);
		}
	}
}
#endif /* DDB */