1 /*-
2 * Copyright (c) 1982, 1986, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * From: @(#)kern_clock.c 8.5 (Berkeley) 1/21/94
35 */
36
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD: releng/7.4/sys/kern/kern_timeout.c 187561 2009-01-21 18:54:35Z jhb $");
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/callout.h>
43 #include <sys/condvar.h>
44 #include <sys/kernel.h>
45 #include <sys/ktr.h>
46 #include <sys/lock.h>
47 #include <sys/mutex.h>
48 #include <sys/proc.h>
49 #include <sys/sleepqueue.h>
50 #include <sys/sysctl.h>
51
52 static int avg_depth;
53 SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
54 "Average number of items examined per softclock call. Units = 1/1000");
55 static int avg_gcalls;
56 SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
57 "Average number of Giant callouts made per softclock call. Units = 1/1000");
58 static int avg_mtxcalls;
59 SYSCTL_INT(_debug, OID_AUTO, to_avg_mtxcalls, CTLFLAG_RD, &avg_mtxcalls, 0,
60 "Average number of mtx callouts made per softclock call. Units = 1/1000");
61 static int avg_mpcalls;
62 SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
63 "Average number of MP callouts made per softclock call. Units = 1/1000");
64 /*
65 * TODO:
66 * allocate more timeout table slots when table overflows.
67 */
68
69 /* Exported to machdep.c and/or kern_clock.c. */
70 struct callout *callout;
71 struct callout_list callfree;
72 int callwheelsize, callwheelbits, callwheelmask;
73 struct callout_tailq *callwheel;
74 int softticks; /* Like ticks, but for softclock(). */
75 struct mtx callout_lock;
76
77 static struct callout *nextsoftcheck; /* Next callout to be checked. */
78
79 /**
80 * Locked by callout_lock:
81 * curr_callout - If a callout is in progress, it is curr_callout.
82 * If curr_callout is non-NULL, threads waiting in
83 * callout_drain() will be woken up as soon as the
84 * relevant callout completes.
85 * curr_cancelled - Changing to 1 with both callout_lock and c_mtx held
86 * guarantees that the current callout will not run.
87 * The softclock() function sets this to 0 before it
88 * drops callout_lock to acquire c_mtx, and it calls
89 * the handler only if curr_cancelled is still 0 after
90 * c_mtx is successfully acquired.
91 * callout_wait - If a thread is waiting in callout_drain(), then
92 * callout_wait is nonzero. Set only when
93 * curr_callout is non-NULL.
94 */
95 static struct callout *curr_callout;
96 static int curr_cancelled;
97 static int callout_wait;
98
99 /*
100 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
101 *
102 * This code is called very early in the kernel initialization sequence,
103 * and may be called more than once.
104 */
105 caddr_t
106 kern_timeout_callwheel_alloc(caddr_t v)
107 {
108 /*
109 * Calculate callout wheel size
110 */
111 for (callwheelsize = 1, callwheelbits = 0;
112 callwheelsize < ncallout;
113 callwheelsize <<= 1, ++callwheelbits)
114 ;
115 callwheelmask = callwheelsize - 1;
116
117 callout = (struct callout *)v;
118 v = (caddr_t)(callout + ncallout);
119 callwheel = (struct callout_tailq *)v;
120 v = (caddr_t)(callwheel + callwheelsize);
121 return(v);
122 }
123
124 /*
125 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
126 * space.
127 *
128 * This code is called just once, after the space reserved for the
129 * callout wheel has been finalized.
130 */
131 void
132 kern_timeout_callwheel_init(void)
133 {
134 int i;
135
136 SLIST_INIT(&callfree);
137 for (i = 0; i < ncallout; i++) {
138 callout_init(&callout[i], 0);
139 callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
140 SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
141 }
142 for (i = 0; i < callwheelsize; i++) {
143 TAILQ_INIT(&callwheel[i]);
144 }
145 mtx_init(&callout_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
146 }
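/*
 * Example: the two-phase startup pattern the comments above describe.
 * This is a hedged sketch only; the real callers live in machine-dependent
 * startup code (see the "Exported to machdep.c and/or kern_clock.c" note
 * above), and the "v" bookkeeping below is illustrative:
 *
 *	caddr_t v;
 *
 *	// Phase 1: carve the callout[] and callwheel[] arrays out of a
 *	// chunk of memory reserved during early boot; the returned
 *	// pointer marks the first byte past the callwheel arrays.
 *	v = kern_timeout_callwheel_alloc(v);
 *
 *	// Phase 2: once that memory is final, build the callfree list,
 *	// initialize the empty wheel buckets and create callout_lock.
 *	kern_timeout_callwheel_init();
 */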
147
148 /*
149 * The callout mechanism is based on the work of Adam M. Costello and
150 * George Varghese, published in a technical report entitled "Redesigning
151 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
152 * in FreeBSD by Justin T. Gibbs. The original work on the data structures
153 * used in this implementation was published by G. Varghese and T. Lauck in
154 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
155 * the Efficient Implementation of a Timer Facility" in the Proceedings of
156 * the 11th ACM Annual Symposium on Operating Systems Principles,
157 * Austin, Texas Nov 1987.
158 */
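/*
 * Worked example of the hashing described above (numbers are illustrative):
 * with callwheelsize = 256 (callwheelmask = 0xff), a callout armed at
 * ticks = 1000 for 300 ticks gets c_time = 1300 and is appended to
 * callwheel[1300 & 0xff] = callwheel[20].  softclock() visits bucket 20
 * the next time softticks & 0xff == 20, i.e. at softticks = 1044, sees
 * that c_time != curticks and skips the entry, and finally runs it one
 * full revolution later when softticks reaches 1300.
 */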
159
160 /*
161 * Software (low priority) clock interrupt.
162 * Run periodic events from timeout queue.
163 */
164 void
165 softclock(void *dummy)
166 {
167 struct callout *c;
168 struct callout_tailq *bucket;
169 int curticks;
170 int steps; /* #steps since we last allowed interrupts */
171 int depth;
172 int mpcalls;
173 int mtxcalls;
174 int gcalls;
175 #ifdef DIAGNOSTIC
176 struct bintime bt1, bt2;
177 struct timespec ts2;
178 static uint64_t maxdt = 36893488147419102LL; /* 2 msec */
179 static timeout_t *lastfunc;
180 #endif
181
182 #ifndef MAX_SOFTCLOCK_STEPS
183 #define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
184 #endif /* MAX_SOFTCLOCK_STEPS */
185
186 mpcalls = 0;
187 mtxcalls = 0;
188 gcalls = 0;
189 depth = 0;
190 steps = 0;
191 mtx_lock_spin(&callout_lock);
192 while (softticks != ticks) {
193 softticks++;
194 /*
195 * softticks may be modified by hard clock, so cache
196 * it while we work on a given bucket.
197 */
198 curticks = softticks;
199 bucket = &callwheel[curticks & callwheelmask];
200 c = TAILQ_FIRST(bucket);
201 while (c) {
202 depth++;
203 if (c->c_time != curticks) {
204 c = TAILQ_NEXT(c, c_links.tqe);
205 ++steps;
206 if (steps >= MAX_SOFTCLOCK_STEPS) {
207 nextsoftcheck = c;
208 /* Give interrupts a chance. */
209 mtx_unlock_spin(&callout_lock);
210 ; /* nothing */
211 mtx_lock_spin(&callout_lock);
212 c = nextsoftcheck;
213 steps = 0;
214 }
215 } else {
216 void (*c_func)(void *);
217 void *c_arg;
218 struct mtx *c_mtx;
219 int c_flags;
220
221 nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
222 TAILQ_REMOVE(bucket, c, c_links.tqe);
223 c_func = c->c_func;
224 c_arg = c->c_arg;
225 c_mtx = c->c_mtx;
226 c_flags = c->c_flags;
227 if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
228 c->c_func = NULL;
229 c->c_flags = CALLOUT_LOCAL_ALLOC;
230 SLIST_INSERT_HEAD(&callfree, c,
231 c_links.sle);
232 curr_callout = NULL;
233 } else {
234 c->c_flags =
235 (c->c_flags & ~CALLOUT_PENDING);
236 curr_callout = c;
237 }
238 curr_cancelled = 0;
239 mtx_unlock_spin(&callout_lock);
240 if (c_mtx != NULL) {
241 mtx_lock(c_mtx);
242 /*
243 * The callout may have been cancelled
244 * while we switched locks.
245 */
246 if (curr_cancelled) {
247 mtx_unlock(c_mtx);
248 goto skip;
249 }
250 /* The callout cannot be stopped now. */
251 curr_cancelled = 1;
252
253 if (c_mtx == &Giant) {
254 gcalls++;
255 CTR3(KTR_CALLOUT,
256 "callout %p func %p arg %p",
257 c, c_func, c_arg);
258 } else {
259 mtxcalls++;
260 CTR3(KTR_CALLOUT, "callout mtx"
261 " %p func %p arg %p",
262 c, c_func, c_arg);
263 }
264 } else {
265 mpcalls++;
266 CTR3(KTR_CALLOUT,
267 "callout mpsafe %p func %p arg %p",
268 c, c_func, c_arg);
269 }
270 #ifdef DIAGNOSTIC
271 binuptime(&bt1);
272 #endif
273 THREAD_NO_SLEEPING();
274 c_func(c_arg);
275 THREAD_SLEEPING_OK();
276 #ifdef DIAGNOSTIC
277 binuptime(&bt2);
278 bintime_sub(&bt2, &bt1);
279 if (bt2.frac > maxdt) {
280 if (lastfunc != c_func ||
281 bt2.frac > maxdt * 2) {
282 bintime2timespec(&bt2, &ts2);
283 printf(
284 "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
285 c_func, c_arg,
286 (intmax_t)ts2.tv_sec,
287 ts2.tv_nsec);
288 }
289 maxdt = bt2.frac;
290 lastfunc = c_func;
291 }
292 #endif
293 CTR1(KTR_CALLOUT, "callout %p finished", c);
294 if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
295 mtx_unlock(c_mtx);
296 skip:
297 mtx_lock_spin(&callout_lock);
298 curr_callout = NULL;
299 if (callout_wait) {
300 /*
301 * There is someone waiting
302 * for the callout to complete.
303 */
304 callout_wait = 0;
305 mtx_unlock_spin(&callout_lock);
306 wakeup(&callout_wait);
307 mtx_lock_spin(&callout_lock);
308 }
309 steps = 0;
310 c = nextsoftcheck;
311 }
312 }
313 }
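/*
 * Update the exported running averages.  Each is a fixed-point
 * exponentially weighted moving average with a gain of 1/256 per
 * softclock() call, kept scaled by 1000 to match the
 * "Units = 1/1000" sysctl descriptions above.
 */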
314 avg_depth += (depth * 1000 - avg_depth) >> 8;
315 avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
316 avg_mtxcalls += (mtxcalls * 1000 - avg_mtxcalls) >> 8;
317 avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
318 nextsoftcheck = NULL;
319 mtx_unlock_spin(&callout_lock);
320 }
321
322 /*
323 * timeout --
324 * Execute a function after a specified length of time.
325 *
326 * untimeout --
327 * Cancel previous timeout function call.
328 *
329 * callout_handle_init --
330 * Initialize a handle so that using it with untimeout is benign.
331 *
332 * See AT&T BCI Driver Reference Manual for specification. This
333 * implementation differs from that one in that although an
334 * identification value is returned from timeout, the original
335 * arguments to timeout as well as the identifier are used to
336 * identify entries for untimeout.
337 */
338 struct callout_handle
339 timeout(ftn, arg, to_ticks)
340 timeout_t *ftn;
341 void *arg;
342 int to_ticks;
343 {
344 struct callout *new;
345 struct callout_handle handle;
346
347 mtx_lock_spin(&callout_lock);
348
349 /* Fill in the next free callout structure. */
350 new = SLIST_FIRST(&callfree);
351 if (new == NULL)
352 /* XXX Attempt to malloc first */
353 panic("timeout table full");
354 SLIST_REMOVE_HEAD(&callfree, c_links.sle);
355
356 callout_reset(new, to_ticks, ftn, arg);
357
358 handle.callout = new;
359 mtx_unlock_spin(&callout_lock);
360 return (handle);
361 }
362
363 void
364 untimeout(ftn, arg, handle)
365 timeout_t *ftn;
366 void *arg;
367 struct callout_handle handle;
368 {
369
370 /*
371 * Check for a handle that was initialized
372 * by callout_handle_init, but never used
373 * for a real timeout.
374 */
375 if (handle.callout == NULL)
376 return;
377
378 mtx_lock_spin(&callout_lock);
379 if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
380 callout_stop(handle.callout);
381 mtx_unlock_spin(&callout_lock);
382 }
383
384 void
385 callout_handle_init(struct callout_handle *handle)
386 {
387 handle->callout = NULL;
388 }
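/*
 * Example: hypothetical driver use of the legacy timeout(9) interface
 * documented above.  A sketch only; the mydriver_* names and the softc
 * argument are illustrative, not part of this file:
 *
 *	#include <sys/callout.h>
 *
 *	static struct callout_handle mydriver_handle;
 *
 *	static void
 *	mydriver_expire(void *arg)
 *	{
 *		// Runs from softclock() roughly hz ticks (one second) later.
 *	}
 *
 *	static void
 *	mydriver_start(void *sc)
 *	{
 *		callout_handle_init(&mydriver_handle);
 *		mydriver_handle = timeout(mydriver_expire, sc, hz);
 *	}
 *
 *	static void
 *	mydriver_stop(void *sc)
 *	{
 *		// Both the handle and the original function/argument pair
 *		// are needed to identify the entry, as noted above.
 *		untimeout(mydriver_expire, sc, mydriver_handle);
 *	}
 */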
389
390 /*
391 * New interface; clients allocate their own callout structures.
392 *
393 * callout_reset() - establish or change a timeout
394 * callout_stop() - disestablish a timeout
395 * callout_init() - initialize a callout structure so that it can
396 * safely be passed to callout_reset() and callout_stop()
397 *
398 * <sys/callout.h> defines three convenience macros:
399 *
400 * callout_active() - returns truth if callout has not been stopped,
401 * drained, or deactivated since the last time the callout was
402 * reset.
403 * callout_pending() - returns truth if callout is still waiting for timeout
404 * callout_deactivate() - marks the callout as having been serviced
405 */
406 int
407 callout_reset(c, to_ticks, ftn, arg)
408 struct callout *c;
409 int to_ticks;
410 void (*ftn)(void *);
411 void *arg;
412 {
413 int cancelled = 0;
414
415 #ifdef notyet /* Some callers of timeout() do not hold Giant. */
416 if (c->c_mtx != NULL)
417 mtx_assert(c->c_mtx, MA_OWNED);
418 #endif
419
420 mtx_lock_spin(&callout_lock);
421 if (c == curr_callout) {
422 /*
423 * We're being asked to reschedule a callout which is
424 * currently in progress. If there is a mutex then we
425 * can cancel the callout if it has not really started.
426 */
427 if (c->c_mtx != NULL && !curr_cancelled)
428 cancelled = curr_cancelled = 1;
429 if (callout_wait) {
430 /*
431 * Someone has called callout_drain to kill this
432 * callout. Don't reschedule.
433 */
434 CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
435 cancelled ? "cancelled" : "failed to cancel",
436 c, c->c_func, c->c_arg);
437 mtx_unlock_spin(&callout_lock);
438 return (cancelled);
439 }
440 }
441 if (c->c_flags & CALLOUT_PENDING) {
442 if (nextsoftcheck == c) {
443 nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
444 }
445 TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c,
446 c_links.tqe);
447
448 cancelled = 1;
449
450 /*
451 * Part of the normal "stop a pending callout" process
452 * is to clear the CALLOUT_ACTIVE and CALLOUT_PENDING
453 * flags. We're not going to bother doing that here,
454 * because we're going to be setting those flags ten lines
455 * after this point, and we're holding callout_lock
456 * between now and then.
457 */
458 }
459
460 /*
461 * We could unlock callout_lock here and lock it again before the
462 * TAILQ_INSERT_TAIL, but there's no point since doing this setup
463 * doesn't take much time.
464 */
465 if (to_ticks <= 0)
466 to_ticks = 1;
467
468 c->c_arg = arg;
469 c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
470 c->c_func = ftn;
471 c->c_time = ticks + to_ticks;
472 TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
473 c, c_links.tqe);
474 CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
475 cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
476 mtx_unlock_spin(&callout_lock);
477
478 return (cancelled);
479 }
480
481 int
482 _callout_stop_safe(c, safe)
483 struct callout *c;
484 int safe;
485 {
486 int use_mtx, sq_locked;
487
488 if (!safe && c->c_mtx != NULL) {
489 #ifdef notyet /* Some callers do not hold Giant for Giant-locked callouts. */
490 mtx_assert(c->c_mtx, MA_OWNED);
491 use_mtx = 1;
492 #else
493 use_mtx = mtx_owned(c->c_mtx);
494 #endif
495 } else {
496 use_mtx = 0;
497 }
498
499 sq_locked = 0;
500 again:
501 mtx_lock_spin(&callout_lock);
502 /*
503 * If the callout isn't pending, it's not on the queue, so
504 * don't attempt to remove it from the queue. We can try to
505 * stop it by other means however.
506 */
507 if (!(c->c_flags & CALLOUT_PENDING)) {
508 c->c_flags &= ~CALLOUT_ACTIVE;
509
510 /*
511 * If it wasn't on the queue and it isn't the current
512 * callout, then we can't stop it, so just bail.
513 */
514 if (c != curr_callout) {
515 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
516 c, c->c_func, c->c_arg);
517 mtx_unlock_spin(&callout_lock);
518 if (sq_locked)
519 sleepq_release(&callout_wait);
520 return (0);
521 }
522
523 if (safe) {
524 /*
525 * The current callout is running (or just
526 * about to run) and blocking is allowed, so
527 * just wait for the current invocation to
528 * finish.
529 */
530 while (c == curr_callout) {
531
532 /*
533 * Use direct calls to sleepqueue interface
534 * instead of cv/msleep in order to avoid
535 * a LOR between callout_lock and sleepqueue
536 * chain spinlocks. In effect, this code
537 * emulates an msleep_spin() call.
538 *
539 * If we already have the sleepqueue chain
540 * locked, then we can safely block. If we
541 * don't already have it locked, however,
542 * we have to drop the callout_lock to lock
543 * it. This opens several races, so we
544 * restart at the beginning once we have
545 * both locks. If nothing has changed, then
546 * we will end up back here with sq_locked
547 * set.
548 */
549 if (!sq_locked) {
550 mtx_unlock_spin(&callout_lock);
551 sleepq_lock(&callout_wait);
552 sq_locked = 1;
553 goto again;
554 }
555
556 callout_wait = 1;
557 DROP_GIANT();
558 mtx_unlock_spin(&callout_lock);
559 sleepq_add(&callout_wait,
560 &callout_lock.lock_object, "codrain",
561 SLEEPQ_SLEEP, 0);
562 sleepq_wait(&callout_wait);
563 sq_locked = 0;
564
565 /* Reacquire locks previously released. */
566 PICKUP_GIANT();
567 mtx_lock_spin(&callout_lock);
568 }
569 } else if (use_mtx && !curr_cancelled) {
570 /*
571 * The current callout is waiting for its
572 * mutex, which we hold. Cancel the callout
573 * and return. After our caller drops the
574 * mutex, the callout will be skipped in
575 * softclock().
576 */
577 curr_cancelled = 1;
578 CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
579 c, c->c_func, c->c_arg);
580 mtx_unlock_spin(&callout_lock);
581 KASSERT(!sq_locked, ("sleepqueue chain locked"));
582 return (1);
583 }
584 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
585 c, c->c_func, c->c_arg);
586 mtx_unlock_spin(&callout_lock);
587 KASSERT(!sq_locked, ("sleepqueue chain still locked"));
588 return (0);
589 }
590 if (sq_locked)
591 sleepq_release(&callout_wait);
592
593 c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
594
595 if (nextsoftcheck == c) {
596 nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
597 }
598 TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe);
599
600 CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
601 c, c->c_func, c->c_arg);
602
603 if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
604 c->c_func = NULL;
605 SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
606 }
607 mtx_unlock_spin(&callout_lock);
608 return (1);
609 }
610
611 void
612 callout_init(c, mpsafe)
613 struct callout *c;
614 int mpsafe;
615 {
616 bzero(c, sizeof *c);
617 if (mpsafe) {
618 c->c_mtx = NULL;
619 c->c_flags = CALLOUT_RETURNUNLOCKED;
620 } else {
621 c->c_mtx = &Giant;
622 c->c_flags = 0;
623 }
624 }
625
626 void
627 callout_init_mtx(c, mtx, flags)
628 struct callout *c;
629 struct mtx *mtx;
630 int flags;
631 {
632 bzero(c, sizeof *c);
633 c->c_mtx = mtx;
634 KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED)) == 0,
635 ("callout_init_mtx: bad flags %d", flags));
636 /* CALLOUT_RETURNUNLOCKED makes no sense without a mutex. */
637 KASSERT(mtx != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
638 ("callout_init_mtx: CALLOUT_RETURNUNLOCKED with no mutex"));
639 c->c_flags = flags & (CALLOUT_RETURNUNLOCKED);
640 }
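/*
 * Example: hypothetical driver use of the caller-allocated interface
 * (callout_init_mtx(), callout_reset(), callout_drain()).  A sketch only;
 * the my_softc/my_* names are illustrative:
 *
 *	struct my_softc {
 *		struct mtx	sc_mtx;
 *		struct callout	sc_timer;
 *	};
 *
 *	static void
 *	my_tick(void *arg)
 *	{
 *		struct my_softc *sc = arg;
 *
 *		// sc_mtx is held on entry because the callout was
 *		// initialized with callout_init_mtx().
 *		callout_reset(&sc->sc_timer, hz, my_tick, sc);	// rearm
 *	}
 *
 *	static void
 *	my_attach(struct my_softc *sc)
 *	{
 *		mtx_init(&sc->sc_mtx, "my_softc", NULL, MTX_DEF);
 *		callout_init_mtx(&sc->sc_timer, &sc->sc_mtx, 0);
 *		mtx_lock(&sc->sc_mtx);
 *		callout_reset(&sc->sc_timer, hz, my_tick, sc);
 *		mtx_unlock(&sc->sc_mtx);
 *	}
 *
 *	static void
 *	my_detach(struct my_softc *sc)
 *	{
 *		// callout_drain() sleeps until any in-flight invocation has
 *		// finished, so sc_mtx must not be held across the call.
 *		callout_drain(&sc->sc_timer);
 *		mtx_destroy(&sc->sc_mtx);
 *	}
 */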
641
642 #ifdef APM_FIXUP_CALLTODO
643 /*
644 * Adjust the kernel calltodo timeout list. This routine is used after
645 * an APM resume to recalculate the calltodo timer list values with the
646 * number of ticks we have been asleep. The next hardclock() will detect
647 * that timers have fired and run softclock() to execute them.
648 *
649 * Please note, I have not done an exhaustive analysis of what code this
650 * might break. I am motivated to have my select()'s and alarm()'s that
651 * have expired during suspend fire upon resume so that the applications
652 * which set the timer can do the maintenance the timer was for as close
653 * as possible to the originally intended time. Testing this code for a
654 * week showed that resuming from a suspend resulted in 22 to 25 timers
655 * firing, which seemed independent of whether the suspend was 2 hours or
656 * 2 days. Your mileage may vary. - Ken Key <key@cs.utk.edu>
657 */
658 void
659 adjust_timeout_calltodo(time_change)
660 struct timeval *time_change;
661 {
662 register struct callout *p;
663 unsigned long delta_ticks;
664
665 /*
666 * How many ticks were we asleep?
667 * (stolen from tvtohz()).
668 */
669
670 /* Don't do anything */
671 if (time_change->tv_sec < 0)
672 return;
673 else if (time_change->tv_sec <= LONG_MAX / 1000000)
674 delta_ticks = (time_change->tv_sec * 1000000 +
675 time_change->tv_usec + (tick - 1)) / tick + 1;
676 else if (time_change->tv_sec <= LONG_MAX / hz)
677 delta_ticks = time_change->tv_sec * hz +
678 (time_change->tv_usec + (tick - 1)) / tick + 1;
679 else
680 delta_ticks = LONG_MAX;
681
682 if (delta_ticks > INT_MAX)
683 delta_ticks = INT_MAX;
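/*
 * Worked example (hypothetical numbers): with hz = 100 the tick length
 * is tick = 10000 us.  Sleeping for half an hour gives tv_sec = 1800,
 * tv_usec = 0, so the first branch above applies and
 * delta_ticks = (1800000000 + 9999) / 10000 + 1 = 180001, i.e. the
 * 180000 missed ticks plus one tick of round-up slack.
 */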
684
685 /*
686 * Now rip through the timer calltodo list looking for timers
687 * to expire.
688 */
689
690 /* don't collide with softclock() */
691 mtx_lock_spin(&callout_lock);
692 for (p = calltodo.c_next; p != NULL; p = p->c_next) {
693 p->c_time -= delta_ticks;
694
695 /* Break if the timer had more time on it than delta_ticks */
696 if (p->c_time > 0)
697 break;
698
699 /* take back the ticks the timer didn't use (p->c_time <= 0) */
700 delta_ticks = -p->c_time;
701 }
702 mtx_unlock_spin(&callout_lock);
703
704 return;
705 }
706 #endif /* APM_FIXUP_CALLTODO */