/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/5.4/sys/kern/kern_timeout.c 145335 2005-04-20 19:11:07Z cvs2svn $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */

/* Exported to machdep.c and/or kern_clock.c. */
struct callout *callout;
struct callout_list callfree;
int callwheelsize, callwheelbits, callwheelmask;
struct callout_tailq *callwheel;
int softticks;			/* Like ticks, but for softclock(). */
struct mtx callout_lock;
#ifdef DIAGNOSTIC
struct mtx dont_sleep_in_callout;
#endif

static struct callout *nextsoftcheck;	/* Next callout to be checked. */

/**
 * Locked by callout_lock:
 *   curr_callout    - If a callout is in progress, it is curr_callout.
 *                     If curr_callout is non-NULL, threads waiting on
 *                     callout_wait will be woken up as soon as the
 *                     relevant callout completes.
 *   curr_cancelled  - Changing to 1 with both callout_lock and Giant held
 *                     guarantees that the current callout will not run.
 *                     The softclock() function sets this to 0 before it
 *                     drops callout_lock to acquire Giant, and it calls
 *                     the handler only if curr_cancelled is still 0 when
 *                     Giant is successfully acquired.
 *   wakeup_ctr      - Incremented every time a thread wants to wait
 *                     for a callout to complete.  Modified only when
 *                     curr_callout is non-NULL.
 *   wakeup_needed   - If a thread is waiting on callout_wait, then
 *                     wakeup_needed is nonzero.  Set only when
 *                     curr_callout is non-NULL.
 */
static struct callout *curr_callout;
static int curr_cancelled;
static int wakeup_ctr;
static int wakeup_needed;

/**
 * Locked by callout_wait_lock:
 *   callout_wait    - If wakeup_needed is set, callout_wait will be
 *                     triggered after the current callout finishes.
 *   wakeup_done_ctr - Set to the current value of wakeup_ctr after
 *                     callout_wait is triggered.
 */
static struct mtx callout_wait_lock;
static struct cv callout_wait;
static int wakeup_done_ctr;
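
/*
 * Taken together, these variables implement the drain handshake below:
 * the draining thread sets wakeup_needed, takes a cookie from wakeup_ctr++,
 * drops callout_lock and sleeps on callout_wait; once the running handler
 * returns, softclock() broadcasts callout_wait and publishes the cookie
 * through wakeup_done_ctr, so a waiter that arrives after the broadcast
 * can compare its cookie against wakeup_done_ctr and skip the cv_wait().
 */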
111
112 /*
113 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
114 *
115 * This code is called very early in the kernel initialization sequence,
116 * and may be called more then once.
117 */
118 caddr_t
119 kern_timeout_callwheel_alloc(caddr_t v)
120 {
121 /*
122 * Calculate callout wheel size
123 */
124 for (callwheelsize = 1, callwheelbits = 0;
125 callwheelsize < ncallout;
126 callwheelsize <<= 1, ++callwheelbits)
127 ;
128 callwheelmask = callwheelsize - 1;
129
130 callout = (struct callout *)v;
131 v = (caddr_t)(callout + ncallout);
132 callwheel = (struct callout_tailq *)v;
133 v = (caddr_t)(callwheel + callwheelsize);
134 return(v);
135 }
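
/*
 * Worked example (illustrative, assuming ncallout = 600): the loop above
 * rounds the wheel up to the next power of two, giving callwheelsize = 1024,
 * callwheelbits = 10 and callwheelmask = 0x3ff, so hashing a callout into
 * the wheel is a single mask: callwheel[c->c_time & callwheelmask].
 */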

/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 * This code is called just once, after the space reserved for the
 * callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
	int i;

	SLIST_INIT(&callfree);
	for (i = 0; i < ncallout; i++) {
		callout_init(&callout[i], 0);
		callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
	}
	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&callwheel[i]);
	}
	mtx_init(&callout_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
#ifdef DIAGNOSTIC
	mtx_init(&dont_sleep_in_callout, "dont_sleep_in_callout", NULL, MTX_DEF);
#endif
	mtx_init(&callout_wait_lock, "callout_wait_lock", NULL, MTX_DEF);
	cv_init(&callout_wait, "callout_wait");
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *dummy)
{
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int gcalls;
	int wakeup_cookie;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	mtx_lock_spin(&callout_lock);
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					mtx_unlock_spin(&callout_lock);
					;	/* nothing */
					mtx_lock_spin(&callout_lock);
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				int c_flags;

				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				c->c_func = NULL;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
					    c_links.sle);
					curr_callout = NULL;
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
					curr_callout = c;
				}
				curr_cancelled = 0;
				mtx_unlock_spin(&callout_lock);
				if (!(c_flags & CALLOUT_MPSAFE)) {
					mtx_lock(&Giant);
					/*
					 * The callout may have been cancelled
					 * while we switched locks.
					 */
					if (curr_cancelled) {
						mtx_unlock(&Giant);
						mtx_lock_spin(&callout_lock);
						goto done_locked;
					}
					/* The callout cannot be stopped now. */
					curr_cancelled = 1;
					gcalls++;
					CTR1(KTR_CALLOUT, "callout %p", c_func);
				} else {
					mpcalls++;
					CTR1(KTR_CALLOUT, "callout mpsafe %p",
					    c_func);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
				mtx_lock(&dont_sleep_in_callout);
#endif
				c_func(c_arg);
#ifdef DIAGNOSTIC
				mtx_unlock(&dont_sleep_in_callout);
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				if (!(c_flags & CALLOUT_MPSAFE))
					mtx_unlock(&Giant);
				mtx_lock_spin(&callout_lock);
done_locked:
				curr_callout = NULL;
				if (wakeup_needed) {
					/*
					 * There might be someone waiting
					 * for the callout to complete.
					 */
					wakeup_cookie = wakeup_ctr;
					mtx_unlock_spin(&callout_lock);
					mtx_lock(&callout_wait_lock);
					cv_broadcast(&callout_wait);
					wakeup_done_ctr = wakeup_cookie;
					mtx_unlock(&callout_wait_lock);
					mtx_lock_spin(&callout_lock);
					wakeup_needed = 0;
				}
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	nextsoftcheck = NULL;
	mtx_unlock_spin(&callout_lock);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
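
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * consumer of the legacy interface described above.  The "oldapi_" names
 * are invented for illustration; the calls themselves are the documented
 * API.
 */
#if 0
struct oldapi_softc {
	struct callout_handle sc_th;	/* identifies our pending timeout */
};

static void
oldapi_expire(void *arg)
{
	/*
	 * Runs from softclock() after roughly one second; Giant is held
	 * here because timeout(9) entries are not CALLOUT_MPSAFE.
	 */
}

static void
oldapi_init(struct oldapi_softc *sc)
{
	/* Make a later untimeout() harmless even if we never arm. */
	callout_handle_init(&sc->sc_th);
}

static void
oldapi_arm(struct oldapi_softc *sc)
{
	sc->sc_th = timeout(oldapi_expire, sc, hz);
}

static void
oldapi_disarm(struct oldapi_softc *sc)
{
	/* Both the handle and the original (func, arg) pair are needed. */
	untimeout(oldapi_expire, sc, sc->sc_th);
}
#endif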
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	struct callout *new;
	struct callout_handle handle;

	mtx_lock_spin(&callout_lock);

	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&callfree, c_links.sle);

	callout_reset(new, to_ticks, ftn, arg);

	handle.callout = new;
	mtx_unlock_spin(&callout_lock);
	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	mtx_lock_spin(&callout_lock);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	mtx_unlock_spin(&callout_lock);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been serviced
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
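
/*
 * Illustrative sketch (not part of the original file): typical use of the
 * client-allocated interface.  The "example_" names are invented; the
 * calls follow the API described above.
 */
#if 0
struct example_softc {
	struct callout sc_ch;
};

static void
example_tick(void *arg)
{
	struct example_softc *sc = arg;

	/*
	 * CALLOUT_PENDING was cleared before we were called;
	 * callout_active() remains true until the callout is stopped
	 * or deactivated.
	 */
	callout_reset(&sc->sc_ch, hz, example_tick, sc);	/* re-arm */
}

static void
example_start(struct example_softc *sc)
{
	callout_init(&sc->sc_ch, 1);	/* nonzero => CALLOUT_MPSAFE */
	callout_reset(&sc->sc_ch, hz, example_tick, sc);
}
#endif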
void
callout_reset(c, to_ticks, ftn, arg)
	struct callout *c;
	int to_ticks;
	void (*ftn)(void *);
	void *arg;
{

	mtx_lock_spin(&callout_lock);
	if (c == curr_callout) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress.  If there is a mutex then we
		 * can cancel the callout if it has not really started.
		 */
		if ((c->c_flags & CALLOUT_MPSAFE) == 0 && !curr_cancelled)
			curr_cancelled = 1;
		if (wakeup_needed) {
			/*
			 * Someone has called callout_drain to kill this
			 * callout.  Don't reschedule.
			 */
			mtx_unlock_spin(&callout_lock);
			return;
		}
	}
	if (c->c_flags & CALLOUT_PENDING) {
		if (nextsoftcheck == c) {
			nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
		}
		TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c,
		    c_links.tqe);

		/*
		 * Part of the normal "stop a pending callout" process
		 * is to clear the CALLOUT_ACTIVE and CALLOUT_PENDING
		 * flags.  We're not going to bother doing that here,
		 * because we're going to be setting those flags ten lines
		 * after this point, and we're holding callout_lock
		 * between now and then.
		 */
	}

	/*
	 * We could unlock callout_lock here and lock it again before the
	 * TAILQ_INSERT_TAIL, but there's no point since doing this setup
	 * doesn't take much time.
	 */
	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = ftn;
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
	    c, c_links.tqe);
	mtx_unlock_spin(&callout_lock);
}

int
_callout_stop_safe(c, safe)
	struct callout *c;
	int safe;
{
	int use_mtx, wakeup_cookie;

	if (!safe && (c->c_flags & CALLOUT_MPSAFE) == 0) {
		use_mtx = mtx_owned(&Giant);
	} else {
		use_mtx = 0;
	}

	mtx_lock_spin(&callout_lock);
	/*
	 * Don't attempt to delete a callout that's not on the queue.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;
		if (c != curr_callout) {
			mtx_unlock_spin(&callout_lock);
			return (0);
		}
		if (safe) {
			/* We need to wait until the callout is finished. */
			wakeup_needed = 1;
			wakeup_cookie = wakeup_ctr++;
			mtx_unlock_spin(&callout_lock);
			mtx_lock(&callout_wait_lock);

			/*
			 * Check to make sure that softclock() didn't
			 * do the wakeup in between our dropping
			 * callout_lock and picking up callout_wait_lock
			 */
			if (wakeup_cookie - wakeup_done_ctr > 0)
				cv_wait(&callout_wait, &callout_wait_lock);

			mtx_unlock(&callout_wait_lock);
		} else if (use_mtx && !curr_cancelled) {
			/* We can stop the callout before it runs. */
			curr_cancelled = 1;
			mtx_unlock_spin(&callout_lock);
			return (1);
		} else
			mtx_unlock_spin(&callout_lock);
		return (0);
	}
	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (nextsoftcheck == c) {
		nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
	}
	TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe);
	c->c_func = NULL;

	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
	}
	mtx_unlock_spin(&callout_lock);
	return (1);
}
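
/*
 * Illustrative sketch (not part of the original file): safe teardown of the
 * callout from the example above, using callout_drain(), which
 * <sys/callout.h> maps to _callout_stop_safe(c, 1).  Because the safe path
 * may cv_wait() for a running handler, it must not be called from the
 * handler itself or while holding locks the handler needs.
 */
#if 0
static void
example_stop(struct example_softc *sc)
{
	callout_drain(&sc->sc_ch);	/* waits out a concurrent handler */
	/* The callout can no longer fire; sc may now be freed. */
}
#endif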

void
callout_init(c, mpsafe)
	struct callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe)
		c->c_flags |= CALLOUT_MPSAFE;
}

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
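
/*
 * Worked example (illustrative): with hz = 100 (tick = 10000 us) and a
 * 2 hour suspend, time_change->tv_sec = 7200 takes the first branch below
 * and, assuming tv_usec = 0, yields
 * delta_ticks = (7200 * 1000000 + 0 + 9999) / 10000 + 1 = 720001.
 */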
void
adjust_timeout_calltodo(time_change)
	struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	mtx_lock_spin(&callout_lock);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	mtx_unlock_spin(&callout_lock);

	return;
}
#endif /* APM_FIXUP_CALLTODO */