FreeBSD/Linux Kernel Cross Reference
sys/kern/mach_clock.c
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * HISTORY
 * $Log: mach_clock.c,v $
 * Revision 2.26  93/08/03  12:31:12  mrt
 *      [93/08/02  16:51:35  bershad]
 *
 *      Flavor support for sampling.
 *      [93/07/30  10:21:52  bershad]
 *
 * Revision 2.25  93/05/15  18:53:40  mrt
 *      machparam.h -> machspl.h
 *
 * Revision 2.24  93/05/10  17:47:45  rvb
 *      Rudy asked for this change for xntp and dbg thought it would
 *      do no harm.  (I think that HZ is pretty always 100 so that this
 *      code and the previous version always go the tickadj = 1; route.)
 *      [93/05/10  15:52:26  rvb]
 *
 * Revision 2.23  93/03/09  10:55:07  danner
 *      Removed gratuitous casts to ints.
 *      [93/03/05  af]
 *
 * Revision 2.22  93/01/27  09:33:55  danner
 *      take_pc_sample() is void.
 *      [93/01/25  jfriedl]
 *
 * Revision 2.21  93/01/24  13:19:29  danner
 *      Add pc sampling from C Maeda.  Make it conditional on thread or
 *      task sampling being enabled.
 *      [93/01/12  rvb]
 *
 * Revision 2.20  93/01/14  17:35:12  danner
 *      Proper spl typing.
 *      [92/12/01  af]
 *
 * Revision 2.19  92/08/03  17:38:09  jfriedl
 *      removed silly prototypes
 *      [92/08/02  jfriedl]
 *
 * Revision 2.18  92/05/21  17:14:33  jfriedl
 *      Added void to fcns that yet needed it.
 *      [92/05/16  jfriedl]
 *
 * Revision 2.17  92/03/10  16:26:41  jsb
 *      Removed NORMA_IPC code.
 *      [92/01/17  11:38:55  jsb]
 *
 * Revision 2.16  91/08/03  18:18:56  jsb
 *      NORMA_IPC: added call to netipc_timeout in hardclock.
 *      [91/07/24  22:30:22  jsb]
 *
 * Revision 2.15  91/07/31  17:45:57  dbg
 *      Fixed timeout race.  Implemented host_adjust_time.
 *      [91/07/30  17:03:54  dbg]
 *
 * Revision 2.14  91/05/18  14:32:29  rpd
 *      Fixed timeout/untimeout to use a fixed-size array of timers
 *      instead of a zone.
 *      [91/03/31  rpd]
 *      Fixed host_set_time to update the mapped time value.
 *      Changed the mapped time value to include a check field.
 *      [91/03/19  rpd]
 *
 * Revision 2.13  91/05/14  16:44:06  mrt
 *      Correcting copyright
 *
 * Revision 2.12  91/03/16  14:50:45  rpd
 *      Updated for new kmem_alloc interface.
 *      [91/03/03  rpd]
 *      Use counter macros to track thread and stack usage.
 *      [91/03/01  17:43:15  rpd]
 *
 * Revision 2.11  91/02/05  17:27:45  mrt
 *      Changed to new Mach copyright
 *      [91/02/01  16:14:47  mrt]
 *
 * Revision 2.10  91/01/08  15:16:22  rpd
 *      Added continuation argument to thread_block.
 *      [90/12/08  rpd]
 *
 * Revision 2.9  90/11/05  14:31:27  rpd
 *      Unified untimeout and untimeout_try.
 *      [90/10/29  rpd]
 *
 * Revision 2.8  90/10/12  18:07:29  rpd
 *      Fixed calls to thread_bind in host_set_time.
 *      Fix from Philippe Bernadat.
 *      [90/10/10  rpd]
 *
 * Revision 2.7  90/09/09  14:32:18  rpd
 *      Use decl_simple_lock_data.
 *      [90/08/30  rpd]
 *
 * Revision 2.6  90/08/27  22:02:48  dbg
 *      Add untimeout_try for multiprocessors.  Reduce lint.
 *      [90/07/17  dbg]
 *
 * Revision 2.5  90/06/02  14:55:04  rpd
 *      Converted to new IPC and new host port technology.
 *      [90/03/26  22:10:04  rpd]
 *
 * Revision 2.4  90/01/11  11:43:31  dbg
 *      Switch to master CPU in host_set_time.
 *      [90/01/03  dbg]
 *
 * Revision 2.3  89/08/09  14:33:09  rwd
 *      Include mach/vm_param.h and use PAGE_SIZE instead of NBPG.
 *      [89/08/08  rwd]
 *      Removed timemmap to machine/model_dep.c
 *      [89/08/08  rwd]
 *
 * Revision 2.2  89/08/05  16:07:11  rwd
 *      Added mappable time code.
 *      [89/08/02  rwd]
 *
 * 14-Jan-89  David Golub (dbg) at Carnegie-Mellon University
 *      Split into two new files: mach_clock (for timing) and priority
 *      (for priority calculation).
 *
 *  8-Dec-88  David Golub (dbg) at Carnegie-Mellon University
 *      Use sentinel for root of timer queue, to speed up search loops.
 *
 * 30-Jun-88  David Golub (dbg) at Carnegie-Mellon University
 *      Created.
 *
 */
/*
 *      File:   clock_prim.c
 *      Author: Avadis Tevanian, Jr.
 *      Date:   1986
 *
 *      Clock primitives.
 */
#include <cpus.h>
#include <mach_pcsample.h>
#include <stat_time.h>

#include <mach/boolean.h>
#include <mach/machine.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/pc_sample.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/host.h>
#include <kern/lock.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/time_out.h>
#include <kern/time_stamp.h>
#include <vm/vm_kern.h>
#include <sys/time.h>
#include <machine/mach_param.h>     /* HZ */
#include <machine/machspl.h>

extern void thread_quantum_update();

void softclock();                   /* forward */

int hz = HZ;                        /* number of ticks per second */
int tick = (1000000 / HZ);          /* number of usec per tick */
time_value_t time = { 0, 0 };       /* time since bootup (uncorrected) */
unsigned long elapsed_ticks = 0;    /* ticks elapsed since bootup */

int timedelta = 0;
int tickdelta = 0;

#if HZ > 500
int tickadj = 1;                    /* can adjust HZ usecs per second */
#else
int tickadj = 500 / HZ;             /* can adjust 500 usecs per second */
#endif
int bigadj = 1000000;               /* adjust 10*tickadj if adjustment
                                       > bigadj */
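
/*
 * Worked example (illustrative): with HZ = 100, tickadj is
 * 500/100 = 5 usec per tick, so the clock can be slewed by
 * 5 * 100 = 500 usec per second.  A full 1-second adjustment
 * therefore takes 1000000/500 = 2000 seconds to apply; an
 * adjustment larger than bigadj uses 10*tickadj instead and
 * proceeds ten times faster (200 seconds per second of
 * adjustment).
 */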

/*
 * This update protocol, with a check value, allows
 *      do {
 *          secs = mtime->seconds;
 *          usecs = mtime->microseconds;
 *      } while (secs != mtime->check_seconds);
 * to read the time correctly.  (On a multiprocessor this assumes
 * that processors see each other's writes in the correct order.
 * We may have to insert fence operations.)
 */

mapped_time_value_t *mtime = 0;

#define update_mapped_time(time) \
MACRO_BEGIN \
    if (mtime != 0) { \
        mtime->check_seconds = (time)->seconds; \
        mtime->microseconds = (time)->microseconds; \
        mtime->seconds = (time)->seconds; \
    } \
MACRO_END
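
/*
 * Illustrative sketch (not part of the original source): a reader
 * that follows the protocol above; host_get_time() later in this
 * file uses the same loop.  Because update_mapped_time() writes
 * check_seconds first and seconds last, a reader that observes
 * seconds == check_seconds saw either all of an update or none of
 * it.  The name read_mapped_time is hypothetical.
 */
time_value_t read_mapped_time(mt)
    register mapped_time_value_t *mt;
{
    time_value_t t;

    do {
        t.seconds = mt->seconds;
        t.microseconds = mt->microseconds;
    } while (t.seconds != mt->check_seconds);
    return t;
}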

decl_simple_lock_data(, timer_lock)     /* lock for ... */
timer_elt_data_t timer_head;            /* ordered list of timeouts */
                                        /* (doubles as end-of-list) */

/*
 *      Handle clock interrupts.
 *
 *      The clock interrupt is assumed to be called at a (more or less)
 *      constant rate.  The rate must be identical on all CPUS (XXX - fix).
 *
 *      Usec is the number of microseconds that have elapsed since the
 *      last clock tick.  It may be constant or computed, depending on
 *      the accuracy of the hardware clock.
 *
 */
void clock_interrupt(usec, usermode, basepri)
    register int usec;          /* microseconds per tick */
    boolean_t usermode;         /* executing user code */
    boolean_t basepri;          /* at base priority */
{
    register int my_cpu = cpu_number();
    register thread_t thread = current_thread();

    counter(c_clock_ticks++);
    counter(c_threads_total += c_threads_current);
    counter(c_stacks_total += c_stacks_current);

#if STAT_TIME
    /*
     * Increment the thread time, if using
     * statistical timing.
     */
    if (usermode) {
        timer_bump(&thread->user_timer, usec);
    }
    else {
        timer_bump(&thread->system_timer, usec);
    }
#endif /* STAT_TIME */

    /*
     * Increment the CPU time statistics.
     */
    {
        register int state;

        if (usermode)
            state = CPU_STATE_USER;
        else if (!cpu_idle(my_cpu))
            state = CPU_STATE_SYSTEM;
        else
            state = CPU_STATE_IDLE;

        machine_slot[my_cpu].cpu_ticks[state]++;

        /*
         * Adjust the thread's priority and check for
         * quantum expiration.
         */

        thread_quantum_update(my_cpu, thread, 1, state);
    }

#if MACH_PCSAMPLE > 0
    /*
     * Take a sample of pc for the user if required.
     * This had better be MP safe.  It might be interesting
     * to keep track of cpu in the sample.
     */
    if (usermode) {
        if (thread->pc_sample.buffer &&
            (thread->pc_sample.sampletypes & SAMPLED_PC_PERIODIC))
            take_pc_sample(thread, &thread->pc_sample,
                           SAMPLED_PC_PERIODIC);
        if (thread->task->pc_sample.buffer &&
            (thread->task->pc_sample.sampletypes & SAMPLED_PC_PERIODIC))
            take_pc_sample(thread, &thread->task->pc_sample,
                           SAMPLED_PC_PERIODIC);
    }
#endif /* MACH_PCSAMPLE > 0 */

    /*
     * Time-of-day and time-out list are updated only
     * on the master CPU.
     */
    if (my_cpu == master_cpu) {

        register spl_t s;
        register timer_elt_t telt;
        boolean_t needsoft = FALSE;

#if TS_FORMAT == 1
        /*
         * Increment the tick count for the timestamping routine.
         */
        ts_tick_count++;
#endif /* TS_FORMAT == 1 */

        /*
         * Update the tick count since bootup, and handle
         * timeouts.
         */

        s = splsched();
        simple_lock(&timer_lock);

        elapsed_ticks++;

        telt = (timer_elt_t)queue_first(&timer_head.chain);
        if (telt->ticks <= elapsed_ticks)
            needsoft = TRUE;
        simple_unlock(&timer_lock);
        splx(s);

        /*
         * Increment the time-of-day clock.
         */
        if (timedelta == 0) {
            time_value_add_usec(&time, usec);
        }
        else {
            register int delta;

            if (timedelta < 0) {
                delta = usec - tickdelta;
                timedelta += tickdelta;
            }
            else {
                delta = usec + tickdelta;
                timedelta -= tickdelta;
            }
            time_value_add_usec(&time, delta);
        }
        update_mapped_time(&time);

        /*
         * Schedule a soft interrupt for timeout if needed.
         */
        if (needsoft) {
            if (basepri) {
                (void) splsoftclock();
                softclock();
            }
            else {
                setsoftclock();
            }
        }
    }
}

/*
 *      There is a nasty race between softclock and reset_timeout.
 *      For example, scheduling code looks at timer_set and calls
 *      reset_timeout, thinking the timer is set.  However, softclock
 *      has already removed the timer but hasn't called thread_timeout
 *      yet.
 *
 *      Interim solution:  We initialize timers after pulling
 *      them out of the queue, so a race with reset_timeout won't
 *      hurt.  The timeout functions (eg, thread_timeout,
 *      thread_depress_timeout) check timer_set/depress_priority
 *      to see if the timer has been cancelled and if so do nothing.
 *
 *      This still isn't correct.  For example, softclock pulls a
 *      timer off the queue, then thread_go resets timer_set (but
 *      reset_timeout does nothing), then thread_set_timeout puts the
 *      timer back on the queue and sets timer_set, then
 *      thread_timeout finally runs and clears timer_set, then
 *      thread_set_timeout tries to put the timer on the queue again
 *      and corrupts it.
 */
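
/*
 * Illustrative sketch (not from this file): the "check a cancelled
 * flag before acting" pattern that the interim solution above
 * describes, reduced to a minimal form.  The names example_arg and
 * example_timeout are hypothetical stand-ins for the real timeout
 * functions such as thread_timeout.
 */
struct example_arg {
    boolean_t timer_set;        /* cleared by the canceller */
};

void example_timeout(param)
    char *param;
{
    struct example_arg *arg = (struct example_arg *)param;

    if (!arg->timer_set)
        return;                 /* lost a race with reset_timeout */
    arg->timer_set = FALSE;
    /* ... do the real timeout work here ... */
}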

void softclock()
{
    /*
     * Handle timeouts.
     */
    spl_t s;
    register timer_elt_t telt;
    register int (*fcn)();
    register char *param;

    while (TRUE) {
        s = splsched();
        simple_lock(&timer_lock);
        telt = (timer_elt_t) queue_first(&timer_head.chain);
        if (telt->ticks > elapsed_ticks) {
            simple_unlock(&timer_lock);
            splx(s);
            break;
        }
        fcn = telt->fcn;
        param = telt->param;

        remqueue(&timer_head.chain, (queue_entry_t)telt);
        telt->set = TELT_UNSET;
        simple_unlock(&timer_lock);
        splx(s);

        assert(fcn != 0);
        (*fcn)(param);
    }
}

/*
 *      Set timeout.
 *
 *      Parameters:
 *          telt        timer element.  Function and param are already set.
 *          interval    time-out interval, in hz.
 */
void set_timeout(telt, interval)
    register timer_elt_t telt;  /* already loaded */
    register unsigned int interval;
{
    spl_t s;
    register timer_elt_t next;

    s = splsched();
    simple_lock(&timer_lock);

    interval += elapsed_ticks;

    for (next = (timer_elt_t)queue_first(&timer_head.chain);
         ;
         next = (timer_elt_t)queue_next((queue_entry_t)next)) {

        if (next->ticks > interval)
            break;
    }
    telt->ticks = interval;
    /*
     * Insert new timer element before 'next'
     * (after 'next'->prev)
     */
    insque((queue_entry_t) telt, ((queue_entry_t)next)->prev);
    telt->set = TELT_SET;
    simple_unlock(&timer_lock);
    splx(s);
}

boolean_t reset_timeout(telt)
    register timer_elt_t telt;
{
    spl_t s;

    s = splsched();
    simple_lock(&timer_lock);
    if (telt->set) {
        remqueue(&timer_head.chain, (queue_entry_t)telt);
        telt->set = TELT_UNSET;
        simple_unlock(&timer_lock);
        splx(s);
        return TRUE;
    }
    else {
        simple_unlock(&timer_lock);
        splx(s);
        return FALSE;
    }
}

void init_timeout()
{
    simple_lock_init(&timer_lock);
    queue_init(&timer_head.chain);
    timer_head.ticks = ~0;      /* MAXUINT - sentinel */

    elapsed_ticks = 0;
}
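
/*
 * Illustrative sketch (not part of the original source): the
 * "private timer" usage that the compatibility note near the end of
 * this file recommends over timeout()/untimeout().  A subsystem
 * embeds its own timer_elt_data_t, loads fcn and param once, and
 * then arms and disarms it with set_timeout/reset_timeout.  The
 * names example_timer, example_expire, example_arm and
 * example_disarm are hypothetical.
 */
extern int example_expire();        /* hypothetical expiry handler */

timer_elt_data_t example_timer;

void example_arm()
{
    example_timer.fcn = example_expire;
    example_timer.param = (char *) 0;
    set_timeout(&example_timer, (unsigned int) hz); /* fires in 1 sec */
}

void example_disarm()
{
    (void) reset_timeout(&example_timer);   /* TRUE if not yet fired */
}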

/*
 * Read the time.
 */
kern_return_t
host_get_time(host, current_time)
    host_t host;
    time_value_t *current_time;     /* OUT */
{
    if (host == HOST_NULL)
        return(KERN_INVALID_HOST);

    do {
        current_time->seconds = mtime->seconds;
        current_time->microseconds = mtime->microseconds;
    } while (current_time->seconds != mtime->check_seconds);

    return (KERN_SUCCESS);
}

/*
 * Set the time.  Only available to privileged users.
 */
kern_return_t
host_set_time(host, new_time)
    host_t host;
    time_value_t new_time;
{
    spl_t s;

    if (host == HOST_NULL)
        return(KERN_INVALID_HOST);

#if NCPUS > 1
    /*
     * Switch to the master CPU to synchronize correctly.
     */
    thread_bind(current_thread(), master_processor);
    if (current_processor() != master_processor)
        thread_block((void (*)) 0);
#endif /* NCPUS > 1 */

    s = splhigh();
    time = new_time;
    update_mapped_time(&time);
    resettodr();
    splx(s);

#if NCPUS > 1
    /*
     * Switch off the master CPU.
     */
    thread_bind(current_thread(), PROCESSOR_NULL);
#endif /* NCPUS > 1 */

    return (KERN_SUCCESS);
}

/*
 * Adjust the time gradually.
 */
kern_return_t
host_adjust_time(host, new_adjustment, old_adjustment)
    host_t host;
    time_value_t new_adjustment;
    time_value_t *old_adjustment;   /* OUT */
{
    time_value_t oadj;
    unsigned int ndelta;
    spl_t s;

    if (host == HOST_NULL)
        return (KERN_INVALID_HOST);

    ndelta = new_adjustment.seconds * 1000000
           + new_adjustment.microseconds;

#if NCPUS > 1
    thread_bind(current_thread(), master_processor);
    if (current_processor() != master_processor)
        thread_block((void (*)) 0);
#endif /* NCPUS > 1 */

    s = splclock();

    oadj.seconds = timedelta / 1000000;
    oadj.microseconds = timedelta % 1000000;

    if (timedelta == 0) {
        if (ndelta > bigadj)
            tickdelta = 10 * tickadj;
        else
            tickdelta = tickadj;
    }
    if (ndelta % tickdelta)
        ndelta = ndelta / tickdelta * tickdelta;

    timedelta = ndelta;

    splx(s);
#if NCPUS > 1
    thread_bind(current_thread(), PROCESSOR_NULL);
#endif /* NCPUS > 1 */

    *old_adjustment = oadj;

    return (KERN_SUCCESS);
}

void mapable_time_init()
{
    if (kmem_alloc_wired(kernel_map, (vm_offset_t *) &mtime, PAGE_SIZE)
            != KERN_SUCCESS)
        panic("mapable_time_init");
    bzero((char *)mtime, PAGE_SIZE);
    update_mapped_time(&time);
}

int timeopen()
{
    return(0);
}
int timeclose()
{
    return(0);
}

/*
 *      Compatibility for device drivers.
 *      New code should use set_timeout/reset_timeout and private timers.
 *      This code can't use a zone to allocate timers, because
 *      it can be called from interrupt handlers.
 */

#define NTIMERS     20

timer_elt_data_t timeout_timers[NTIMERS];

/*
 *      Set timeout.
 *
 *      fcn:        function to call
 *      param:      parameter to pass to function
 *      interval:   timeout interval, in hz.
 */
void timeout(fcn, param, interval)
    int (*fcn)(/* char * param */);
    char *param;
    int interval;
{
    spl_t s;
    register timer_elt_t elt;

    s = splsched();
    simple_lock(&timer_lock);
    for (elt = &timeout_timers[0]; elt < &timeout_timers[NTIMERS]; elt++)
        if (elt->set == TELT_UNSET)
            break;
    if (elt == &timeout_timers[NTIMERS])
        panic("timeout");
    elt->fcn = fcn;
    elt->param = param;
    elt->set = TELT_ALLOC;
    simple_unlock(&timer_lock);
    splx(s);

    set_timeout(elt, (unsigned int)interval);
}

/*
 * Returns a boolean indicating whether the timeout element was found
 * and removed.
 */
boolean_t untimeout(fcn, param)
    register int (*fcn)();
    register char *param;
{
    spl_t s;
    register timer_elt_t elt;

    s = splsched();
    simple_lock(&timer_lock);
    queue_iterate(&timer_head.chain, elt, timer_elt_t, chain) {

        if ((fcn == elt->fcn) && (param == elt->param)) {
            /*
             * Found it.
             */
            remqueue(&timer_head.chain, (queue_entry_t)elt);
            elt->set = TELT_UNSET;

            simple_unlock(&timer_lock);
            splx(s);
            return (TRUE);
        }
    }
    simple_unlock(&timer_lock);
    splx(s);
    return (FALSE);
}
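
/*
 * Illustrative usage sketch (not part of the original source): a
 * driver arming one of the compatibility timeouts and cancelling it.
 * The names example_drain and example_attach are hypothetical.
 */
int example_drain(param)
    char *param;
{
    /* runs from softclock, hz ticks (one second) after timeout() */
    return 0;
}

void example_attach(softc)
    char *softc;
{
    boolean_t cancelled;

    timeout(example_drain, softc, hz);      /* fire in one second */

    /* later, if the work is no longer wanted: */
    cancelled = untimeout(example_drain, softc);
    /* cancelled == TRUE if it had not yet fired */
}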