1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_COPYRIGHT@
27 */
28
29 /*
30 * File: i386/rtclock.c
31 * Purpose: Routines for handling the machine dependent
32 * real-time clock. This clock is generated by
33 * the Intel 8254 Programmable Interval Timer.
34 */
35
36 #include <cpus.h>
37 #include <platforms.h>
38 #include <mach_kdb.h>
39
40 #include <mach/mach_types.h>
41
42 #include <kern/cpu_number.h>
43 #include <kern/cpu_data.h>
44 #include <kern/clock.h>
45 #include <kern/host_notify.h>
46 #include <kern/macro_help.h>
47 #include <kern/misc_protos.h>
48 #include <kern/spl.h>
49 #include <machine/mach_param.h> /* HZ */
50 #include <mach/vm_prot.h>
51 #include <vm/pmap.h>
52 #include <vm/vm_kern.h> /* for kernel_map */
53 #include <i386/ipl.h>
54 #include <i386/pit.h>
55 #include <i386/pio.h>
56 #include <i386/misc_protos.h>
57 #include <i386/rtclock_entries.h>
58 #include <i386/hardclock_entries.h>
59 #include <i386/proc_reg.h>
60 #include <i386/machine_cpu.h>
61 #include <pexpert/pexpert.h>
62
/* Debug trace helpers: log routine entry/exit and values with an [RTCLOCK] prefix. */
#define DISPLAYENTER(x) printf("[RTCLOCK] entering " #x "\n");
#define DISPLAYEXIT(x) printf("[RTCLOCK] leaving " #x "\n");
#define DISPLAYVALUE(x,y) printf("[RTCLOCK] " #x ":" #y " = 0x%08x \n",y);
66
/* System (real-time) clock interface routines; collected in sysclk_ops below. */
int sysclk_config(void);

int sysclk_init(void);

kern_return_t sysclk_gettime(
	mach_timespec_t *cur_time);

kern_return_t sysclk_getattr(
	clock_flavor_t flavor,
	clock_attr_t attr,
	mach_msg_type_number_t *count);

kern_return_t sysclk_setattr(
	clock_flavor_t flavor,
	clock_attr_t attr,
	mach_msg_type_number_t count);

void sysclk_setalarm(
	mach_timespec_t *alarm_time);

/* Hook installed by IOKit to observe clock interrupt registration. */
extern void (*IOKitRegisterInterruptHook)(void *, int irq, int isclock);

/*
 * Lists of clock routines.
 * Slot order follows struct clock_ops; the 0 slot is presumably the
 * set-time entry (not supported here) -- verify against kern/clock.h.
 */
struct clock_ops sysclk_ops = {
	sysclk_config, sysclk_init,
	sysclk_gettime, 0,
	sysclk_getattr, sysclk_setattr,
	sysclk_setalarm,
};
98
/* Battery-backed (calendar) clock interface routines. */
int calend_config(void);

int calend_init(void);

kern_return_t calend_gettime(
	mach_timespec_t *cur_time);

kern_return_t calend_getattr(
	clock_flavor_t flavor,
	clock_attr_t attr,
	mach_msg_type_number_t *count);

/* Calendar clock: read-only; settime/setattr/setalarm slots are zero. */
struct clock_ops calend_ops = {
	calend_config, calend_init,
	calend_gettime, 0,
	calend_getattr, 0,
	0,
};
117
/* local data declarations */
mach_timespec_t *RtcTime = (mach_timespec_t *)0;	/* non-NULL once sysclk_init has run */
mach_timespec_t *RtcAlrm;				/* pending alarm time, if any */
clock_res_t RtcDelt;					/* half of intr_nsec (see rtc_setvals) */

/* global data declarations */
struct {
	uint64_t	abstime;		/* nanoseconds recorded at last tick */

	mach_timespec_t	time;			/* current clock time */
	mach_timespec_t	alarm_time;		/* time of next alarm */

	mach_timespec_t	calend_offset;		/* calendar minus system clock */
	boolean_t	calend_is_set;		/* TRUE once calendar initialized */

	int64_t		calend_adjtotal;	/* adjtime: outstanding total (nsec) */
	int32_t		calend_adjdelta;	/* adjtime: per-tick slew (nsec) */

	uint64_t	timer_deadline;		/* next timer expiration (abstime) */
	boolean_t	timer_is_set;		/* TRUE when a deadline is armed */
	clock_timer_func_t	timer_expire;	/* timer expiration upcall */

	clock_res_t	new_ires;		/* pending new resolution (nano ) */
	clock_res_t	intr_nsec;		/* interrupt resolution (nano) */
	mach_timebase_info_data_t	timebase_const;	/* TSC -> nsec conversion ratio */

	decl_simple_lock_data(,lock)		/* real-time clock device lock */
} rtclock;

unsigned int	clknum;			/* clks per second */
unsigned int	new_clknum;		/* pending clknum */
unsigned int	time_per_clk;		/* time per clk in ZHZ */
unsigned int	clks_per_int;		/* clks per interrupt */
unsigned int	clks_per_int_99;	/* 99% of clks_per_int */
int		rtc_intr_count;		/* interrupt counter */
int		rtc_intr_hertz;		/* interrupts per HZ */
int		rtc_intr_freq;		/* interrupt frequency */
int		rtc_print_lost_tick;	/* print lost tick */

uint32_t	rtc_cyc_per_sec;	/* processor cycles per seconds */
uint32_t	rtc_quant_scale;	/* used internally to convert clocks to nanos */
159
160 /*
161 * Macros to lock/unlock real-time clock device.
162 */
163 #define LOCK_RTC(s) \
164 MACRO_BEGIN \
165 (s) = splclock(); \
166 simple_lock(&rtclock.lock); \
167 MACRO_END
168
169 #define UNLOCK_RTC(s) \
170 MACRO_BEGIN \
171 simple_unlock(&rtclock.lock); \
172 splx(s); \
173 MACRO_END
174
175 /*
176 * i8254 control. ** MONUMENT **
177 *
178 * The i8254 is a traditional PC device with some arbitrary characteristics.
179 * Basically, it is a register that counts at a fixed rate and can be
180 * programmed to generate an interrupt every N counts. The count rate is
181 * clknum counts per second (see pit.h), historically 1193167 we believe.
182 * Various constants are computed based on this value, and we calculate
183 * them at init time for execution efficiency. To obtain sufficient
184 * accuracy, some of the calculation are most easily done in floating
185 * point and then converted to int.
186 *
187 * We want an interrupt every 10 milliseconds, approximately. The count
188 * which will do that is clks_per_int. However, that many counts is not
189 * *exactly* 10 milliseconds; it is a bit more or less depending on
190 * roundoff. The actual time per tick is calculated and saved in
191 * rtclock.intr_nsec, and it is that value which is added to the time
192 * register on each tick.
193 *
194 * The i8254 counter can be read between interrupts in order to determine
195 * the time more accurately. The counter counts down from the preset value
196 * toward 0, and we have to handle the case where the counter has been
197 * reset just before being read and before the interrupt has been serviced.
198 * Given a count since the last interrupt, the time since then is given
199 * by (count * time_per_clk). In order to minimize integer truncation,
200 * we perform this calculation in an arbitrary unit of time which maintains
201 * the maximum precision, i.e. such that one tick is 1.0e9 of these units,
202 * or close to the precision of a 32-bit int. We then divide by this unit
203 * (which doesn't lose precision) to get nanoseconds. For notation
204 * purposes, this unit is defined as ZHZ = zanoseconds per nanosecond.
205 *
206 * This sequence to do all this is in sysclk_gettime. For efficiency, this
207 * sequence also needs the value that the counter will have if it has just
208 * overflowed, so we precompute that also.
209 *
 * A workaround for certain really old platforms (specifically the
 * DEC XL5100) has been removed.  Those machines were observed to have
 * problems latching the counter, and occasionally (say, one out of
 * 100,000 times) returned a bogus value.  The removed code read the
 * counter twice and checked for a consistent pair of values; it was:
 * do {
 *	READ_8254(val);
 *	READ_8254(val2);
 * } while ( val2 > val || val2 < val - 10 );
220 *
221 *
222 * Some attributes of the rt clock can be changed, including the
223 * interrupt resolution. We default to the minimum resolution (10 ms),
224 * but allow a finer resolution to be requested. The assumed frequency
225 * of the clock can also be set since it appears that the actual
226 * frequency of real-world hardware can vary from the nominal by
227 * 200 ppm or more. When the frequency is set, the values above are
228 * recomputed and we continue without resetting or changing anything else.
229 */
#define RTC_MINRES	(NSEC_PER_SEC / HZ)	/* nsec per tick (coarsest) */
#define RTC_MAXRES	(RTC_MINRES / 20)	/* nsec per tick (finest) */
#define ZANO		(1000000000)		/* zanoseconds per second (see block comment) */
#define ZHZ		(ZANO / (NSEC_PER_SEC / HZ))	/* zanoseconds per nanosecond */
/* Latch counter 0 and read it back, low byte then high byte. */
#define READ_8254(val) { \
	outb(PITCTL_PORT, PIT_C0); \
	(val) = inb(PITCTR0_PORT); \
	(val) |= inb(PITCTR0_PORT) << 8 ; }

/* Granularity used when rounding the advertised CPU frequency (10 MHz). */
#define UI_CPUFREQ_ROUNDING_FACTOR	10000000
240
241
242 /*
243 * Forward decl.
244 */
245
246 void rtc_setvals( unsigned int, clock_res_t );
247
248 static void rtc_set_cyc_per_sec();
249
250 /* define assembly routines */
251
252
253 /*
254 * Inlines to get timestamp counter value.
255 */
256
257 inline static uint64_t
258 rdtsc_64(void)
259 {
260 uint64_t result;
261 asm volatile("rdtsc": "=A" (result));
262 return result;
263 }
264
/*
 * create_mul_quant_GHZ builds the 32-bit fixed-point multiplier used to
 * convert raw TSC counts to nanoseconds with a single 64x32 multiply
 * (see fast_get_nano_from_abs).  The argument is the number of TSC
 * ticks counted in 1/20 second, so the scale factor is
 *
 *	(nanoseconds per 1/20 sec << 32) / (TSC ticks per 1/20 sec)
 *
 * The implied denominator is 2^32, so "dividing" is just dropping the
 * low 32 bits of the product.  The same value can be handed to user
 * space as a timebase numerator with 0xFFFFFFFF as the denominator;
 * the error from using 0xFFFFFFFF rather than 2^32 is too small to
 * detect.  This trick only works when the TSC runs faster than
 * 1000MHz (so the scale fits in 32 bits); for slower processors the
 * caller instead publishes numer = NSEC_PER_SEC/20 and
 * denom = ticks-per-1/20-second, with no shortcut.
 */
inline static uint32_t
create_mul_quant_GHZ(uint32_t quant)
{
	const uint64_t nanos_per_period = 50000000ULL;	/* 1/20 second in nsec */

	return (uint32_t)((nanos_per_period << 32) / quant);
}
289
// this routine takes a value of raw TSC ticks and applies the passed mul_quant
// generated by create_mul_quant() This is our internal routine for creating
// nanoseconds
// since we don't really have uint96_t this routine basically does this....
// uint96_t intermediate = (*value) * scale
// return (intermediate >> 32)
//
// Implementation: multiply each 32-bit half of value by scale with mull,
// add the cross term, and return bits [95:32] of the 96-bit product in
// EDX:EAX (the "+A" constraint).  ESI and EDI hold intermediate words
// and are declared as clobbers.
inline static uint64_t
fast_get_nano_from_abs(uint64_t value, int scale)
{
	asm (" movl %%edx,%%esi \n\t"
	" mull %%ecx \n\t"
	" movl %%edx,%%edi \n\t"
	" movl %%esi,%%eax \n\t"
	" mull %%ecx \n\t"
	" xorl %%ecx,%%ecx \n\t"
	" addl %%edi,%%eax \n\t"
	" adcl %%ecx,%%edx "
	: "+A" (value)
	: "c" (scale)
	: "%esi", "%edi");
	return value;
}
312
313 /*
314 * this routine basically does this...
315 * ts.tv_sec = nanos / 1000000000; create seconds
316 * ts.tv_nsec = nanos % 1000000000; create remainder nanos
317 */
318 inline static mach_timespec_t
319 nanos_to_timespec(uint64_t nanos)
320 {
321 union {
322 mach_timespec_t ts;
323 uint64_t u64;
324 } ret;
325 ret.u64 = nanos;
326 asm volatile("divl %1" : "+A" (ret.u64) : "r" (NSEC_PER_SEC));
327 return ret.ts;
328 }
329
// the following two routine perform the 96 bit arithmetic we need to
// convert generic absolute<->nanoseconds
// the multiply routine takes a uint64_t and a uint32_t and returns the result in a
// uint32_t[3] array. the divide routine takes this uint32_t[3] array and
// divides it by a uint32_t returning a uint64_t
//
// longmul: result[0..2] receive the low, middle and high 32-bit words
// of (*abstime) * multiplicand.  EBX is saved/restored by hand on the
// stack rather than listed as a clobber; EAX initially carries the
// abstime pointer and ECX the multiplicand.
inline static void
longmul(uint64_t	*abstime, uint32_t multiplicand, uint32_t *result)
{
	asm volatile(
	" pushl %%ebx \n\t"
	" movl %%eax,%%ebx \n\t"
	" movl (%%eax),%%eax \n\t"
	" mull %%ecx \n\t"
	" xchg %%eax,%%ebx \n\t"
	" pushl %%edx \n\t"
	" movl 4(%%eax),%%eax \n\t"
	" mull %%ecx \n\t"
	" movl %2,%%ecx \n\t"
	" movl %%ebx,(%%ecx) \n\t"
	" popl %%ebx \n\t"
	" addl %%ebx,%%eax \n\t"
	" popl %%ebx \n\t"
	" movl %%eax,4(%%ecx) \n\t"
	" adcl $0,%%edx \n\t"
	" movl %%edx,8(%%ecx) // and save it"
	: : "a"(abstime), "c"(multiplicand), "m"(result));

}
358
// longdiv: divide the 96-bit value in numer[0..2] (low word first) by
// denom, returning the 64-bit quotient in EDX:EAX ("=A").
// NOTE(review): divl faults (#DE) if numer[2] >= denom, i.e. if the
// quotient does not fit in 64 bits -- callers must guarantee this.
inline static uint64_t
longdiv(uint32_t *numer, uint32_t denom)
{
	uint64_t result;
	asm volatile(
	" pushl %%ebx \n\t"
	" movl %%eax,%%ebx \n\t"
	" movl 8(%%eax),%%edx \n\t"
	" movl 4(%%eax),%%eax \n\t"
	" divl %%ecx \n\t"
	" xchg %%ebx,%%eax \n\t"
	" movl (%%eax),%%eax \n\t"
	" divl %%ecx \n\t"
	" xchg %%ebx,%%edx \n\t"
	" popl %%ebx \n\t"
	: "=A"(result) : "a"(numer),"c"(denom));
	return result;
}
377
#define PIT_Mode4	0x08		/* turn on mode 4 one shot software trigger */

// Enable or disable timer 2.
// Port 0x61 (97) is the system control port: bit 0 gates PIT timer 2,
// bit 1 drives the speaker.  Enable clears the speaker data bit
// (and $253 == ~0x02) and sets the timer-2 gate (or $1).
inline static void
enable_PIT2()
{
	asm volatile(
	" inb $97,%%al \n\t"
	" and $253,%%al \n\t"
	" or $1,%%al \n\t"
	" outb %%al,$97 \n\t"
	: : : "%al" );
}
391
// Counterpart to enable_PIT2: clears the speaker data bit (bit 1) of
// port 0x61.  NOTE(review): the timer-2 gate (bit 0) is NOT cleared
// here, only bit 1 -- confirm this is the intended "disable".
inline static void
disable_PIT2()
{
	asm volatile(
	" inb $97,%%al \n\t"
	" and $253,%%al \n\t"
	" outb %%al,$97 \n\t"
	: : : "%al" );
}
401
// ctimeRDTSC() routine sets up counter 2 to count down 1/20 of a second
// it pauses until the value is latched in the counter
// and then reads the time stamp counter to return to the caller
// utility routine
// Code to calculate how many processor cycles are in a second...
inline static void
set_PIT2(int value)
{
// first, tell the clock we are going to write 16 bits to the counter and enable one-shot mode
// then write the two bytes into the clock register.
// loop until the value is "realized" in the clock, this happens on the next tick
//
// ($184 == 0xB8: select counter 2, r/w low then high byte, mode 4;
//  port $67 == 0x43 is the PIT control port, $66 == 0x42 is counter 2)
	asm volatile(
	" movb $184,%%al \n\t"
	" outb %%al,$67 \n\t"
	" movb %%dl,%%al \n\t"
	" outb %%al,$66 \n\t"
	" movb %%dh,%%al \n\t"
	" outb %%al,$66 \n"
	"1: inb $66,%%al \n\t"
	" inb $66,%%al \n\t"
	" cmp %%al,%%dh \n\t"
	" jne 1b"
	: : "d"(value) : "%al");
}
427
inline static uint64_t
get_PIT2(unsigned int *value)
{
	// this routine first latches the time, then gets the time stamp so we know
	// how long the read will take later. Reads
	// ($128 == 0x80 latches counter 2 via the control port; the TSC is
	//  read immediately after the latch; EAX is pushed/popped around the
	//  port reads so the 64-bit TSC in EDX:EAX survives.  The latched
	//  count is assembled into CL/CH and returned through *value.)
	register uint64_t result;
	asm volatile(
	" xorl %%ecx,%%ecx \n\t"
	" movb $128,%%al \n\t"
	" outb %%al,$67 \n\t"
	" rdtsc \n\t"
	" pushl %%eax \n\t"
	" inb $66,%%al \n\t"
	" movb %%al,%%cl \n\t"
	" inb $66,%%al \n\t"
	" movb %%al,%%ch \n\t"
	" popl %%eax "
	: "=A"(result), "=c"(*value));
	return result;
}
448
449 static uint32_t
450 timeRDTSC(void)
451 {
452 uint64_t latchTime;
453 uint64_t saveTime,intermediate;
454 unsigned int timerValue,x;
455 boolean_t int_enabled;
456 uint64_t fact[6] = { 2000011734ll,
457 2000045259ll,
458 2000078785ll,
459 2000112312ll,
460 2000145841ll,
461 2000179371ll};
462
463 int_enabled = ml_set_interrupts_enabled(FALSE);
464
465 enable_PIT2(); // turn on PIT2
466 set_PIT2(0); // reset timer 2 to be zero
467 latchTime = rdtsc_64(); // get the time stamp to time
468 latchTime = get_PIT2(&timerValue) - latchTime; // time how long this takes
469 set_PIT2(59658); // set up the timer to count 1/20th a second
470 saveTime = rdtsc_64(); // now time how ling a 20th a second is...
471 get_PIT2(&x);
472 do { get_PIT2(&timerValue); x = timerValue;} while (timerValue > x);
473 do {
474 intermediate = get_PIT2(&timerValue);
475 if (timerValue>x) printf("Hey we are going backwards! %d, %d\n",timerValue,x);
476 x = timerValue;
477 } while ((timerValue != 0) && (timerValue >5));
478 printf("Timer value:%d\n",timerValue);
479 printf("intermediate 0x%08x:0x%08x\n",intermediate);
480 printf("saveTime 0x%08x:0x%08x\n",saveTime);
481
482 intermediate = intermediate - saveTime; // raw # of tsc's it takes for about 1/20 second
483 intermediate = intermediate * fact[timerValue]; // actual time spent
484 intermediate = intermediate / 2000000000ll; // rescale so its exactly 1/20 a second
485 intermediate = intermediate + latchTime; // add on our save fudge
486 set_PIT2(0); // reset timer 2 to be zero
487 disable_PIT2(0); // turn off PIT 2
488 ml_set_interrupts_enabled(int_enabled);
489 return intermediate;
490 }
491
492 static uint64_t
493 rdtsctime_to_nanoseconds( void )
494 {
495 uint32_t numer;
496 uint32_t denom;
497 uint64_t abstime;
498
499 uint32_t intermediate[3];
500
501 numer = rtclock.timebase_const.numer;
502 denom = rtclock.timebase_const.denom;
503 abstime = rdtsc_64();
504 if (denom == 0xFFFFFFFF) {
505 abstime = fast_get_nano_from_abs(abstime, numer);
506 } else {
507 longmul(&abstime, numer, intermediate);
508 abstime = longdiv(intermediate, denom);
509 }
510 return abstime;
511 }
512
513 inline static mach_timespec_t
514 rdtsc_to_timespec(void)
515 {
516 uint64_t currNanos;
517 currNanos = rdtsctime_to_nanoseconds();
518 return nanos_to_timespec(currNanos);
519 }
520
521 /*
522 * Initialize non-zero clock structure values.
523 */
524 void
525 rtc_setvals(
526 unsigned int new_clknum,
527 clock_res_t new_ires
528 )
529 {
530 unsigned int timeperclk;
531 unsigned int scale0;
532 unsigned int scale1;
533 unsigned int res;
534
535 clknum = new_clknum;
536 rtc_intr_freq = (NSEC_PER_SEC / new_ires);
537 rtc_intr_hertz = rtc_intr_freq / HZ;
538 clks_per_int = (clknum + (rtc_intr_freq / 2)) / rtc_intr_freq;
539 clks_per_int_99 = clks_per_int - clks_per_int/100;
540
541 /*
542 * The following calculations are done with scaling integer operations
543 * in order that the integer results are accurate to the lsb.
544 */
545 timeperclk = div_scale(ZANO, clknum, &scale0); /* 838.105647 nsec */
546
547 time_per_clk = mul_scale(ZHZ, timeperclk, &scale1); /* 83810 */
548 if (scale0 > scale1)
549 time_per_clk >>= (scale0 - scale1);
550 else if (scale0 < scale1)
551 panic("rtc_clock: time_per_clk overflow\n");
552
553 /*
554 * Notice that rtclock.intr_nsec is signed ==> use unsigned int res
555 */
556 res = mul_scale(clks_per_int, timeperclk, &scale1); /* 10000276 */
557 if (scale0 > scale1)
558 rtclock.intr_nsec = res >> (scale0 - scale1);
559 else
560 panic("rtc_clock: rtclock.intr_nsec overflow\n");
561
562 rtc_intr_count = 1;
563 RtcDelt = rtclock.intr_nsec/2;
564 }
565
566 /*
567 * Configure the real-time clock device. Return success (1)
568 * or failure (0).
569 */
570
571 int
572 sysclk_config(void)
573 {
574 int RtcFlag;
575 int pic;
576
577 #if NCPUS > 1
578 mp_disable_preemption();
579 if (cpu_number() != master_cpu) {
580 mp_enable_preemption();
581 return(1);
582 }
583 mp_enable_preemption();
584 #endif
585 /*
586 * Setup device.
587 */
588 pic = 0; /* FIXME .. interrupt registration moved to AppleIntelClock */
589
590
591 /*
592 * We should attempt to test the real-time clock
593 * device here. If it were to fail, we should panic
594 * the system.
595 */
596 RtcFlag = /* test device */1;
597 printf("realtime clock configured\n");
598
599 simple_lock_init(&rtclock.lock, ETAP_NO_TRACE);
600 return (RtcFlag);
601 }
602
603 /*
604 * Initialize the real-time clock device. Return success (1)
605 * or failure (0). Since the real-time clock is required to
606 * provide canonical mapped time, we allocate a page to keep
607 * the clock time value. In addition, various variables used
608 * to support the clock are initialized. Note: the clock is
609 * not started until rtclock_reset is called.
610 */
611 int
612 sysclk_init(void)
613 {
614 vm_offset_t *vp;
615 #if NCPUS > 1
616 mp_disable_preemption();
617 if (cpu_number() != master_cpu) {
618 mp_enable_preemption();
619 return(1);
620 }
621 mp_enable_preemption();
622 #endif
623
624 RtcTime = &rtclock.time;
625 rtc_setvals( CLKNUM, RTC_MINRES ); /* compute constants */
626 rtc_set_cyc_per_sec(); /* compute number of tsc beats per second */
627 clock_timebase_init();
628 return (1);
629 }
630
631 static volatile unsigned int last_ival = 0;
632
633 /*
634 * Get the clock device time. This routine is responsible
635 * for converting the device's machine dependent time value
636 * into a canonical mach_timespec_t value.
637 */
638 kern_return_t
639 sysclk_gettime(
640 mach_timespec_t *cur_time) /* OUT */
641 {
642 if (!RtcTime) {
643 /* Uninitialized */
644 cur_time->tv_nsec = 0;
645 cur_time->tv_sec = 0;
646 return (KERN_SUCCESS);
647 }
648
649 *cur_time = rdtsc_to_timespec();
650 return (KERN_SUCCESS);
651 }
652
653 kern_return_t
654 sysclk_gettime_internal(
655 mach_timespec_t *cur_time) /* OUT */
656 {
657 if (!RtcTime) {
658 /* Uninitialized */
659 cur_time->tv_nsec = 0;
660 cur_time->tv_sec = 0;
661 return (KERN_SUCCESS);
662 }
663 *cur_time = rdtsc_to_timespec();
664 return (KERN_SUCCESS);
665 }
666
667 /*
668 * Get the clock device time when ALL interrupts are already disabled.
669 * Same as above except for turning interrupts off and on.
670 * This routine is responsible for converting the device's machine dependent
671 * time value into a canonical mach_timespec_t value.
672 */
673 void
674 sysclk_gettime_interrupts_disabled(
675 mach_timespec_t *cur_time) /* OUT */
676 {
677 if (!RtcTime) {
678 /* Uninitialized */
679 cur_time->tv_nsec = 0;
680 cur_time->tv_sec = 0;
681 return;
682 }
683 *cur_time = rdtsc_to_timespec();
684 }
685
686 // utility routine
687 // Code to calculate how many processor cycles are in a second...
688
689 static void
690 rtc_set_cyc_per_sec()
691 {
692
693 uint32_t twen_cycles;
694 uint32_t cycles;
695
696 twen_cycles = timeRDTSC();
697 if (twen_cycles> (1000000000/20)) {
698 // we create this value so that you can use just a "fast" multiply to get nanos
699 rtc_quant_scale = create_mul_quant_GHZ(twen_cycles);
700 rtclock.timebase_const.numer = rtc_quant_scale; // because ctimeRDTSC gives us 1/20 a seconds worth
701 rtclock.timebase_const.denom = 0xffffffff; // so that nanoseconds = (TSC * numer) / denom
702
703 } else {
704 rtclock.timebase_const.numer = 1000000000/20; // because ctimeRDTSC gives us 1/20 a seconds worth
705 rtclock.timebase_const.denom = twen_cycles; // so that nanoseconds = (TSC * numer) / denom
706 }
707 cycles = twen_cycles; // number of cycles in 1/20th a second
708 rtc_cyc_per_sec = cycles*20; // multiply it by 20 and we are done.. BUT we also want to calculate...
709
710 cycles = ((rtc_cyc_per_sec + UI_CPUFREQ_ROUNDING_FACTOR - 1) / UI_CPUFREQ_ROUNDING_FACTOR) * UI_CPUFREQ_ROUNDING_FACTOR;
711 gPEClockFrequencyInfo.cpu_clock_rate_hz = cycles;
712 DISPLAYVALUE(rtc_set_cyc_per_sec,rtc_cyc_per_sec);
713 DISPLAYEXIT(rtc_set_cyc_per_sec);
714 }
715
716 void
717 clock_get_system_microtime(
718 uint32_t *secs,
719 uint32_t *microsecs)
720 {
721 mach_timespec_t now;
722
723 sysclk_gettime(&now);
724
725 *secs = now.tv_sec;
726 *microsecs = now.tv_nsec / NSEC_PER_USEC;
727 }
728
729 void
730 clock_get_system_nanotime(
731 uint32_t *secs,
732 uint32_t *nanosecs)
733 {
734 mach_timespec_t now;
735
736 sysclk_gettime(&now);
737
738 *secs = now.tv_sec;
739 *nanosecs = now.tv_nsec;
740 }
741
742 /*
743 * Get clock device attributes.
744 */
745 kern_return_t
746 sysclk_getattr(
747 clock_flavor_t flavor,
748 clock_attr_t attr, /* OUT */
749 mach_msg_type_number_t *count) /* IN/OUT */
750 {
751 spl_t s;
752
753 if (*count != 1)
754 return (KERN_FAILURE);
755 switch (flavor) {
756
757 case CLOCK_GET_TIME_RES: /* >0 res */
758 #if (NCPUS == 1)
759 LOCK_RTC(s);
760 *(clock_res_t *) attr = 1000;
761 UNLOCK_RTC(s);
762 break;
763 #endif /* (NCPUS == 1) */
764 case CLOCK_ALARM_CURRES: /* =0 no alarm */
765 LOCK_RTC(s);
766 *(clock_res_t *) attr = rtclock.intr_nsec;
767 UNLOCK_RTC(s);
768 break;
769
770 case CLOCK_ALARM_MAXRES:
771 *(clock_res_t *) attr = RTC_MAXRES;
772 break;
773
774 case CLOCK_ALARM_MINRES:
775 *(clock_res_t *) attr = RTC_MINRES;
776 break;
777
778 default:
779 return (KERN_INVALID_VALUE);
780 }
781 return (KERN_SUCCESS);
782 }
783
784 /*
785 * Set clock device attributes.
786 */
787 kern_return_t
788 sysclk_setattr(
789 clock_flavor_t flavor,
790 clock_attr_t attr, /* IN */
791 mach_msg_type_number_t count) /* IN */
792 {
793 spl_t s;
794 int freq;
795 int adj;
796 clock_res_t new_ires;
797
798 if (count != 1)
799 return (KERN_FAILURE);
800 switch (flavor) {
801
802 case CLOCK_GET_TIME_RES:
803 case CLOCK_ALARM_MAXRES:
804 case CLOCK_ALARM_MINRES:
805 return (KERN_FAILURE);
806
807 case CLOCK_ALARM_CURRES:
808 new_ires = *(clock_res_t *) attr;
809
810 /*
811 * The new resolution must be within the predetermined
812 * range. If the desired resolution cannot be achieved
813 * to within 0.1%, an error is returned.
814 */
815 if (new_ires < RTC_MAXRES || new_ires > RTC_MINRES)
816 return (KERN_INVALID_VALUE);
817 freq = (NSEC_PER_SEC / new_ires);
818 adj = (((clknum % freq) * new_ires) / clknum);
819 if (adj > (new_ires / 1000))
820 return (KERN_INVALID_VALUE);
821 /*
822 * Record the new alarm resolution which will take effect
823 * on the next HZ aligned clock tick.
824 */
825 LOCK_RTC(s);
826 if ( freq != rtc_intr_freq ) {
827 rtclock.new_ires = new_ires;
828 new_clknum = clknum;
829 }
830 UNLOCK_RTC(s);
831 return (KERN_SUCCESS);
832
833 default:
834 return (KERN_INVALID_VALUE);
835 }
836 }
837
838 /*
839 * Set next alarm time for the clock device. This call
840 * always resets the time to deliver an alarm for the
841 * clock.
842 */
843 void
844 sysclk_setalarm(
845 mach_timespec_t *alarm_time)
846 {
847 spl_t s;
848
849 LOCK_RTC(s);
850 rtclock.alarm_time = *alarm_time;
851 RtcAlrm = &rtclock.alarm_time;
852 UNLOCK_RTC(s);
853 }
854
855 /*
856 * Configure the calendar clock.
857 */
858 int
859 calend_config(void)
860 {
861 return bbc_config();
862 }
863
864 /*
865 * Initialize calendar clock.
866 */
867 int
868 calend_init(void)
869 {
870 return (1);
871 }
872
873 /*
874 * Get the current clock time.
875 */
876 kern_return_t
877 calend_gettime(
878 mach_timespec_t *cur_time) /* OUT */
879 {
880 spl_t s;
881
882 LOCK_RTC(s);
883 if (!rtclock.calend_is_set) {
884 UNLOCK_RTC(s);
885 return (KERN_FAILURE);
886 }
887
888 (void) sysclk_gettime_internal(cur_time);
889 ADD_MACH_TIMESPEC(cur_time, &rtclock.calend_offset);
890 UNLOCK_RTC(s);
891
892 return (KERN_SUCCESS);
893 }
894
895 void
896 clock_get_calendar_microtime(
897 uint32_t *secs,
898 uint32_t *microsecs)
899 {
900 mach_timespec_t now;
901
902 calend_gettime(&now);
903
904 *secs = now.tv_sec;
905 *microsecs = now.tv_nsec / NSEC_PER_USEC;
906 }
907
908 void
909 clock_get_calendar_nanotime(
910 uint32_t *secs,
911 uint32_t *nanosecs)
912 {
913 mach_timespec_t now;
914
915 calend_gettime(&now);
916
917 *secs = now.tv_sec;
918 *nanosecs = now.tv_nsec;
919 }
920
/*
 * Set the calendar clock to secs/microsecs.  The calendar offset is
 * recorded relative to the current system clock; the battery-backed
 * clock is then updated (outside the lock) and calendar-change
 * listeners are notified.
 */
void
clock_set_calendar_microtime(
	uint32_t			secs,
	uint32_t			microsecs)
{
	mach_timespec_t		new_time, curr_time;
	spl_t		s;

	LOCK_RTC(s);
	(void) sysclk_gettime_internal(&curr_time);
	rtclock.calend_offset.tv_sec = new_time.tv_sec = secs;
	rtclock.calend_offset.tv_nsec = new_time.tv_nsec = microsecs * NSEC_PER_USEC;
	SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);	/* offset = new - now */
	rtclock.calend_is_set = TRUE;
	UNLOCK_RTC(s);

	(void) bbc_settime(&new_time);

	host_notify_calendar_change();
}
941
942 /*
943 * Get clock device attributes.
944 */
945 kern_return_t
946 calend_getattr(
947 clock_flavor_t flavor,
948 clock_attr_t attr, /* OUT */
949 mach_msg_type_number_t *count) /* IN/OUT */
950 {
951 spl_t s;
952
953 if (*count != 1)
954 return (KERN_FAILURE);
955 switch (flavor) {
956
957 case CLOCK_GET_TIME_RES: /* >0 res */
958 #if (NCPUS == 1)
959 LOCK_RTC(s);
960 *(clock_res_t *) attr = 1000;
961 UNLOCK_RTC(s);
962 break;
963 #else /* (NCPUS == 1) */
964 LOCK_RTC(s);
965 *(clock_res_t *) attr = rtclock.intr_nsec;
966 UNLOCK_RTC(s);
967 break;
968 #endif /* (NCPUS == 1) */
969
970 case CLOCK_ALARM_CURRES: /* =0 no alarm */
971 case CLOCK_ALARM_MINRES:
972 case CLOCK_ALARM_MAXRES:
973 *(clock_res_t *) attr = 0;
974 break;
975
976 default:
977 return (KERN_INVALID_VALUE);
978 }
979 return (KERN_SUCCESS);
980 }
981
982 #define tickadj (40*NSEC_PER_USEC) /* "standard" skew, ns / tick */
983 #define bigadj (NSEC_PER_SEC) /* use 10x skew above bigadj ns */
984
985 uint32_t
986 clock_set_calendar_adjtime(
987 int32_t *secs,
988 int32_t *microsecs)
989 {
990 int64_t total, ototal;
991 uint32_t interval = 0;
992 spl_t s;
993
994 total = (int64_t)*secs * NSEC_PER_SEC + *microsecs * NSEC_PER_USEC;
995
996 LOCK_RTC(s);
997 ototal = rtclock.calend_adjtotal;
998
999 if (total != 0) {
1000 int32_t delta = tickadj;
1001
1002 if (total > 0) {
1003 if (total > bigadj)
1004 delta *= 10;
1005 if (delta > total)
1006 delta = total;
1007 }
1008 else {
1009 if (total < -bigadj)
1010 delta *= 10;
1011 delta = -delta;
1012 if (delta < total)
1013 delta = total;
1014 }
1015
1016 rtclock.calend_adjtotal = total;
1017 rtclock.calend_adjdelta = delta;
1018
1019 interval = (NSEC_PER_SEC / HZ);
1020 }
1021 else
1022 rtclock.calend_adjdelta = rtclock.calend_adjtotal = 0;
1023
1024 UNLOCK_RTC(s);
1025
1026 if (ototal == 0)
1027 *secs = *microsecs = 0;
1028 else {
1029 *secs = ototal / NSEC_PER_SEC;
1030 *microsecs = ototal % NSEC_PER_SEC;
1031 }
1032
1033 return (interval);
1034 }
1035
/*
 * Apply one step of a pending adjtime slew: shift the calendar offset
 * by calend_adjdelta, reduce the outstanding total, and clamp the
 * next delta so the final step never overshoots.  Returns the
 * interval (nsec) until the next call, or 0 when the adjustment is
 * complete.
 */
uint32_t
clock_adjust_calendar(void)
{
	uint32_t		interval = 0;
	int32_t		delta;
	spl_t		s;

	LOCK_RTC(s);
	delta = rtclock.calend_adjdelta;
	ADD_MACH_TIMESPEC_NSEC(&rtclock.calend_offset, delta);

	rtclock.calend_adjtotal -= delta;

	/* clamp the final step to whatever total remains */
	if (delta > 0) {
		if (delta > rtclock.calend_adjtotal)
			rtclock.calend_adjdelta = rtclock.calend_adjtotal;
	}
	else
	if (delta < 0) {
		if (delta < rtclock.calend_adjtotal)
			rtclock.calend_adjdelta = rtclock.calend_adjtotal;
	}

	if (rtclock.calend_adjdelta != 0)
		interval = (NSEC_PER_SEC / HZ);

	UNLOCK_RTC(s);

	return (interval);
}
1066
/*
 * Initialize the calendar offset from the battery-backed clock,
 * unless the calendar has already been set.  Silently does nothing
 * if the battery-backed clock cannot be read.
 */
void
clock_initialize_calendar(void)
{
	mach_timespec_t	bbc_time, curr_time;
	spl_t		s;

	if (bbc_gettime(&bbc_time) != KERN_SUCCESS)
		return;

	LOCK_RTC(s);
	if (!rtclock.calend_is_set) {
		(void) sysclk_gettime_internal(&curr_time);
		rtclock.calend_offset = bbc_time;
		SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);	/* offset = bbc - now */
		rtclock.calend_is_set = TRUE;
	}
	UNLOCK_RTC(s);

	host_notify_calendar_change();
}
1087
/*
 * Report the abstime -> nanoseconds timebase ratio.
 * NOTE(review): both branches return numer == denom (a 1:1 ratio);
 * presumably the abstime handed to users is already in nanoseconds by
 * then -- confirm against the abstime producers in this file.
 */
void
clock_timebase_info(
	mach_timebase_info_t	info)
{
	spl_t	s;

	LOCK_RTC(s);
	if (rtclock.timebase_const.denom == 0xFFFFFFFF) {
		/* fast (multiply/shift) conversion in use */
		info->numer = info->denom = rtc_quant_scale;
	} else {
		info->numer = info->denom = 1;
	}
	UNLOCK_RTC(s);
}
1102
1103 void
1104 clock_set_timer_deadline(
1105 uint64_t deadline)
1106 {
1107 spl_t s;
1108
1109 LOCK_RTC(s);
1110 rtclock.timer_deadline = deadline;
1111 rtclock.timer_is_set = TRUE;
1112 UNLOCK_RTC(s);
1113 }
1114
1115 void
1116 clock_set_timer_func(
1117 clock_timer_func_t func)
1118 {
1119 spl_t s;
1120
1121 LOCK_RTC(s);
1122 if (rtclock.timer_expire == NULL)
1123 rtclock.timer_expire = func;
1124 UNLOCK_RTC(s);
1125 }
1126
1127
1128
1129 /*
1130 * Load the count register and start the clock.
1131 */
1132 #define RTCLOCK_RESET() { \
1133 outb(PITCTL_PORT, PIT_C0|PIT_NDIVMODE|PIT_READMODE); \
1134 outb(PITCTR0_PORT, (clks_per_int & 0xff)); \
1135 outb(PITCTR0_PORT, (clks_per_int >> 8)); \
1136 }
1137
1138 /*
1139 * Reset the clock device. This causes the realtime clock
1140 * device to reload its mode and count value (frequency).
1141 * Note: the CPU should be calibrated
1142 * before starting the clock for the first time.
1143 */
1144
1145 void
1146 rtclock_reset(void)
1147 {
1148 int s;
1149
1150 #if NCPUS > 1
1151 mp_disable_preemption();
1152 if (cpu_number() != master_cpu) {
1153 mp_enable_preemption();
1154 return;
1155 }
1156 mp_enable_preemption();
1157 #endif /* NCPUS > 1 */
1158 LOCK_RTC(s);
1159 RTCLOCK_RESET();
1160 UNLOCK_RTC(s);
1161 }
1162
1163 /*
1164 * Real-time clock device interrupt. Called only on the
1165 * master processor. Updates the clock time and upcalls
1166 * into the higher level clock code to deliver alarms.
1167 */
int
rtclock_intr(struct i386_interrupt_state *regs)
{
	uint64_t	abstime;
	mach_timespec_t	clock_time;
	int		i;
	spl_t		s;
	boolean_t	usermode;

	/*
	 * Update clock time. Do the update so that the macro
	 * MTS_TO_TS() for reading the mapped time works (e.g.
	 * update in order: mtv_csec, mtv_time.tv_nsec, mtv_time.tv_sec).
	 */
	LOCK_RTC(s);
	abstime = rdtsctime_to_nanoseconds();	// get the time as of the TSC
	clock_time = nanos_to_timespec(abstime);	// turn it into a timespec
	/* nsec before sec -- ordering required by MTS_TO_TS readers */
	rtclock.time.tv_nsec = clock_time.tv_nsec;
	rtclock.time.tv_sec = clock_time.tv_sec;
	rtclock.abstime = abstime;

	/* note time now up to date */
	last_ival = 0;

	/*
	 * On a HZ-tick boundary: return 0 and adjust the clock
	 * alarm resolution (if requested). Otherwise return a
	 * non-zero value.
	 */
	if ((i = --rtc_intr_count) == 0) {
		/* Apply a pending interrupt-resolution change, if any. */
		if (rtclock.new_ires) {
			rtc_setvals(new_clknum, rtclock.new_ires);
			RTCLOCK_RESET();		/* lock clock register */
			rtclock.new_ires = 0;
		}
		rtc_intr_count = rtc_intr_hertz;
		/*
		 * hertz_tick() is called without the RTC lock held;
		 * lock ordering forbids holding it across the upcall.
		 * usermode: v8086 mode (EFL_VM) or CS privilege > 0.
		 */
		UNLOCK_RTC(s);
		usermode = (regs->efl & EFL_VM) || ((regs->cs & 0x03) != 0);
		hertz_tick(usermode, regs->eip);
		LOCK_RTC(s);
	}

	/*
	 * Fire the one-shot timer callout if its deadline has passed.
	 * The flag is cleared before dropping the lock so a concurrent
	 * clock_set_timer_deadline() can re-arm it.
	 */
	if (	rtclock.timer_is_set				&&
		rtclock.timer_deadline <= abstime		) {
		rtclock.timer_is_set = FALSE;
		UNLOCK_RTC(s);

		(*rtclock.timer_expire)(abstime);

		LOCK_RTC(s);
	}

	/*
	 * Perform alarm clock processing if needed.  The time
	 * passed up is incremented by a half-interrupt tick
	 * to trigger alarms closest to their desired times.
	 * The clock_alarm_intr() routine calls sysclk_setalrm()
	 * before returning if later alarms are pending.
	 */

	if (RtcAlrm && (RtcAlrm->tv_sec < RtcTime->tv_sec ||
			(RtcAlrm->tv_sec == RtcTime->tv_sec &&
			 RtcDelt >= RtcAlrm->tv_nsec - RtcTime->tv_nsec))) {
		clock_time.tv_sec = 0;
		clock_time.tv_nsec = RtcDelt;
		ADD_MACH_TIMESPEC (&clock_time, RtcTime);
		RtcAlrm = 0;
		UNLOCK_RTC(s);
		/*
		 * Call clock_alarm_intr() without RTC-lock.
		 * The lock ordering is always CLOCK-lock
		 * before RTC-lock.
		 */
		clock_alarm_intr(SYSTEM_CLOCK, &clock_time);
		LOCK_RTC(s);
	}

	UNLOCK_RTC(s);
	/* 0 exactly on a HZ-tick boundary, non-zero otherwise */
	return (i);
}
1248
1249 void
1250 clock_get_uptime(
1251 uint64_t *result)
1252 {
1253 *result = rdtsctime_to_nanoseconds();
1254 }
1255
1256 uint64_t
1257 mach_absolute_time(void)
1258 {
1259 return rdtsctime_to_nanoseconds();
1260 }
1261
1262 void
1263 clock_interval_to_deadline(
1264 uint32_t interval,
1265 uint32_t scale_factor,
1266 uint64_t *result)
1267 {
1268 uint64_t abstime;
1269
1270 clock_get_uptime(result);
1271
1272 clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
1273
1274 *result += abstime;
1275 }
1276
1277 void
1278 clock_interval_to_absolutetime_interval(
1279 uint32_t interval,
1280 uint32_t scale_factor,
1281 uint64_t *result)
1282 {
1283 *result = (uint64_t)interval * scale_factor;
1284 }
1285
1286 void
1287 clock_absolutetime_interval_to_deadline(
1288 uint64_t abstime,
1289 uint64_t *result)
1290 {
1291 clock_get_uptime(result);
1292
1293 *result += abstime;
1294 }
1295
1296 void
1297 absolutetime_to_nanoseconds(
1298 uint64_t abstime,
1299 uint64_t *result)
1300 {
1301 *result = abstime;
1302 }
1303
1304 void
1305 nanoseconds_to_absolutetime(
1306 uint64_t nanoseconds,
1307 uint64_t *result)
1308 {
1309 *result = nanoseconds;
1310 }
1311
1312 /*
1313 * Spin-loop delay primitives.
1314 */
1315 void
1316 delay_for_interval(
1317 uint32_t interval,
1318 uint32_t scale_factor)
1319 {
1320 uint64_t now, end;
1321
1322 clock_interval_to_deadline(interval, scale_factor, &end);
1323
1324 do {
1325 cpu_pause();
1326 now = mach_absolute_time();
1327 } while (now < end);
1328 }
1329
1330 void
1331 clock_delay_until(
1332 uint64_t deadline)
1333 {
1334 uint64_t now;
1335
1336 do {
1337 cpu_pause();
1338 now = mach_absolute_time();
1339 } while (now < deadline);
1340 }
1341
1342 void
1343 delay(
1344 int usec)
1345 {
1346 delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
1347 }
Cache object: 8dc601d2c677eb52ad666a8426fec626
|