/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

/*
 * File:	i386/rtclock.c
 * Purpose:	Routines for handling the machine dependent
 *		real-time clock. Historically, this clock is
 *		generated by the Intel 8254 Programmable Interval
 *		Timer, but local APIC timers are now used for
 *		this purpose with the master time reference being
 *		the CPU clock counted by the timestamp MSR.
 */

#include <platforms.h>
#include <mach_kdb.h>

#include <mach/mach_types.h>

#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/clock.h>
#include <kern/host_notify.h>
#include <kern/macro_help.h>
#include <kern/misc_protos.h>
#include <kern/spl.h>
#include <kern/assert.h>
#include <mach/vm_prot.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>		/* for kernel_map */
#include <i386/ipl.h>
#include <architecture/i386/pio.h>
#include <i386/misc_protos.h>
#include <i386/proc_reg.h>
#include <i386/machine_cpu.h>
#include <i386/mp.h>
#include <i386/cpuid.h>
#include <i386/cpu_data.h>
#include <i386/cpu_threads.h>
#include <i386/perfmon.h>
#include <i386/machine_routines.h>
#include <pexpert/pexpert.h>
#include <machine/limits.h>
#include <machine/commpage.h>
#include <sys/kdebug.h>
#include <i386/tsc.h>
#include <i386/hpet.h>
#include <i386/rtclock.h>

#define NSEC_PER_HZ		(NSEC_PER_SEC / 100)	/* nsec per tick */

#define UI_CPUFREQ_ROUNDING_FACTOR	10000000

int		rtclock_config(void);

int		rtclock_init(void);

uint64_t	rtc_decrementer_min;

void		rtclock_intr(x86_saved_state_t *regs);
static uint64_t	maxDec;		/* longest interval our hardware timer can handle (nsec) */

/* XXX this should really be in a header somewhere */
extern clock_timer_func_t	rtclock_timer_expire;

static void	rtc_set_timescale(uint64_t cycles);
static uint64_t	rtc_export_speed(uint64_t cycles);

extern void	_rtc_nanotime_store(
			uint64_t	tsc,
			uint64_t	nsec,
			uint32_t	scale,
			uint32_t	shift,
			rtc_nanotime_t	*dst);

extern uint64_t	_rtc_nanotime_read(
			rtc_nanotime_t	*rntp,
			int		slow);

rtc_nanotime_t	rtc_nanotime_info = {0, 0, 0, 0, 1, 0};

static uint32_t
deadline_to_decrementer(
	uint64_t	deadline,
	uint64_t	now)
{
	uint64_t	delta;

	if (deadline <= now)
		return rtc_decrementer_min;
	else {
		delta = deadline - now;
		return MIN(MAX(rtc_decrementer_min, delta), maxDec);
	}
}
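
/*
 * Example (illustrative): with rtc_decrementer_min at 1 usec, a deadline
 * 500 nsec in the future is clamped up to 1000 nsec, while a deadline
 * further away than maxDec nsec is clamped down to what the hardware
 * timer can represent; in the latter case an intermediate pop is taken
 * and the deadline is re-evaluated.
 */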

void
rtc_lapic_start_ticking(void)
{
	x86_lcpu_t	*lcpu = x86_lcpu();

	/*
	 * Force a complete re-evaluation of timer deadlines.
	 */
	lcpu->rtcPop = EndOfAllTime;
	etimer_resync_deadlines();
}

/*
 * Configure the real-time clock device. Return success (1)
 * or failure (0).
 */

int
rtclock_config(void)
{
	/* nothing to do */
	return (1);
}

/*
 * Nanotime/mach_absolute_time
 * ---------------------------
 * The timestamp counter (TSC) - which counts cpu clock cycles and can be read
 * efficiently by the kernel and in userspace - is the reference for all timing.
 * The cpu clock rate is platform-dependent and may stop or be reset when the
 * processor is napped/slept. As a result, nanotime is the software abstraction
 * used to maintain a monotonic clock, adjusted from an outside reference as needed.
 *
 * The kernel maintains nanotime information recording:
 *	- the ratio of tsc to nanoseconds
 *	  with this ratio expressed as a 32-bit scale and shift
 *	  (power of 2 divider);
 *	- { tsc_base, ns_base } pair of corresponding timestamps.
 *
 * The tuple {tsc_base, ns_base, scale, shift} is exported in the commpage
 * for the userspace nanotime routine to read.
 *
 * All of the routines which update the nanotime data are non-reentrant. This must
 * be guaranteed by the caller.
 */
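
/*
 * Worked example (illustrative figures, not a measured platform):
 * for a 2 GHz TSC,
 *
 *	scale = (NSEC_PER_SEC << 32) / 2000000000 = 0x80000000
 *
 * and, with shift == 32, the conversion performed by the readers is
 *
 *	ns = ns_base + (((tsc - tsc_base) * scale) >> 32)
 *
 * i.e. every two TSC ticks advance nanotime by one nanosecond.
 */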
static inline void
rtc_nanotime_set_commpage(rtc_nanotime_t *rntp)
{
	commpage_set_nanotime(rntp->tsc_base, rntp->ns_base, rntp->scale, rntp->shift);
}

/*
 * rtc_nanotime_init:
 *
 * Initialize the nanotime info from the base time.
 */
static inline void
_rtc_nanotime_init(rtc_nanotime_t *rntp, uint64_t base)
{
	uint64_t	tsc = rdtsc64();

	_rtc_nanotime_store(tsc, base, rntp->scale, rntp->shift, rntp);
}

static void
rtc_nanotime_init(uint64_t base)
{
	rtc_nanotime_t	*rntp = &rtc_nanotime_info;

	_rtc_nanotime_init(rntp, base);
	rtc_nanotime_set_commpage(rntp);
}

/*
 * rtc_nanotime_init_commpage:
 *
 * Call back from the commpage initialization to
 * cause the commpage data to be filled in once the
 * commpages have been created.
 */
void
rtc_nanotime_init_commpage(void)
{
	spl_t	s = splclock();

	rtc_nanotime_set_commpage(&rtc_nanotime_info);

	splx(s);
}

/*
 * rtc_nanotime_read:
 *
 * Returns the current nanotime value, accessible from any
 * context.
 */
static inline uint64_t
rtc_nanotime_read(void)
{
#if CONFIG_EMBEDDED
	if (gPEClockFrequencyInfo.timebase_frequency_hz > SLOW_TSC_THRESHOLD)
		return	_rtc_nanotime_read(&rtc_nanotime_info, 1);	/* slow processor */
	else
#endif
	return	_rtc_nanotime_read(&rtc_nanotime_info, 0);	/* assume fast processor */
}
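
/*
 * Note: the slow variant pairs with rtc_set_timescale() below, which, for
 * processors at or below SLOW_TSC_THRESHOLD, presses the shift field into
 * service to hold the TSC frequency itself rather than a power-of-2 shift.
 */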

/*
 * rtc_clock_napped:
 *
 * Invoked from power management when we have awoken from a nap (C3/C4)
 * during which the TSC lost counts.  The nanotime data is updated according
 * to the provided value which indicates the number of nanoseconds that the
 * TSC was not counting.
 *
 * The caller must guarantee non-reentrancy.
 */
void
rtc_clock_napped(
	uint64_t	delta)
{
	rtc_nanotime_t	*rntp = &rtc_nanotime_info;
	uint32_t	generation;

	assert(!ml_get_interrupts_enabled());
	generation = rntp->generation;
	rntp->generation = 0;
	rntp->ns_base += delta;
	rntp->generation = ((generation + 1) != 0) ? (generation + 1) : 1;
	rtc_nanotime_set_commpage(rntp);
}
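
/*
 * The generation field above acts as a seqlock: it is zeroed while the
 * update is in flight and bumped (skipping 0) once the new ns_base is in
 * place.  A minimal sketch of the reader-side protocol, assuming the
 * layout used here (the real reader is the assembly routine
 * _rtc_nanotime_read):
 *
 *	do {
 *		gen = rntp->generation;		// 0 => update in progress
 *		ns  = rntp->ns_base +
 *		      (((rdtsc64() - rntp->tsc_base) * rntp->scale) >> 32);
 *	} while (gen == 0 || gen != rntp->generation);
 */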

void
rtc_clock_stepping(__unused uint32_t new_frequency,
		   __unused uint32_t old_frequency)
{
	panic("rtc_clock_stepping unsupported");
}

void
rtc_clock_stepped(__unused uint32_t new_frequency,
		  __unused uint32_t old_frequency)
{
	panic("rtc_clock_stepped unsupported");
}

/*
 * rtc_sleep_wakeup:
 *
 * Invoked from power management when we have awoken from a sleep (S3)
 * and the TSC has been reset.  The nanotime data is updated based on
 * the passed-in value.
 *
 * The caller must guarantee non-reentrancy.
 */
void
rtc_sleep_wakeup(
	uint64_t	base)
{
	/*
	 * Reset nanotime.
	 * The timestamp counter will have been reset
	 * but nanotime (uptime) marches onward.
	 */
	rtc_nanotime_init(base);
}

/*
 * Initialize the real-time clock device.
 * In addition, various variables used to support the clock are initialized.
 */
int
rtclock_init(void)
{
	uint64_t	cycles;

	assert(!ml_get_interrupts_enabled());

	if (cpu_number() == master_cpu) {

		assert(tscFreq);
		rtc_set_timescale(tscFreq);

		/*
		 * Adjust and set the exported cpu speed.
		 */
		cycles = rtc_export_speed(tscFreq);

		/*
		 * Set min/max to actual.
		 * ACPI may update these later if speed-stepping is detected.
		 */
		gPEClockFrequencyInfo.cpu_frequency_min_hz = cycles;
		gPEClockFrequencyInfo.cpu_frequency_max_hz = cycles;

		/*
		 * Compute the longest interval we can represent.
		 */
		maxDec = tmrCvt(0x7fffffffULL, busFCvtt2n);
		kprintf("maxDec: %lld\n", maxDec);

		/* Minimum interval is 1usec */
		rtc_decrementer_min = deadline_to_decrementer(NSEC_PER_USEC, 0ULL);
		/* Point LAPIC interrupts to hardclock() */
		lapic_set_timer_func((i386_intr_func_t) rtclock_intr);

		clock_timebase_init();
		ml_init_lock_timeout();
	}

	rtc_lapic_start_ticking();

	return (1);
}

// Utility routine:
// compute the nanotime conversion factors from the number of processor
// cycles in a second.
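/*
 * The scale computed below is a 32.32 fixed-point ratio of nanoseconds
 * to TSC ticks (NSEC_PER_SEC << 32 divided by the cycles per second).
 * For slow processors (at or below SLOW_TSC_THRESHOLD) the shift field
 * is loaded with the cycle count itself rather than the usual power-of-2
 * shift of 32, for use by the slow-path reader (see rtc_nanotime_read()).
 */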
static void
rtc_set_timescale(uint64_t cycles)
{
	rtc_nanotime_info.scale = ((uint64_t)NSEC_PER_SEC << 32) / cycles;

	if (cycles <= SLOW_TSC_THRESHOLD)
		rtc_nanotime_info.shift = cycles;
	else
		rtc_nanotime_info.shift = 32;

	rtc_nanotime_init(0);
}

static uint64_t
rtc_export_speed(uint64_t cyc_per_sec)
{
	uint64_t	cycles;

	/* Round: */
	cycles = ((cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR/2))
			/ UI_CPUFREQ_ROUNDING_FACTOR)
				* UI_CPUFREQ_ROUNDING_FACTOR;

	/*
	 * Set current measured speed.
	 */
	if (cycles >= 0x100000000ULL) {
		gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFFUL;
	} else {
		gPEClockFrequencyInfo.cpu_clock_rate_hz = (unsigned long)cycles;
	}
	gPEClockFrequencyInfo.cpu_frequency_hz = cycles;

	kprintf("[RTCLOCK] frequency %llu (%llu)\n", cycles, cyc_per_sec);
	return (cycles);
}
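
/*
 * Rounding example (illustrative): with UI_CPUFREQ_ROUNDING_FACTOR at
 * 10 MHz, a measured 2394000000 Hz is reported as 2390000000 Hz and
 * 2396000000 Hz as 2400000000 Hz, i.e. round-to-nearest in 10 MHz steps:
 *
 *	cycles = ((2394000000 + 5000000) / 10000000) * 10000000
 *	       = 239 * 10000000 = 2390000000
 */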

void
clock_get_system_microtime(
	uint32_t	*secs,
	uint32_t	*microsecs)
{
	uint64_t	now = rtc_nanotime_read();
	uint32_t	remain;

	asm volatile(
			"divl %3"
				: "=a" (*secs), "=d" (remain)
				: "A" (now), "r" (NSEC_PER_SEC));
	asm volatile(
			"divl %3"
				: "=a" (*microsecs)
				: "0" (remain), "d" (0), "r" (NSEC_PER_USEC));
}
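
/*
 * The divl sequence above splits a 64-bit nanosecond count using the
 * 32-bit divide: the first divl takes its dividend in edx:eax (the "A"
 * constraint) and leaves seconds in eax and the sub-second remainder in
 * edx; the second divides that remainder by NSEC_PER_USEC (the "0"
 * constraint ties the input to output operand 0, i.e. eax).  A portable
 * C equivalent, for reference:
 *
 *	*secs      = (uint32_t)(now / NSEC_PER_SEC);
 *	remain     = (uint32_t)(now % NSEC_PER_SEC);
 *	*microsecs = remain / NSEC_PER_USEC;
 */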

void
clock_get_system_nanotime(
	uint32_t	*secs,
	uint32_t	*nanosecs)
{
	uint64_t	now = rtc_nanotime_read();

	asm volatile(
			"divl %3"
				: "=a" (*secs), "=d" (*nanosecs)
				: "A" (now), "r" (NSEC_PER_SEC));
}

void
clock_gettimeofday_set_commpage(
	uint64_t	abstime,
	uint64_t	epoch,
	uint64_t	offset,
	uint32_t	*secs,
	uint32_t	*microsecs)
{
	uint64_t	now = abstime;
	uint32_t	remain;

	now += offset;

	asm volatile(
			"divl %3"
				: "=a" (*secs), "=d" (remain)
				: "A" (now), "r" (NSEC_PER_SEC));
	asm volatile(
			"divl %3"
				: "=a" (*microsecs)
				: "0" (remain), "d" (0), "r" (NSEC_PER_USEC));

	*secs += epoch;

	commpage_set_timestamp(abstime - remain, *secs);
}
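
/*
 * On this platform mach_absolute_time() is simply nanotime (see below),
 * so the timebase ratio is 1/1: one absolute-time unit per nanosecond.
 */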

void
clock_timebase_info(
	mach_timebase_info_t	info)
{
	info->numer = info->denom = 1;
}

void
clock_set_timer_func(
	clock_timer_func_t	func)
{
	if (rtclock_timer_expire == NULL)
		rtclock_timer_expire = func;
}

/*
 * Real-time clock device interrupt.
 */
void
rtclock_intr(
	x86_saved_state_t	*tregs)
{
	uint64_t	rip;
	boolean_t	user_mode = FALSE;
	uint64_t	abstime;
	uint32_t	latency;
	x86_lcpu_t	*lcpu = x86_lcpu();

	assert(get_preemption_level() > 0);
	assert(!ml_get_interrupts_enabled());

	abstime = rtc_nanotime_read();
	latency = (uint32_t)(abstime - lcpu->rtcDeadline);
	if (abstime < lcpu->rtcDeadline)
		latency = 1;
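
	/*
	 * A 64-bit saved state implies the interrupt came from 64-bit
	 * user code (this kernel itself runs 32-bit), so user_mode is
	 * asserted without consulting cs in that branch; for a 32-bit
	 * state the privilege level in cs decides.
	 */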
	if (is_saved_state64(tregs) == TRUE) {
		x86_saved_state64_t	*regs;

		regs = saved_state64(tregs);

		user_mode = TRUE;
		rip = regs->isf.rip;
	} else {
		x86_saved_state32_t	*regs;

		regs = saved_state32(tregs);

		if (regs->cs & 0x03)
			user_mode = TRUE;
		rip = regs->eip;
	}

	/* Log the interrupt service latency (-ve value expected by tool) */
	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE,
		-latency, (uint32_t)rip, user_mode, 0, 0);

	/* call the generic etimer */
	etimer_intr(user_mode, rip);
}

/*
 * Request timer pop from the hardware
 */

int
setPop(
	uint64_t	time)
{
	uint64_t	now;
	uint32_t	decr;
	uint64_t	count;

	now = rtc_nanotime_read();	/* The time in nanoseconds */
	decr = deadline_to_decrementer(time, now);

	count = tmrCvt(decr, busFCvtn2t);
	lapic_set_timer(TRUE, one_shot, divide_by_1, (uint32_t) count);

	return decr;			/* Pass back what we set */
}
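
/*
 * setPop() converts the clamped nanosecond interval into bus-clock ticks
 * (tmrCvt() with the ns-to-ticks factor busFCvtn2t) before programming
 * the local APIC timer in one-shot mode; the caller gets back the
 * nanosecond interval that was actually requested.
 */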

uint64_t
mach_absolute_time(void)
{
	return rtc_nanotime_read();
}

void
clock_interval_to_absolutetime_interval(
	uint32_t	interval,
	uint32_t	scale_factor,
	uint64_t	*result)
{
	*result = (uint64_t)interval * scale_factor;
}
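
/*
 * Since absolute time is in nanoseconds here, scale_factor is just the
 * number of nanoseconds per input unit; e.g. (hypothetical call)
 *
 *	clock_interval_to_absolutetime_interval(5, NSEC_PER_USEC, &abs);
 *
 * yields abs == 5000, i.e. 5 usec of absolute time.
 */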

void
absolutetime_to_microtime(
	uint64_t	abstime,
	uint32_t	*secs,
	uint32_t	*microsecs)
{
	uint32_t	remain;

	asm volatile(
			"divl %3"
				: "=a" (*secs), "=d" (remain)
				: "A" (abstime), "r" (NSEC_PER_SEC));
	asm volatile(
			"divl %3"
				: "=a" (*microsecs)
				: "0" (remain), "d" (0), "r" (NSEC_PER_USEC));
}

void
absolutetime_to_nanotime(
	uint64_t	abstime,
	uint32_t	*secs,
	uint32_t	*nanosecs)
{
	asm volatile(
			"divl %3"
				: "=a" (*secs), "=d" (*nanosecs)
				: "A" (abstime), "r" (NSEC_PER_SEC));
}

void
nanotime_to_absolutetime(
	uint32_t	secs,
	uint32_t	nanosecs,
	uint64_t	*result)
{
	*result = ((uint64_t)secs * NSEC_PER_SEC) + nanosecs;
}

void
absolutetime_to_nanoseconds(
	uint64_t	abstime,
	uint64_t	*result)
{
	*result = abstime;
}

void
nanoseconds_to_absolutetime(
	uint64_t	nanoseconds,
	uint64_t	*result)
{
	*result = nanoseconds;
}

void
machine_delay_until(
	uint64_t	deadline)
{
	uint64_t	now;

	do {
		cpu_pause();
		now = mach_absolute_time();
	} while (now < deadline);
}