FreeBSD/Linux Kernel Cross Reference
sys/osfmk/i386/rtclock.c


    1 /*
    2  * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
    3  *
    4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
    5  * 
    6  * This file contains Original Code and/or Modifications of Original Code
    7  * as defined in and that are subject to the Apple Public Source License
    8  * Version 2.0 (the 'License'). You may not use this file except in
    9  * compliance with the License. The rights granted to you under the License
   10  * may not be used to create, or enable the creation or redistribution of,
   11  * unlawful or unlicensed copies of an Apple operating system, or to
   12  * circumvent, violate, or enable the circumvention or violation of, any
   13  * terms of an Apple operating system software license agreement.
   14  * 
   15  * Please obtain a copy of the License at
   16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
   17  * 
   18  * The Original Code and all software distributed under the License are
   19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
   20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
   21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
   22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
   23  * Please see the License for the specific language governing rights and
   24  * limitations under the License.
   25  * 
   26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
   27  */
   28 /*
   29  * @OSF_COPYRIGHT@
   30  */
   31 
   32 /*
   33  *      File:           i386/rtclock.c
   34  *      Purpose:        Routines for handling the machine dependent
   35  *                      real-time clock. Historically, this clock is
   36  *                      generated by the Intel 8254 Programmable Interval
   37  *                      Timer, but local apic timers are now used for
   38  *                      this purpose with the master time reference being
   39  *                      the cpu clock counted by the timestamp MSR.
   40  */
   41 
   42 #include <platforms.h>
   43 #include <mach_kdb.h>
   44 
   45 #include <mach/mach_types.h>
   46 
   47 #include <kern/cpu_data.h>
   48 #include <kern/cpu_number.h>
   49 #include <kern/clock.h>
   50 #include <kern/host_notify.h>
   51 #include <kern/macro_help.h>
   52 #include <kern/misc_protos.h>
   53 #include <kern/spl.h>
   54 #include <kern/assert.h>
   55 #include <kern/etimer.h>
   56 #include <mach/vm_prot.h>
   57 #include <vm/pmap.h>
   58 #include <vm/vm_kern.h>         /* for kernel_map */
   59 #include <architecture/i386/pio.h>
   60 #include <i386/machine_cpu.h>
   61 #include <i386/cpuid.h>
   62 #include <i386/cpu_threads.h>
   63 #include <i386/mp.h>
   64 #include <i386/machine_routines.h>
   65 #include <i386/pal_routines.h>
   66 #include <i386/proc_reg.h>
   67 #include <i386/misc_protos.h>
   68 #include <pexpert/pexpert.h>
   69 #include <machine/limits.h>
   70 #include <machine/commpage.h>
   71 #include <sys/kdebug.h>
   72 #include <i386/tsc.h>
   73 #include <i386/rtclock_protos.h>
   74 
   75 #define UI_CPUFREQ_ROUNDING_FACTOR      10000000
   76 
   77 int             rtclock_config(void);
   78 
   79 int             rtclock_init(void);
   80 
   81 uint64_t        tsc_rebase_abs_time = 0;
   82 
   83 static void     rtc_set_timescale(uint64_t cycles);
   84 static uint64_t rtc_export_speed(uint64_t cycles);
   85 
   86 void
   87 rtc_timer_start(void)
   88 {
   89         /*
   90          * Force a complete re-evaluation of timer deadlines.
   91          */
   92         etimer_resync_deadlines();
   93 }
   94 
   95 /*
   96  * tsc_to_nanoseconds:
   97  *
   98  * Basic routine to convert a raw 64 bit TSC value to a
   99  * 64 bit nanosecond value.  The conversion is implemented
  100  * based on the scale factor and an implicit 32 bit shift.
  101  */
  102 static inline uint64_t
  103 _tsc_to_nanoseconds(uint64_t value)
  104 {
  105 #if defined(__i386__)
  106     asm volatile("movl  %%edx,%%esi     ;"
  107                  "mull  %%ecx           ;"
  108                  "movl  %%edx,%%edi     ;"
  109                  "movl  %%esi,%%eax     ;"
  110                  "mull  %%ecx           ;"
  111                  "addl  %%edi,%%eax     ;"      
  112                  "adcl  $0,%%edx         "
  113                  : "+A" (value)
  114                  : "c" (pal_rtc_nanotime_info.scale)
  115                  : "esi", "edi");
  116 #elif defined(__x86_64__)
  117     asm volatile("mul %%rcx;"
  118                  "shrq $32, %%rax;"
  119                  "shlq $32, %%rdx;"
  120                  "orq %%rdx, %%rax;"
  121                  : "=a"(value)
  122                  : "a"(value), "c"(pal_rtc_nanotime_info.scale)
  123                  : "rdx", "cc" );
  124 #else
  125 #error Unsupported architecture
  126 #endif
  127 
  128     return (value);
  129 }
  130 
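For readers who prefer not to trace the inline assembly, the x86_64 branch above is equivalent to the portable sketch below (illustrative only, not part of rtclock.c; it assumes a compiler that provides the __uint128_t extension, as GCC and Clang do on 64-bit targets):

    #include <stdint.h>

    /*
     * Sketch of _tsc_to_nanoseconds(): multiply the 64-bit TSC delta by the
     * 32-bit scale factor and keep the upper 64 bits of the 96-bit product,
     * i.e. apply the implicit right shift by 32 mentioned in the comment above.
     */
    static inline uint64_t
    tsc_to_nanoseconds_sketch(uint64_t tsc_delta, uint32_t scale)
    {
            return (uint64_t)(((__uint128_t)tsc_delta * scale) >> 32);
    }
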
  131 static inline uint32_t
  132 _absolutetime_to_microtime(uint64_t abstime, clock_sec_t *secs, clock_usec_t *microsecs)
  133 {
  134         uint32_t remain;
  135 #if defined(__i386__)
  136         asm volatile(
  137                         "divl %3"
  138                                 : "=a" (*secs), "=d" (remain)
  139                                 : "A" (abstime), "r" (NSEC_PER_SEC));
  140         asm volatile(
  141                         "divl %3"
  142                                 : "=a" (*microsecs)
  143                                 : "0" (remain), "d" (0), "r" (NSEC_PER_USEC));
  144 #elif defined(__x86_64__)
  145         *secs = abstime / (uint64_t)NSEC_PER_SEC;
  146         remain = (uint32_t)(abstime % (uint64_t)NSEC_PER_SEC);
  147         *microsecs = remain / NSEC_PER_USEC;
  148 #else
  149 #error Unsupported architecture
  150 #endif
  151         return remain;
  152 }
  153 
  154 static inline void
  155 _absolutetime_to_nanotime(uint64_t abstime, clock_sec_t *secs, clock_usec_t *nanosecs)
  156 {
  157 #if defined(__i386__)
  158         asm volatile(
  159                         "divl %3"
  160                         : "=a" (*secs), "=d" (*nanosecs)
  161                         : "A" (abstime), "r" (NSEC_PER_SEC));
  162 #elif defined(__x86_64__)
  163         *secs = abstime / (uint64_t)NSEC_PER_SEC;
  164         *nanosecs = (clock_usec_t)(abstime % (uint64_t)NSEC_PER_SEC);
  165 #else
  166 #error Unsupported architecture
  167 #endif
  168 }
  169 
  170 /*
  171  * Configure the real-time clock device. Return success (1)
  172  * or failure (0).
  173  */
  174 
  175 int
  176 rtclock_config(void)
  177 {
  178         /* nothing to do */
  179         return (1);
  180 }
  181 
  182 
  183 /*
  184  * Nanotime/mach_absolute_time
  185  * -----------------------------
  186  * The timestamp counter (TSC) - which counts cpu clock cycles and can be read
  187  * efficiently by the kernel and in userspace - is the reference for all timing.
  188  * The cpu clock rate is platform-dependent and may stop or be reset when the
  189  * processor is napped/slept.  As a result, nanotime is the software abstraction
  190  * used to maintain a monotonic clock, adjusted from an outside reference as needed.
  191  *
  192  * The kernel maintains nanotime information recording:
  193  *      - the ratio of tsc to nanoseconds
  194  *        with this ratio expressed as a 32-bit scale and shift
  195  *        (power of 2 divider);
  196  *      - { tsc_base, ns_base } pair of corresponding timestamps.
  197  *
  198  * The tuple {tsc_base, ns_base, scale, shift} is exported in the commpage 
  199  * for the userspace nanotime routine to read.
  200  *
  201  * All of the routines which update the nanotime data are non-reentrant.  This must
  202  * be guaranteed by the caller.
  203  */
  204 static inline void
  205 rtc_nanotime_set_commpage(pal_rtc_nanotime_t *rntp)
  206 {
  207         commpage_set_nanotime(rntp->tsc_base, rntp->ns_base, rntp->scale, rntp->shift);
  208 }
  209 
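To make the scheme described in the comment above concrete, a consumer of the exported tuple computes the current nanotime roughly as sketched below (illustrative only, not part of rtclock.c; the real _rtc_nanotime_read() and the commpage routine additionally run a generation protocol so that a reader never observes a half-updated tuple, which this sketch omits):

    #include <stdint.h>

    /* Simplified view of the exported nanotime tuple. */
    struct nanotime_tuple_sketch {
            uint64_t tsc_base;      /* TSC value captured at the last rebase */
            uint64_t ns_base;       /* nanotime corresponding to tsc_base */
            uint32_t scale;         /* (NSEC_PER_SEC << 32) / tsc_freq */
            uint32_t shift;         /* 32 except on slow embedded TSCs */
    };

    static inline uint64_t
    nanotime_from_tuple_sketch(const struct nanotime_tuple_sketch *t, uint64_t tsc_now)
    {
            /* ns = ns_base + ((tsc_now - tsc_base) * scale) >> 32 */
            return t->ns_base +
                (uint64_t)(((__uint128_t)(tsc_now - t->tsc_base) * t->scale) >> 32);
    }
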
  210 /*
  211  * rtc_nanotime_init:
  212  *
  213  * Initialize the nanotime info from the base time.
  214  */
  215 static inline void
  216 _rtc_nanotime_init(pal_rtc_nanotime_t *rntp, uint64_t base)
  217 {
  218         uint64_t        tsc = rdtsc64();
  219 
  220         _pal_rtc_nanotime_store(tsc, base, rntp->scale, rntp->shift, rntp);
  221 }
  222 
  223 static void
  224 rtc_nanotime_init(uint64_t base)
  225 {
  226         _rtc_nanotime_init(&pal_rtc_nanotime_info, base);
  227         rtc_nanotime_set_commpage(&pal_rtc_nanotime_info);
  228 }
  229 
  230 /*
  231  * rtc_nanotime_init_commpage:
  232  *
  233  * Call back from the commpage initialization to
  234  * cause the commpage data to be filled in once the
  235  * commpages have been created.
  236  */
  237 void
  238 rtc_nanotime_init_commpage(void)
  239 {
  240         spl_t                   s = splclock();
  241 
  242         rtc_nanotime_set_commpage(&pal_rtc_nanotime_info);
  243         splx(s);
  244 }
  245 
  246 /*
  247  * rtc_nanotime_read:
  248  *
  249  * Returns the current nanotime value, accessible from any
  250  * context.
  251  */
  252 static inline uint64_t
  253 rtc_nanotime_read(void)
  254 {
  255         
  256 #if CONFIG_EMBEDDED
  257         if (gPEClockFrequencyInfo.timebase_frequency_hz > SLOW_TSC_THRESHOLD)
  258                 return  _rtc_nanotime_read(&rtc_nanotime_info, 1);      /* slow processor */
  259         else
  260 #endif
  261         return  _rtc_nanotime_read(&pal_rtc_nanotime_info, 0);  /* assume fast processor */
  262 }
  263 
  264 /*
  265  * rtc_clock_napped:
  266  *
  267  * Invoked from power management when we exit from a low C-State (>= C4)
  268  * and the TSC has stopped counting.  The nanotime data is updated according
  269  * to the provided value which represents the new value for nanotime.
  270  */
  271 void
  272 rtc_clock_napped(uint64_t base, uint64_t tsc_base)
  273 {
  274         pal_rtc_nanotime_t      *rntp = &pal_rtc_nanotime_info;
  275         uint64_t        oldnsecs;
  276         uint64_t        newnsecs;
  277         uint64_t        tsc;
  278 
  279         assert(!ml_get_interrupts_enabled());
  280         tsc = rdtsc64();
  281         oldnsecs = rntp->ns_base + _tsc_to_nanoseconds(tsc - rntp->tsc_base);
  282         newnsecs = base + _tsc_to_nanoseconds(tsc - tsc_base);
  283         
  284         /*
  285          * Only update the base values if time using the new base values
  286          * is later than the time using the old base values.
  287          */
  288         if (oldnsecs < newnsecs) {
  289             _pal_rtc_nanotime_store(tsc_base, base, rntp->scale, rntp->shift, rntp);
  290             rtc_nanotime_set_commpage(rntp);
  291                 trace_set_timebases(tsc_base, base);
  292         }
  293 }
  294 
  295 /*
  296  * Invoked from power management to correct the SFLM TSC entry drift problem:
  297  * a small delta is added to the tsc_base.  This is equivalent to nudging time
  298  * backwards.  The delta must be on the order of a TSC quantum so that callers
  299  * of mach_absolute_time() never see time going backwards.
  300  */
  301 void
  302 rtc_clock_adjust(uint64_t tsc_base_delta)
  303 {
  304     pal_rtc_nanotime_t  *rntp = &pal_rtc_nanotime_info;
  305 
  306     assert(!ml_get_interrupts_enabled());
  307     assert(tsc_base_delta < 100ULL);    /* i.e. it's small */
  308     _rtc_nanotime_adjust(tsc_base_delta, rntp);
  309     rtc_nanotime_set_commpage(rntp);
  310 }
  311 
  312 void
  313 rtc_clock_stepping(__unused uint32_t new_frequency,
  314                    __unused uint32_t old_frequency)
  315 {
  316         panic("rtc_clock_stepping unsupported");
  317 }
  318 
  319 void
  320 rtc_clock_stepped(__unused uint32_t new_frequency,
  321                   __unused uint32_t old_frequency)
  322 {
  323         panic("rtc_clock_stepped unsupported");
  324 }
  325 
  326 /*
  327  * rtc_sleep_wakeup:
  328  *
  329  * Invoked from power management when we have awoken from a sleep (S3)
  330  * and the TSC has been reset.  The nanotime data is updated based on
  331  * the passed in value.
  332  *
  333  * The caller must guarantee non-reentrancy.
  334  */
  335 void
  336 rtc_sleep_wakeup(
  337         uint64_t                base)
  338 {
  339         /* Set fixed configuration for lapic timers */
  340         rtc_timer->config();
  341 
  342         /*
  343          * Reset nanotime.
  344          * The timestamp counter will have been reset
  345          * but nanotime (uptime) marches onward.
  346          */
  347         rtc_nanotime_init(base);
  348 }
  349 
  350 /*
  351  * Initialize the real-time clock device.
  352  * In addition, various variables used to support the clock are initialized.
  353  */
  354 int
  355 rtclock_init(void)
  356 {
  357         uint64_t        cycles;
  358 
  359         assert(!ml_get_interrupts_enabled());
  360 
  361         if (cpu_number() == master_cpu) {
  362 
  363                 assert(tscFreq);
  364                 rtc_set_timescale(tscFreq);
  365 
  366                 /*
  367                  * Adjust and set the exported cpu speed.
  368                  */
  369                 cycles = rtc_export_speed(tscFreq);
  370 
  371                 /*
  372                  * Set min/max to actual.
  373                  * ACPI may update these later if speed-stepping is detected.
  374                  */
  375                 gPEClockFrequencyInfo.cpu_frequency_min_hz = cycles;
  376                 gPEClockFrequencyInfo.cpu_frequency_max_hz = cycles;
  377 
  378                 rtc_timer_init();
  379                 clock_timebase_init();
  380                 ml_init_lock_timeout();
  381         }
  382 
  383         /* Set fixed configuration for lapic timers */
  384         rtc_timer->config();
  385         rtc_timer_start();
  386 
  387         return (1);
  388 }
  389 
  390 // Utility routine:
  391 // set the TSC-to-nanosecond timescale from the number of processor cycles per second.
  392 
  393 static void
  394 rtc_set_timescale(uint64_t cycles)
  395 {
  396         pal_rtc_nanotime_t      *rntp = &pal_rtc_nanotime_info;
  397         rntp->scale = (uint32_t)(((uint64_t)NSEC_PER_SEC << 32) / cycles);
  398 
  399 #if CONFIG_EMBEDDED
  400         if (cycles <= SLOW_TSC_THRESHOLD)
  401                 rntp->shift = (uint32_t)cycles;
  402         else
  403 #endif
  404                 rntp->shift = 32;
  405 
  406         if (tsc_rebase_abs_time == 0)
  407                 tsc_rebase_abs_time = mach_absolute_time();
  408 
  409         rtc_nanotime_init(0);
  410 }
  411 
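As a concrete check of the scale arithmetic above, the following stand-alone sketch (illustrative only, not part of rtclock.c) uses a hypothetical 2 GHz TSC, for which the scale works out to exactly 2^31, i.e. half a nanosecond per tick:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            const uint64_t nsec_per_sec = 1000000000ULL;
            const uint64_t tsc_freq     = 2000000000ULL;  /* hypothetical 2 GHz TSC */

            /* Same computation as rtc_set_timescale(): (NSEC_PER_SEC << 32) / cycles. */
            uint32_t scale = (uint32_t)((nsec_per_sec << 32) / tsc_freq);  /* 1u << 31 */

            /* One second's worth of ticks should convert back to ~10^9 ns. */
            uint64_t ns = (uint64_t)(((__uint128_t)tsc_freq * scale) >> 32);

            printf("scale = %u, 2e9 ticks -> %llu ns\n",
                scale, (unsigned long long)ns);
            return 0;
    }
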
  412 static uint64_t
  413 rtc_export_speed(uint64_t cyc_per_sec)
  414 {
  415         uint64_t        cycles;
  416 
  417         /* Round: */
  418         cycles = ((cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR/2))
  419                         / UI_CPUFREQ_ROUNDING_FACTOR)
  420                                 * UI_CPUFREQ_ROUNDING_FACTOR;
  421 
  422         /*
  423          * Set current measured speed.
  424          */
  425         if (cycles >= 0x100000000ULL) {
  426             gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFFUL;
  427         } else {
  428             gPEClockFrequencyInfo.cpu_clock_rate_hz = (unsigned long)cycles;
  429         }
  430         gPEClockFrequencyInfo.cpu_frequency_hz = cycles;
  431 
  432         kprintf("[RTCLOCK] frequency %llu (%llu)\n", cycles, cyc_per_sec);
  433         return(cycles);
  434 }
  435 
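Worked example of the rounding above (illustrative numbers only): with UI_CPUFREQ_ROUNDING_FACTOR at 10,000,000 (10 MHz), a hypothetical measured rate of 2,394,123,456 cycles/sec becomes (2,394,123,456 + 5,000,000) / 10,000,000 = 239 by integer division, and 239 * 10,000,000 = 2,390,000,000 Hz, so the user-visible CPU frequency is reported to the nearest 10 MHz.
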
  436 void
  437 clock_get_system_microtime(
  438         clock_sec_t                     *secs,
  439         clock_usec_t            *microsecs)
  440 {
  441         uint64_t        now = rtc_nanotime_read();
  442 
  443         _absolutetime_to_microtime(now, secs, microsecs);
  444 }
  445 
  446 void
  447 clock_get_system_nanotime(
  448         clock_sec_t                     *secs,
  449         clock_nsec_t            *nanosecs)
  450 {
  451         uint64_t        now = rtc_nanotime_read();
  452 
  453         _absolutetime_to_nanotime(now, secs, nanosecs);
  454 }
  455 
  456 void
  457 clock_gettimeofday_set_commpage(
  458         uint64_t                                abstime,
  459         uint64_t                                epoch,
  460         uint64_t                                offset,
  461         clock_sec_t                             *secs,
  462         clock_usec_t                    *microsecs)
  463 {
  464         uint64_t        now = abstime + offset;
  465         uint32_t        remain;
  466 
  467         remain = _absolutetime_to_microtime(now, secs, microsecs);
  468 
  469         *secs += (clock_sec_t)epoch;
  470 
  471         commpage_set_timestamp(abstime - remain, *secs);
  472 }
  473 
  474 void
  475 clock_timebase_info(
  476         mach_timebase_info_t    info)
  477 {
  478         info->numer = info->denom =  1;
  479 }       
  480 
  481 /*
  482  * Real-time clock device interrupt.
  483  */
  484 void
  485 rtclock_intr(
  486         x86_saved_state_t       *tregs)
  487 {
  488         uint64_t        rip;
  489         boolean_t       user_mode = FALSE;
  490 
  491         assert(get_preemption_level() > 0);
  492         assert(!ml_get_interrupts_enabled());
  493 
  494         if (is_saved_state64(tregs) == TRUE) {
  495                 x86_saved_state64_t     *regs;
  496                   
  497                 regs = saved_state64(tregs);
  498 
  499                 if (regs->isf.cs & 0x03)
  500                         user_mode = TRUE;
  501                 rip = regs->isf.rip;
  502         } else {
  503                 x86_saved_state32_t     *regs;
  504 
  505                 regs = saved_state32(tregs);
  506 
  507                 if (regs->cs & 0x03)
  508                         user_mode = TRUE;
  509                 rip = regs->eip;
  510         }
  511 
  512         /* call the generic etimer */
  513         etimer_intr(user_mode, rip);
  514 }
  515 
  516 
  517 /*
  518  *      Request timer pop from the hardware 
  519  */
  520 
  521 uint64_t
  522 setPop(
  523         uint64_t time)
  524 {
  525         uint64_t        now;
  526         uint64_t        pop;
  527 
  528         /* 0 and EndOfAllTime are special-cases for "clear the timer" */
  529         if (time == 0 || time == EndOfAllTime ) {
  530                 time = EndOfAllTime;
  531                 now = 0;
  532                 pop = rtc_timer->set(0, 0);
  533         } else {
  534                 now = rtc_nanotime_read();      /* The time in nanoseconds */
  535                 pop = rtc_timer->set(time, now);
  536         }
  537 
  538         /* Record requested and actual deadlines set */
  539         x86_lcpu()->rtcDeadline = time;
  540         x86_lcpu()->rtcPop      = pop;
  541 
  542         return pop - now;
  543 }
  544 
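Because this file exports an identity mapping between absolute time and nanoseconds (see clock_timebase_info() above and nanoseconds_to_absolutetime() below), a hypothetical caller that wants a timer interrupt roughly delta_ns nanoseconds from now could program it as sketched here (illustrative only, not part of rtclock.c; in-kernel users normally go through the etimer layer rather than calling setPop() directly):

    /*
     * Hypothetical sketch: request a timer pop about delta_ns nanoseconds
     * from now and return how far out the hardware actually armed it
     * (setPop() returns pop - now).
     */
    static uint64_t
    arm_timer_in_sketch(uint64_t delta_ns)
    {
            uint64_t deadline = mach_absolute_time() + delta_ns;

            return setPop(deadline);
    }
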
  545 uint64_t
  546 mach_absolute_time(void)
  547 {
  548         return rtc_nanotime_read();
  549 }
  550 
  551 void
  552 clock_interval_to_absolutetime_interval(
  553         uint32_t                interval,
  554         uint32_t                scale_factor,
  555         uint64_t                *result)
  556 {
  557         *result = (uint64_t)interval * scale_factor;
  558 }
  559 
  560 void
  561 absolutetime_to_microtime(
  562         uint64_t                        abstime,
  563         clock_sec_t                     *secs,
  564         clock_usec_t            *microsecs)
  565 {
  566         _absolutetime_to_microtime(abstime, secs, microsecs);
  567 }
  568 
  569 void
  570 absolutetime_to_nanotime(
  571         uint64_t                        abstime,
  572         clock_sec_t                     *secs,
  573         clock_nsec_t            *nanosecs)
  574 {
  575         _absolutetime_to_nanotime(abstime, secs, nanosecs);
  576 }
  577 
  578 void
  579 nanotime_to_absolutetime(
  580         clock_sec_t                     secs,
  581         clock_nsec_t            nanosecs,
  582         uint64_t                        *result)
  583 {
  584         *result = ((uint64_t)secs * NSEC_PER_SEC) + nanosecs;
  585 }
  586 
  587 void
  588 absolutetime_to_nanoseconds(
  589         uint64_t                abstime,
  590         uint64_t                *result)
  591 {
  592         *result = abstime;
  593 }
  594 
  595 void
  596 nanoseconds_to_absolutetime(
  597         uint64_t                nanoseconds,
  598         uint64_t                *result)
  599 {
  600         *result = nanoseconds;
  601 }
  602 
  603 void
  604 machine_delay_until(
  605         uint64_t                deadline)
  606 {
  607         uint64_t                now;
  608 
  609         do {
  610                 cpu_pause();
  611                 now = mach_absolute_time();
  612         } while (now < deadline);
  613 }





This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.