FreeBSD/Linux Kernel Cross Reference
sys/osfmk/i386/rtclock.c


    1 /*
    2  * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
    3  *
    4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
    5  * 
    6  * This file contains Original Code and/or Modifications of Original Code
    7  * as defined in and that are subject to the Apple Public Source License
    8  * Version 2.0 (the 'License'). You may not use this file except in
    9  * compliance with the License. The rights granted to you under the License
   10  * may not be used to create, or enable the creation or redistribution of,
   11  * unlawful or unlicensed copies of an Apple operating system, or to
   12  * circumvent, violate, or enable the circumvention or violation of, any
   13  * terms of an Apple operating system software license agreement.
   14  * 
   15  * Please obtain a copy of the License at
   16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
   17  * 
   18  * The Original Code and all software distributed under the License are
   19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
   20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
   21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
   22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
   23  * Please see the License for the specific language governing rights and
   24  * limitations under the License.
   25  * 
   26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
   27  */
   28 /*
   29  * @OSF_COPYRIGHT@
   30  */
   31 
   32 /*
   33  *      File:           i386/rtclock.c
   34  *      Purpose:        Routines for handling the machine dependent
   35  *                      real-time clock. Historically, this clock is
   36  *                      generated by the Intel 8254 Programmable Interval
   37  *                      Timer, but local apic timers are now used for
   38  *                      this purpose with the master time reference being
   39  *                      the cpu clock counted by the timestamp MSR.
   40  */
   41 
   42 #include <platforms.h>
   43 #include <mach_kdb.h>
   44 
   45 #include <mach/mach_types.h>
   46 
   47 #include <kern/cpu_data.h>
   48 #include <kern/cpu_number.h>
   49 #include <kern/clock.h>
   50 #include <kern/host_notify.h>
   51 #include <kern/macro_help.h>
   52 #include <kern/misc_protos.h>
   53 #include <kern/spl.h>
   54 #include <kern/assert.h>
   55 #include <mach/vm_prot.h>
   56 #include <vm/pmap.h>
   57 #include <vm/vm_kern.h>         /* for kernel_map */
   58 #include <i386/ipl.h>
   59 #include <architecture/i386/pio.h>
   60 #include <i386/machine_cpu.h>
   61 #include <i386/cpuid.h>
   62 #include <i386/cpu_threads.h>
   63 #include <i386/mp.h>
   64 #include <i386/machine_routines.h>
   65 #include <i386/proc_reg.h>
   66 #include <i386/misc_protos.h>
   67 #include <i386/lapic.h>
   68 #include <pexpert/pexpert.h>
   69 #include <machine/limits.h>
   70 #include <machine/commpage.h>
   71 #include <sys/kdebug.h>
   72 #include <i386/tsc.h>
   73 #include <i386/rtclock.h>
   74 
   75 #define NSEC_PER_HZ                     (NSEC_PER_SEC / 100) /* nsec per tick */
   76 
   77 #define UI_CPUFREQ_ROUNDING_FACTOR      10000000
   78 
   79 int             rtclock_config(void);
   80 
   81 int             rtclock_init(void);
   82 
   83 uint64_t        rtc_decrementer_min;
   84 
   85 uint64_t        tsc_rebase_abs_time = 0;
   86 
   87 void                    rtclock_intr(x86_saved_state_t *regs);
   88 static uint64_t         maxDec;                 /* longest interval our hardware timer can handle (nsec) */
   89 
   90 static void     rtc_set_timescale(uint64_t cycles);
   91 static uint64_t rtc_export_speed(uint64_t cycles);
   92 
   93 rtc_nanotime_t  rtc_nanotime_info = {0,0,0,0,1,0};
   94 
   95 /*
   96  * tsc_to_nanoseconds:
   97  *
   98  * Basic routine to convert a raw 64 bit TSC value to a
   99  * 64 bit nanosecond value.  The conversion is implemented
  100  * based on the scale factor and an implicit 32 bit shift.
  101  */
  102 static inline uint64_t
  103 _tsc_to_nanoseconds(uint64_t value)
  104 {
  105 #if defined(__i386__)
  106     asm volatile("movl  %%edx,%%esi     ;"
  107                  "mull  %%ecx           ;"
  108                  "movl  %%edx,%%edi     ;"
  109                  "movl  %%esi,%%eax     ;"
  110                  "mull  %%ecx           ;"
  111                  "addl  %%edi,%%eax     ;"      
  112                  "adcl  $0,%%edx         "
  113                  : "+A" (value)
  114                  : "c" (current_cpu_datap()->cpu_nanotime->scale)
  115                  : "esi", "edi");
  116 #elif defined(__x86_64__)
  117     asm volatile("mul %%rcx;"
  118                  "shrq $32, %%rax;"
  119                  "shlq $32, %%rdx;"
  120                  "orq %%rdx, %%rax;"
  121                  : "=a"(value)
  122                  : "a"(value), "c"(rtc_nanotime_info.scale)
  123                  : "rdx", "cc" );
  124 #else
  125 #error Unsupported architecture
  126 #endif
  127 
  128     return (value);
  129 }
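/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): on a 64-bit build whose compiler provides a 128-bit integer
 * type, the assembly above is equivalent to the expression below.  The
 * scale field is a 32.32 fixed-point ratio of nanoseconds per TSC cycle,
 * so the product is shifted right past the implicit 32-bit radix point.
 */
static inline uint64_t
_tsc_to_nanoseconds_sketch(uint64_t value)
{
	/* keep bits 32..95 of the product, as the __x86_64__ asm does */
	return (uint64_t)(((unsigned __int128)value *
	    rtc_nanotime_info.scale) >> 32);
}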
  130 
  131 static inline uint32_t
  132 _absolutetime_to_microtime(uint64_t abstime, clock_sec_t *secs, clock_usec_t *microsecs)
  133 {
  134         uint32_t remain;
  135 #if defined(__i386__)
  136         asm volatile(
  137                         "divl %3"
  138                                 : "=a" (*secs), "=d" (remain)
  139                                 : "A" (abstime), "r" (NSEC_PER_SEC));
  140         asm volatile(
  141                         "divl %3"
  142                                 : "=a" (*microsecs)
  143                                 : "" (remain), "d" (0), "r" (NSEC_PER_USEC));
  144 #elif defined(__x86_64__)
  145         *secs = abstime / (uint64_t)NSEC_PER_SEC;
  146         remain = (uint32_t)(abstime % (uint64_t)NSEC_PER_SEC);
  147         *microsecs = remain / NSEC_PER_USEC;
  148 #else
  149 #error Unsupported architecture
  150 #endif
  151         return remain;
  152 }
  153 
  154 static inline void
  155 _absolutetime_to_nanotime(uint64_t abstime, clock_sec_t *secs, clock_usec_t *nanosecs)
  156 {
  157 #if defined(__i386__)
  158         asm volatile(
  159                         "divl %3"
  160                         : "=a" (*secs), "=d" (*nanosecs)
  161                         : "A" (abstime), "r" (NSEC_PER_SEC));
  162 #elif defined(__x86_64__)
  163         *secs = abstime / (uint64_t)NSEC_PER_SEC;
  164         *nanosecs = (clock_usec_t)(abstime % (uint64_t)NSEC_PER_SEC);
  165 #else
  166 #error Unsupported architecture
  167 #endif
  168 }
  169 
  170 static uint32_t
  171 deadline_to_decrementer(
  172         uint64_t        deadline,
  173         uint64_t        now)
  174 {
  175         uint64_t        delta;
  176 
  177         if (deadline <= now)
  178                 return (uint32_t)rtc_decrementer_min;
  179         else {
  180                 delta = deadline - now;
  181                 return (uint32_t)MIN(MAX(rtc_decrementer_min,delta),maxDec); 
  182         }
  183 }
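/*
 * Worked example (illustrative values): with rtc_decrementer_min holding
 * the ~1usec minimum set up in rtclock_init() (1,000 ns) and a deadline
 * 250,000 ns in the future, the delta already lies inside
 * [rtc_decrementer_min, maxDec] and is returned unchanged; a deadline
 * that has already passed returns rtc_decrementer_min instead, so the
 * timer is still armed for a near-immediate pop rather than not at all.
 */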
  184 
  185 void
  186 rtc_lapic_start_ticking(void)
  187 {
  188         x86_lcpu_t      *lcpu = x86_lcpu();
  189 
  190         /*
  191          * Force a complete re-evaluation of timer deadlines.
  192          */
  193         lcpu->rtcPop = EndOfAllTime;
  194         etimer_resync_deadlines();
  195 }
  196 
  197 /*
  198  * Configure the real-time clock device. Return success (1)
  199  * or failure (0).
  200  */
  201 
  202 int
  203 rtclock_config(void)
  204 {
  205         /* nothing to do */
  206         return (1);
  207 }
  208 
  209 
  210 /*
  211  * Nanotime/mach_absolute_time
  212  * ---------------------------
  213  * The timestamp counter (TSC) - which counts cpu clock cycles and can be read
  214  * efficiently by the kernel and in userspace - is the reference for all timing.
  215  * The cpu clock rate is platform-dependent and may stop or be reset when the
  216  * processor is napped/slept.  As a result, nanotime is the software abstraction
  217  * used to maintain a monotonic clock, adjusted from an outside reference as needed.
  218  *
  219  * The kernel maintains nanotime information recording:
  220  *      - the ratio of tsc to nanoseconds
  221  *        with this ratio expressed as a 32-bit scale and shift
  222  *        (power of 2 divider);
  223  *      - { tsc_base, ns_base } pair of corresponding timestamps.
  224  *
  225  * The tuple {tsc_base, ns_base, scale, shift} is exported in the commpage 
  226  * for the userspace nanotime routine to read.
  227  *
  228  * All of the routines which update the nanotime data are non-reentrant.  This must
  229  * be guaranteed by the caller.
  230  */
  231 static inline void
  232 rtc_nanotime_set_commpage(rtc_nanotime_t *rntp)
  233 {
  234         commpage_set_nanotime(rntp->tsc_base, rntp->ns_base, rntp->scale, rntp->shift);
  235 }
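/*
 * Sketch of how the exported tuple is consumed (formula only; the real
 * readers are _rtc_nanotime_read() and the user-level commpage routine):
 *
 *	nanotime = ns_base + (((tsc_now - tsc_base) * scale) >> 32)
 *
 * i.e. the nanoseconds recorded at the last rebase plus the scaled TSC
 * delta since then; this is the same multiply-and-shift performed by
 * _tsc_to_nanoseconds() above.  The lock-free readers also guard the
 * tuple with a generation count (the non-zero fifth field in the
 * rtc_nanotime_info initializer) so that an update in progress is never
 * observed half-written.
 */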
  236 
  237 /*
  238  * rtc_nanotime_init:
  239  *
  240  * Initialize the nanotime info from the base time.
  241  */
  242 static inline void
  243 _rtc_nanotime_init(rtc_nanotime_t *rntp, uint64_t base)
  244 {
  245         uint64_t        tsc = rdtsc64();
  246 
  247         _rtc_nanotime_store(tsc, base, rntp->scale, rntp->shift, rntp);
  248 }
  249 
  250 static void
  251 rtc_nanotime_init(uint64_t base)
  252 {
  253         rtc_nanotime_t  *rntp = current_cpu_datap()->cpu_nanotime;
  254 
  255         _rtc_nanotime_init(rntp, base);
  256         rtc_nanotime_set_commpage(rntp);
  257 }
  258 
  259 /*
  260  * rtc_nanotime_init_commpage:
  261  *
  262  * Call back from the commpage initialization to
  263  * cause the commpage data to be filled in once the
  264  * commpages have been created.
  265  */
  266 void
  267 rtc_nanotime_init_commpage(void)
  268 {
  269         spl_t                   s = splclock();
  270 
  271         rtc_nanotime_set_commpage(current_cpu_datap()->cpu_nanotime);
  272 
  273         splx(s);
  274 }
  275 
  276 /*
  277  * rtc_nanotime_read:
  278  *
  279  * Returns the current nanotime value, accessible from any
  280  * context.
  281  */
  282 static inline uint64_t
  283 rtc_nanotime_read(void)
  284 {
  285         
  286 #if CONFIG_EMBEDDED
  287         if (gPEClockFrequencyInfo.timebase_frequency_hz > SLOW_TSC_THRESHOLD)
  288                 return  _rtc_nanotime_read(current_cpu_datap()->cpu_nanotime, 1);       /* slow processor */
  289         else
  290 #endif
  291         return  _rtc_nanotime_read(current_cpu_datap()->cpu_nanotime, 0);       /* assume fast processor */
  292 }
  293 
  294 /*
  295  * rtc_clock_napped:
  296  *
  297  * Invoked from power management when we exit from a low C-State (>= C4)
  298  * and the TSC has stopped counting.  The nanotime data is updated according
  299  * to the provided value which represents the new value for nanotime.
  300  */
  301 void
  302 rtc_clock_napped(uint64_t base, uint64_t tsc_base)
  303 {
  304         rtc_nanotime_t  *rntp = current_cpu_datap()->cpu_nanotime;
  305         uint64_t        oldnsecs;
  306         uint64_t        newnsecs;
  307         uint64_t        tsc;
  308 
  309         assert(!ml_get_interrupts_enabled());
  310         tsc = rdtsc64();
  311         oldnsecs = rntp->ns_base + _tsc_to_nanoseconds(tsc - rntp->tsc_base);
  312         newnsecs = base + _tsc_to_nanoseconds(tsc - tsc_base);
  313         
  314         /*
  315          * Only update the base values if time using the new base values
  316          * is later than the time using the old base values.
  317          */
  318         if (oldnsecs < newnsecs) {
  319             _rtc_nanotime_store(tsc_base, base, rntp->scale, rntp->shift, rntp);
  320             rtc_nanotime_set_commpage(rntp);
  321         }
  322 }
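/*
 * Worked example (illustrative numbers): if the pre-nap bases would put
 * nanotime at 5.000000000 s and the bases handed in by power management
 * would put it at 5.002000000 s, the new bases are adopted; had they
 * instead yielded 4.998000000 s they would be discarded, so that
 * mach_absolute_time() never appears to step backwards across the nap.
 */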
  323 
  324 void
  325 rtc_clock_stepping(__unused uint32_t new_frequency,
  326                    __unused uint32_t old_frequency)
  327 {
  328         panic("rtc_clock_stepping unsupported");
  329 }
  330 
  331 void
  332 rtc_clock_stepped(__unused uint32_t new_frequency,
  333                   __unused uint32_t old_frequency)
  334 {
  335         panic("rtc_clock_stepped unsupported");
  336 }
  337 
  338 /*
  339  * rtc_sleep_wakeup:
  340  *
  341  * Invoked from power management when we have awoken from a sleep (S3)
  342  * and the TSC has been reset.  The nanotime data is updated based on
  343  * the passed in value.
  344  *
  345  * The caller must guarantee non-reentrancy.
  346  */
  347 void
  348 rtc_sleep_wakeup(
  349         uint64_t                base)
  350 {
  351         /*
  352          * Reset nanotime.
  353          * The timestamp counter will have been reset
  354          * but nanotime (uptime) marches onward.
  355          */
  356         rtc_nanotime_init(base);
  357 }
  358 
  359 /*
  360  * Initialize the real-time clock device.
  361  * In addition, various variables used to support the clock are initialized.
  362  */
  363 int
  364 rtclock_init(void)
  365 {
  366         uint64_t        cycles;
  367 
  368         assert(!ml_get_interrupts_enabled());
  369 
  370         if (cpu_number() == master_cpu) {
  371 
  372                 assert(tscFreq);
  373                 rtc_set_timescale(tscFreq);
  374 
  375                 /*
  376                  * Adjust and set the exported cpu speed.
  377                  */
  378                 cycles = rtc_export_speed(tscFreq);
  379 
  380                 /*
  381                  * Set min/max to actual.
  382                  * ACPI may update these later if speed-stepping is detected.
  383                  */
  384                 gPEClockFrequencyInfo.cpu_frequency_min_hz = cycles;
  385                 gPEClockFrequencyInfo.cpu_frequency_max_hz = cycles;
  386 
  387                 /*
  388                  * Compute the longest interval we can represent.
  389                  */
  390                 maxDec = tmrCvt(0x7fffffffULL, busFCvtt2n);
  391                 kprintf("maxDec: %lld\n", maxDec);
  392 
  393                 /* Minimum interval is 1usec */
  394                 rtc_decrementer_min = deadline_to_decrementer(NSEC_PER_USEC, 0ULL);
  395                 /* Point LAPIC interrupts to hardclock() */
  396                 lapic_set_timer_func((i386_intr_func_t) rtclock_intr);
  397 
  398                 clock_timebase_init();
  399                 ml_init_lock_timeout();
  400         }
  401 
  402         rtc_lapic_start_ticking();
  403 
  404         return (1);
  405 }
  406 
  407 // Utility routines:
  408 // derive the timescale used to convert TSC cycles to nanoseconds, and the rounded cycles-per-second figure exported to the UI.
  409 
  410 static void
  411 rtc_set_timescale(uint64_t cycles)
  412 {
  413         rtc_nanotime_t  *rntp = current_cpu_datap()->cpu_nanotime;
  414         rntp->scale = (uint32_t)(((uint64_t)NSEC_PER_SEC << 32) / cycles);
  415 
  416         if (cycles <= SLOW_TSC_THRESHOLD)
  417                 rntp->shift = (uint32_t)cycles;
  418         else
  419                 rntp->shift = 32;
  420 
  421         if (tsc_rebase_abs_time == 0)
  422                 tsc_rebase_abs_time = mach_absolute_time();
  423 
  424         rtc_nanotime_init(0);
  425 }
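/*
 * Worked example (illustrative frequency): for a 2.4 GHz TSC,
 *
 *	scale = (1,000,000,000 << 32) / 2,400,000,000
 *	      = 2^32 / 2.4  ~= 1,789,569,706  (0x6AAAAAAA)
 *
 * which is simply 1/2.4 ns-per-cycle in 32.32 fixed point, letting
 * _tsc_to_nanoseconds() recover nanoseconds with one multiply and a
 * 32-bit shift.
 */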
  426 
  427 static uint64_t
  428 rtc_export_speed(uint64_t cyc_per_sec)
  429 {
  430         uint64_t        cycles;
  431 
  432         /* Round: */
  433         cycles = ((cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR/2))
  434                         / UI_CPUFREQ_ROUNDING_FACTOR)
  435                                 * UI_CPUFREQ_ROUNDING_FACTOR;
  436 
  437         /*
  438          * Set current measured speed.
  439          */
  440         if (cycles >= 0x100000000ULL) {
  441             gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFFUL;
  442         } else {
  443             gPEClockFrequencyInfo.cpu_clock_rate_hz = (unsigned long)cycles;
  444         }
  445         gPEClockFrequencyInfo.cpu_frequency_hz = cycles;
  446 
  447         kprintf("[RTCLOCK] frequency %llu (%llu)\n", cycles, cyc_per_sec);
  448         return(cycles);
  449 }
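/*
 * Worked example (illustrative measurement): with the 10,000,000 Hz
 * rounding factor, a measured 2,394,967,296 cycles/sec is reported as
 * 2,390,000,000 Hz, while anything from 2,395,000,000 up rounds to
 * 2,400,000,000 Hz.  Rates that do not fit in 32 bits are clamped for
 * the cpu_clock_rate_hz field but exported unclamped through
 * cpu_frequency_hz.
 */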
  450 
  451 void
  452 clock_get_system_microtime(
  453         clock_sec_t                     *secs,
  454         clock_usec_t            *microsecs)
  455 {
  456         uint64_t        now = rtc_nanotime_read();
  457 
  458         _absolutetime_to_microtime(now, secs, microsecs);
  459 }
  460 
  461 void
  462 clock_get_system_nanotime(
  463         clock_sec_t                     *secs,
  464         clock_nsec_t            *nanosecs)
  465 {
  466         uint64_t        now = rtc_nanotime_read();
  467 
  468         _absolutetime_to_nanotime(now, secs, nanosecs);
  469 }
  470 
  471 void
  472 clock_gettimeofday_set_commpage(
  473         uint64_t                                abstime,
  474         uint64_t                                epoch,
  475         uint64_t                                offset,
  476         clock_sec_t                             *secs,
  477         clock_usec_t                    *microsecs)
  478 {
  479         uint64_t        now = abstime + offset;
  480         uint32_t        remain;
  481 
  482         remain = _absolutetime_to_microtime(now, secs, microsecs);
  483 
  484         *secs += (clock_sec_t)epoch;
  485 
  486         commpage_set_timestamp(abstime - remain, *secs);
  487 }
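/*
 * Note on the calculation above: remain is the nanosecond fraction of the
 * current wall-clock second, so (abstime - remain) is the absolute-time
 * value at which that second began.  Publishing that base alongside the
 * integral seconds lets the user-level gettimeofday() path rebuild the
 * sub-second part from mach_absolute_time() alone, without a system call.
 */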
  488 
  489 void
  490 clock_timebase_info(
  491         mach_timebase_info_t    info)
  492 {
  493         info->numer = info->denom =  1;
  494 }       
  495 
  496 /*
  497  * Real-time clock device interrupt.
  498  */
  499 void
  500 rtclock_intr(
  501         x86_saved_state_t       *tregs)
  502 {
  503         uint64_t        rip;
  504         boolean_t       user_mode = FALSE;
  505         uint64_t        abstime;
  506         uint32_t        latency;
  507         x86_lcpu_t      *lcpu = x86_lcpu();
  508 
  509         assert(get_preemption_level() > 0);
  510         assert(!ml_get_interrupts_enabled());
  511 
  512         abstime = rtc_nanotime_read();
  513         latency = (uint32_t)(abstime - lcpu->rtcDeadline);
  514         if (abstime < lcpu->rtcDeadline)
  515                 latency = 1;
  516 
  517         if (is_saved_state64(tregs) == TRUE) {
  518                 x86_saved_state64_t     *regs;
  519                   
  520                 regs = saved_state64(tregs);
  521 
  522                 if (regs->isf.cs & 0x03)
  523                         user_mode = TRUE;
  524                 rip = regs->isf.rip;
  525         } else {
  526                 x86_saved_state32_t     *regs;
  527 
  528                 regs = saved_state32(tregs);
  529 
  530                 if (regs->cs & 0x03)
  531                         user_mode = TRUE;
  532                 rip = regs->eip;
  533         }
  534 
  535         /* Log the interrupt service latency (negative value expected by the tracing tool) */
  536         KERNEL_DEBUG_CONSTANT(
  537                 MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE,
  538                 -(int32_t)latency, (uint32_t)rip, user_mode, 0, 0);
  539 
  540         /* call the generic etimer */
  541         etimer_intr(user_mode, rip);
  542 }
  543 
  544 /*
  545  *      Request timer pop from the hardware 
  546  */
  547 
  548 
  549 int
  550 setPop(
  551         uint64_t time)
  552 {
  553         uint64_t now;
  554         uint32_t decr;
  555         uint64_t count;
  556         
  557         now = rtc_nanotime_read();              /* The time in nanoseconds */
  558         decr = deadline_to_decrementer(time, now);
  559 
  560         count = tmrCvt(decr, busFCvtn2t);
  561         lapic_set_timer(TRUE, one_shot, divide_by_1, (uint32_t) count);
  562 
  563         return decr;                            /* Pass back what we set */
  564 }
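/*
 * Usage sketch (hypothetical caller, not in the original source): to
 * request a timer interrupt roughly 100 microseconds from now,
 *
 *	setPop(mach_absolute_time() + 100 * NSEC_PER_USEC);
 *
 * The deadline is clamped by deadline_to_decrementer(), the resulting
 * nanosecond interval is converted to local APIC timer ticks with
 * tmrCvt()/busFCvtn2t, and the timer is armed as an undivided one-shot.
 */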
  565 
  566 
  567 uint64_t
  568 mach_absolute_time(void)
  569 {
  570         return rtc_nanotime_read();
  571 }
  572 
  573 void
  574 clock_interval_to_absolutetime_interval(
  575         uint32_t                interval,
  576         uint32_t                scale_factor,
  577         uint64_t                *result)
  578 {
  579         *result = (uint64_t)interval * scale_factor;
  580 }
  581 
  582 void
  583 absolutetime_to_microtime(
  584         uint64_t                        abstime,
  585         clock_sec_t                     *secs,
  586         clock_usec_t            *microsecs)
  587 {
  588         _absolutetime_to_microtime(abstime, secs, microsecs);
  589 }
  590 
  591 void
  592 absolutetime_to_nanotime(
  593         uint64_t                        abstime,
  594         clock_sec_t                     *secs,
  595         clock_nsec_t            *nanosecs)
  596 {
  597         _absolutetime_to_nanotime(abstime, secs, nanosecs);
  598 }
  599 
  600 void
  601 nanotime_to_absolutetime(
  602         clock_sec_t                     secs,
  603         clock_nsec_t            nanosecs,
  604         uint64_t                        *result)
  605 {
  606         *result = ((uint64_t)secs * NSEC_PER_SEC) + nanosecs;
  607 }
  608 
  609 void
  610 absolutetime_to_nanoseconds(
  611         uint64_t                abstime,
  612         uint64_t                *result)
  613 {
  614         *result = abstime;
  615 }
  616 
  617 void
  618 nanoseconds_to_absolutetime(
  619         uint64_t                nanoseconds,
  620         uint64_t                *result)
  621 {
  622         *result = nanoseconds;
  623 }
  624 
  625 void
  626 machine_delay_until(
  627         uint64_t                deadline)
  628 {
  629         uint64_t                now;
  630 
  631         do {
  632                 cpu_pause();
  633                 now = mach_absolute_time();
  634         } while (now < deadline);
  635 }
