FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_time.c

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)kern_time.c 8.1 (Berkeley) 6/10/93
 * $FreeBSD: src/sys/kern/kern_time.c,v 1.68.2.1 2002/10/01 08:00:41 bde Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/sysproto.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/sysent.h>
#include <sys/sysunion.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/sysctl.h>
#include <sys/kern_syscall.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <sys/msgport2.h>
#include <sys/thread2.h>
#include <sys/mplock2.h>

struct timezone tz;

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

static int      settime(struct timeval *);
static void     timevalfix(struct timeval *);

/*
 * Nanosleep tries very hard to sleep for a precisely requested time
 * interval, down to 1uS.  The administrator can impose a minimum delay
 * and a delay below which we hard-loop instead of initiating a timer
 * interrupt and sleeping.
 *
 * For machines under high loads it might be beneficial to increase min_us
 * to e.g. 1000uS (1ms) so spinning processes sleep meaningfully.
 */
static int     nanosleep_min_us = 10;
static int     nanosleep_hard_us = 100;
static int     gettimeofday_quick = 0;
SYSCTL_INT(_kern, OID_AUTO, nanosleep_min_us, CTLFLAG_RW,
           &nanosleep_min_us, 0, "");
SYSCTL_INT(_kern, OID_AUTO, nanosleep_hard_us, CTLFLAG_RW,
           &nanosleep_hard_us, 0, "");
SYSCTL_INT(_kern, OID_AUTO, gettimeofday_quick, CTLFLAG_RW,
           &gettimeofday_quick, 0, "");

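/*
 * Tuning sketch (hypothetical values): on a heavily loaded machine an
 * administrator might raise the minimum delay so that spinning
 * processes give up the cpu for a meaningful amount of time, e.g.:
 *
 *      sysctl kern.nanosleep_min_us=1000
 *      sysctl kern.nanosleep_hard_us=5000
 */
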
static int
settime(struct timeval *tv)
{
        struct timeval delta, tv1, tv2;
        static struct timeval maxtime, laststep;
        struct timespec ts;
        int origcpu;

        if ((origcpu = mycpu->gd_cpuid) != 0)
                lwkt_setcpu_self(globaldata_find(0));

        crit_enter();
        microtime(&tv1);
        delta = *tv;
        timevalsub(&delta, &tv1);

        /*
         * If the system is secure, we do not allow the time to be
         * set to a value earlier than 1 second less than the highest
         * time we have yet seen.  The worst a miscreant can do in
         * this circumstance is "freeze" time; he cannot go
         * back to the past.
         *
         * We similarly do not allow the clock to be stepped more
         * than one second, nor more than once per second.  This allows
         * a miscreant to make the clock march double-time, but no worse.
         */
        if (securelevel > 1) {
                if (delta.tv_sec < 0 || delta.tv_usec < 0) {
                        /*
                         * Update maxtime to latest time we've seen.
                         */
                        if (tv1.tv_sec > maxtime.tv_sec)
                                maxtime = tv1;
                        tv2 = *tv;
                        timevalsub(&tv2, &maxtime);
                        if (tv2.tv_sec < -1) {
                                tv->tv_sec = maxtime.tv_sec - 1;
                                kprintf("Time adjustment clamped to -1 second\n");
                        }
                } else {
                        if (tv1.tv_sec == laststep.tv_sec) {
                                crit_exit();
                                return (EPERM);
                        }
                        if (delta.tv_sec > 1) {
                                tv->tv_sec = tv1.tv_sec + 1;
                                kprintf("Time adjustment clamped to +1 second\n");
                        }
                        laststep = *tv;
                }
        }

        ts.tv_sec = tv->tv_sec;
        ts.tv_nsec = tv->tv_usec * 1000;
        set_timeofday(&ts);
        crit_exit();

        if (origcpu != 0)
                lwkt_setcpu_self(globaldata_find(origcpu));

        resettodr();
        return (0);
}

static void
get_curthread_cputime(struct timespec *ats)
{
        struct thread *td = curthread;
        uint64_t runtime;

        crit_enter();
        /*
         * These are 64-bit microsecond counters and the actual values
         * should never reach the limit, so we don't care about overflows.
         * Sum the user, system, and interrupt ticks before splitting into
         * seconds and nanoseconds; converting each counter separately
         * could leave tv_nsec un-normalized (up to almost 3 seconds
         * worth of nanoseconds).
         */
        runtime = td->td_uticks + td->td_sticks + td->td_iticks;
        crit_exit();
        ats->tv_sec = runtime / 1000000;
        ats->tv_nsec = (runtime % 1000000) * 1000;
}

/*
 * MPSAFE
 */
int
kern_clock_gettime(clockid_t clock_id, struct timespec *ats)
{
        int error = 0;
        struct proc *p;

        switch(clock_id) {
        case CLOCK_REALTIME:
        case CLOCK_REALTIME_PRECISE:
                nanotime(ats);
                break;
        case CLOCK_REALTIME_FAST:
                getnanotime(ats);
                break;
        case CLOCK_MONOTONIC:
        case CLOCK_MONOTONIC_PRECISE:
        case CLOCK_UPTIME:
        case CLOCK_UPTIME_PRECISE:
                nanouptime(ats);
                break;
        case CLOCK_MONOTONIC_FAST:
        case CLOCK_UPTIME_FAST:
                getnanouptime(ats);
                break;
        case CLOCK_VIRTUAL:
                p = curproc;
                ats->tv_sec = p->p_timer[ITIMER_VIRTUAL].it_value.tv_sec;
                ats->tv_nsec = p->p_timer[ITIMER_VIRTUAL].it_value.tv_usec *
                               1000;
                break;
        case CLOCK_PROF:
        case CLOCK_PROCESS_CPUTIME_ID:
                p = curproc;
                ats->tv_sec = p->p_timer[ITIMER_PROF].it_value.tv_sec;
                ats->tv_nsec = p->p_timer[ITIMER_PROF].it_value.tv_usec *
                               1000;
                break;
        case CLOCK_SECOND:
                ats->tv_sec = time_second;
                ats->tv_nsec = 0;
                break;
        case CLOCK_THREAD_CPUTIME_ID:
                get_curthread_cputime(ats);
                break;
        default:
                error = EINVAL;
                break;
        }
        return (error);
}

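/*
 * Note on the clock ids above: the plain and _PRECISE variants read
 * the hardware timecounter via nanotime()/nanouptime(), while the
 * _FAST variants return the cached per-tick values via getnanotime()
 * and getnanouptime() and are therefore only accurate to within one
 * clock tick.  CLOCK_SECOND is cheaper still and returns whole
 * seconds only.
 */
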
/*
 * MPSAFE
 */
int
sys_clock_gettime(struct clock_gettime_args *uap)
{
        struct timespec ats;
        int error;

        error = kern_clock_gettime(uap->clock_id, &ats);
        if (error == 0)
                error = copyout(&ats, uap->tp, sizeof(ats));

        return (error);
}

int
kern_clock_settime(clockid_t clock_id, struct timespec *ats)
{
        struct thread *td = curthread;
        struct timeval atv;
        int error;

        if ((error = priv_check(td, PRIV_CLOCK_SETTIME)) != 0)
                return (error);
        if (clock_id != CLOCK_REALTIME)
                return (EINVAL);
        if (ats->tv_nsec < 0 || ats->tv_nsec >= 1000000000)
                return (EINVAL);

        TIMESPEC_TO_TIMEVAL(&atv, ats);
        error = settime(&atv);
        return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_clock_settime(struct clock_settime_args *uap)
{
        struct timespec ats;
        int error;

        if ((error = copyin(uap->tp, &ats, sizeof(ats))) != 0)
                return (error);

        get_mplock();
        error = kern_clock_settime(uap->clock_id, &ats);
        rel_mplock();
        return (error);
}

/*
 * MPSAFE
 */
int
kern_clock_getres(clockid_t clock_id, struct timespec *ts)
{
        int error;

        switch(clock_id) {
        case CLOCK_REALTIME:
        case CLOCK_REALTIME_FAST:
        case CLOCK_REALTIME_PRECISE:
        case CLOCK_MONOTONIC:
        case CLOCK_MONOTONIC_FAST:
        case CLOCK_MONOTONIC_PRECISE:
        case CLOCK_UPTIME:
        case CLOCK_UPTIME_FAST:
        case CLOCK_UPTIME_PRECISE:
        case CLOCK_THREAD_CPUTIME_ID:
        case CLOCK_PROCESS_CPUTIME_ID:
                /*
                 * Round up the result of the division cheaply
                 * by adding 1.  Rounding up is especially important
                 * if rounding down would give 0.  Perfect rounding
                 * is unimportant.
                 */
                ts->tv_sec = 0;
                ts->tv_nsec = 1000000000 / sys_cputimer->freq + 1;
                error = 0;
                break;
        case CLOCK_VIRTUAL:
        case CLOCK_PROF:
                /* Accurately round up here because we can do so cheaply. */
                ts->tv_sec = 0;
                ts->tv_nsec = (1000000000 + hz - 1) / hz;
                error = 0;
                break;
        case CLOCK_SECOND:
                ts->tv_sec = 1;
                ts->tv_nsec = 0;
                error = 0;
                break;
        default:
                error = EINVAL;
                break;
        }

        return(error);
}

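/*
 * Worked example of the rounding above: with a 1193182 Hz cputimer,
 * 1000000000 / freq truncates to 838, so the reported resolution is
 * 839 ns.  For the tick-driven clocks at hz = 100, the exact round-up
 * (1000000000 + 99) / 100 yields 10000000 ns, i.e. one 10 ms tick.
 */
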
/*
 * MPSAFE
 */
int
sys_clock_getres(struct clock_getres_args *uap)
{
        int error;
        struct timespec ts;

        error = kern_clock_getres(uap->clock_id, &ts);
        if (error == 0)
                error = copyout(&ts, uap->tp, sizeof(ts));

        return (error);
}

/*
 * nanosleep1()
 *
 *      This is a general helper function for nanosleep() (aka sleep() aka
 *      usleep()).
 *
 *      If there is less than one tick's worth of time left and
 *      we haven't done a yield, or the remaining time is
 *      ridiculously short, do a yield.  This avoids having
 *      to deal with systimer overheads when the system is under
 *      heavy loads.  If we have done a yield already then use
 *      a systimer and an uninterruptible thread wait.
 *
 *      If there is more than a tick's worth of time left,
 *      calculate the baseline ticks and use an interruptible
 *      tsleep, then handle the fine-grained delay on the next
 *      loop.  This usually results in two sleeps occurring, a long one
 *      and a short one.
 *
 * MPSAFE
 */
static void
ns1_systimer(systimer_t info, int in_ipi __unused,
    struct intrframe *frame __unused)
{
        lwkt_schedule(info->data);
}

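/*
 * Worked example of the two-sleep pattern (assuming hz = 100, so
 * ustick = 10000): a 25 ms request first does an interruptible
 * tsleep() covering the whole ticks; on the next loop pass the
 * remaining few milliseconds fall under one tick and are handled by
 * a one-shot systimer, or by a yield/pause loop when the residue is
 * below nanosleep_hard_us.
 */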
int
nanosleep1(struct timespec *rqt, struct timespec *rmt)
{
        static int nanowait;
        struct timespec ts, ts2, ts3;
        struct timeval tv;
        int error;

        if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
                return (EINVAL);
        /* XXX: imho this should return EINVAL at least for tv_sec < 0 */
        if (rqt->tv_sec < 0 || (rqt->tv_sec == 0 && rqt->tv_nsec == 0))
                return (0);
        nanouptime(&ts);
        timespecadd(&ts, rqt);          /* ts = target timestamp compare */
        TIMESPEC_TO_TIMEVAL(&tv, rqt);  /* tv = sleep interval */

        for (;;) {
                int ticks;
                struct systimer info;

                ticks = tv.tv_usec / ustick;    /* approximate */

                if (tv.tv_sec == 0 && ticks == 0) {
                        thread_t td = curthread;
                        if (tv.tv_usec > 0 && tv.tv_usec < nanosleep_min_us)
                                tv.tv_usec = nanosleep_min_us;
                        if (tv.tv_usec < nanosleep_hard_us) {
                                lwkt_user_yield();
                                cpu_pause();
                        } else {
                                crit_enter_quick(td);
                                systimer_init_oneshot(&info, ns1_systimer,
                                                td, tv.tv_usec);
                                lwkt_deschedule_self(td);
                                crit_exit_quick(td);
                                lwkt_switch();
                                systimer_del(&info); /* make sure it's gone */
                        }
                        error = iscaught(td->td_lwp);
                } else if (tv.tv_sec == 0) {
                        error = tsleep(&nanowait, PCATCH, "nanslp", ticks);
                } else {
                        ticks = tvtohz_low(&tv); /* also handles overflow */
                        error = tsleep(&nanowait, PCATCH, "nanslp", ticks);
                }
                nanouptime(&ts2);
                if (error && error != EWOULDBLOCK) {
                        if (error == ERESTART)
                                error = EINTR;
                        if (rmt != NULL) {
                                timespecsub(&ts, &ts2);
                                if (ts.tv_sec < 0)
                                        timespecclear(&ts);
                                *rmt = ts;
                        }
                        return (error);
                }
                if (timespeccmp(&ts2, &ts, >=))
                        return (0);
                ts3 = ts;
                timespecsub(&ts3, &ts2);
                TIMESPEC_TO_TIMEVAL(&tv, &ts3);
        }
}

/*
 * MPSAFE
 */
int
sys_nanosleep(struct nanosleep_args *uap)
{
        int error;
        struct timespec rqt;
        struct timespec rmt;

        error = copyin(uap->rqtp, &rqt, sizeof(rqt));
        if (error)
                return (error);

        error = nanosleep1(&rqt, &rmt);

        /*
         * Copy out the residual if nanosleep was interrupted.
         */
        if (error && uap->rmtp) {
                int error2;

                error2 = copyout(&rmt, uap->rmtp, sizeof(rmt));
                if (error2)
                        error = error2;
        }
        return (error);
}

/*
 * The gettimeofday() system call is supposed to return a fine-grained
 * realtime stamp.  However, acquiring a fine-grained stamp can create a
 * bottleneck when multiple cpu cores are trying to access e.g. the
 * HPET hardware timer all at the same time, so we have a sysctl that
 * allows its behavior to be changed to a more coarse-grained timestamp
 * which does not have to access a hardware timer.
 */
int
sys_gettimeofday(struct gettimeofday_args *uap)
{
        struct timeval atv;
        int error = 0;

        if (uap->tp) {
                if (gettimeofday_quick)
                        getmicrotime(&atv);
                else
                        microtime(&atv);
                if ((error = copyout((caddr_t)&atv, (caddr_t)uap->tp,
                    sizeof (atv))))
                        return (error);
        }
        if (uap->tzp)
                error = copyout((caddr_t)&tz, (caddr_t)uap->tzp,
                    sizeof (tz));
        return (error);
}

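/*
 * Sketch of the coarse-grained mode (hypothetical session): enabling
 *
 *      sysctl kern.gettimeofday_quick=1
 *
 * makes gettimeofday() use getmicrotime(), which returns the cached
 * per-tick time instead of reading the hardware timecounter.
 */
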
/*
 * MPALMOSTSAFE
 */
int
sys_settimeofday(struct settimeofday_args *uap)
{
        struct thread *td = curthread;
        struct timeval atv;
        struct timezone atz;
        int error;

        if ((error = priv_check(td, PRIV_SETTIMEOFDAY)))
                return (error);
        /*
         * Verify all parameters before changing time.
         *
         * XXX: We do not allow the time to be set to 0.0, which also by
         *      happy coincidence works around a pkgsrc bulk build bug.
         */
        if (uap->tv) {
                if ((error = copyin((caddr_t)uap->tv, (caddr_t)&atv,
                    sizeof(atv))))
                        return (error);
                if (atv.tv_usec < 0 || atv.tv_usec >= 1000000)
                        return (EINVAL);
                if (atv.tv_sec == 0 && atv.tv_usec == 0)
                        return (EINVAL);
        }
        if (uap->tzp &&
            (error = copyin((caddr_t)uap->tzp, (caddr_t)&atz, sizeof(atz))))
                return (error);

        get_mplock();
        if (uap->tv && (error = settime(&atv))) {
                rel_mplock();
                return (error);
        }
        rel_mplock();
        if (uap->tzp)
                tz = atz;
        return (0);
}

static void
kern_adjtime_common(void)
{
        if ((ntp_delta >= 0 && ntp_delta < ntp_default_tick_delta) ||
            (ntp_delta < 0 && ntp_delta > -ntp_default_tick_delta))
                ntp_tick_delta = ntp_delta;
        else if (ntp_delta > ntp_big_delta)
                ntp_tick_delta = 10 * ntp_default_tick_delta;
        else if (ntp_delta < -ntp_big_delta)
                ntp_tick_delta = -10 * ntp_default_tick_delta;
        else if (ntp_delta > 0)
                ntp_tick_delta = ntp_default_tick_delta;
        else
                ntp_tick_delta = -ntp_default_tick_delta;
}

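/*
 * In other words: a remaining correction smaller than one default
 * per-tick step is applied in full on the next tick; larger
 * corrections are slewed at the default per-tick rate, or at ten
 * times that rate while the remaining delta exceeds ntp_big_delta.
 */
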
void
kern_adjtime(int64_t delta, int64_t *odelta)
{
        int origcpu;

        if ((origcpu = mycpu->gd_cpuid) != 0)
                lwkt_setcpu_self(globaldata_find(0));

        crit_enter();
        *odelta = ntp_delta;
        ntp_delta = delta;
        kern_adjtime_common();
        crit_exit();

        if (origcpu != 0)
                lwkt_setcpu_self(globaldata_find(origcpu));
}

static void
kern_get_ntp_delta(int64_t *delta)
{
        int origcpu;

        if ((origcpu = mycpu->gd_cpuid) != 0)
                lwkt_setcpu_self(globaldata_find(0));

        crit_enter();
        *delta = ntp_delta;
        crit_exit();

        if (origcpu != 0)
                lwkt_setcpu_self(globaldata_find(origcpu));
}

void
kern_reladjtime(int64_t delta)
{
        int origcpu;

        if ((origcpu = mycpu->gd_cpuid) != 0)
                lwkt_setcpu_self(globaldata_find(0));

        crit_enter();
        ntp_delta += delta;
        kern_adjtime_common();
        crit_exit();

        if (origcpu != 0)
                lwkt_setcpu_self(globaldata_find(origcpu));
}

static void
kern_adjfreq(int64_t rate)
{
        int origcpu;

        if ((origcpu = mycpu->gd_cpuid) != 0)
                lwkt_setcpu_self(globaldata_find(0));

        crit_enter();
        ntp_tick_permanent = rate;
        crit_exit();

        if (origcpu != 0)
                lwkt_setcpu_self(globaldata_find(origcpu));
}

/*
 * MPALMOSTSAFE
 */
int
sys_adjtime(struct adjtime_args *uap)
{
        struct thread *td = curthread;
        struct timeval atv;
        int64_t ndelta, odelta;
        int error;

        if ((error = priv_check(td, PRIV_ADJTIME)))
                return (error);
        error = copyin(uap->delta, &atv, sizeof(struct timeval));
        if (error)
                return (error);

        /*
         * Compute the total correction and the rate at which to apply it.
         * Round the adjustment down to a whole multiple of the per-tick
         * delta, so that after some number of incremental changes in
         * hardclock(), tickdelta will become zero, lest the correction
         * overshoot and start taking us away from the desired final time.
         */
        ndelta = (int64_t)atv.tv_sec * 1000000000 + atv.tv_usec * 1000;
        get_mplock();
        kern_adjtime(ndelta, &odelta);
        rel_mplock();

        if (uap->olddelta) {
                atv.tv_sec = odelta / 1000000000;
                atv.tv_usec = odelta % 1000000000 / 1000;
                copyout(&atv, uap->olddelta, sizeof(struct timeval));
        }
        return (0);
}

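/*
 * Example of the conversion above: delta = { 1, 500000 } (1.5 s)
 * becomes ndelta = 1 * 1000000000 + 500000 * 1000 = 1500000000 ns,
 * and an odelta of 1500000000 converts back to { 1, 500000 }.
 */
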
static int
sysctl_adjtime(SYSCTL_HANDLER_ARGS)
{
        int64_t delta;
        int error;

        if (req->newptr != NULL) {
                if (priv_check(curthread, PRIV_ROOT))
                        return (EPERM);
                error = SYSCTL_IN(req, &delta, sizeof(delta));
                if (error)
                        return (error);
                kern_reladjtime(delta);
        }

        if (req->oldptr)
                kern_get_ntp_delta(&delta);
        error = SYSCTL_OUT(req, &delta, sizeof(delta));
        return (error);
}

/*
 * delta is in nanoseconds.
 */
static int
sysctl_delta(SYSCTL_HANDLER_ARGS)
{
        int64_t delta, old_delta;
        int error;

        if (req->newptr != NULL) {
                if (priv_check(curthread, PRIV_ROOT))
                        return (EPERM);
                error = SYSCTL_IN(req, &delta, sizeof(delta));
                if (error)
                        return (error);
                kern_adjtime(delta, &old_delta);
        }

        if (req->oldptr != NULL)
                kern_get_ntp_delta(&old_delta);
        error = SYSCTL_OUT(req, &old_delta, sizeof(old_delta));
        return (error);
}

/*
 * frequency is in nanoseconds per second shifted left 32.
 * kern_adjfreq() needs it in nanoseconds per tick shifted left 32.
 */
static int
sysctl_adjfreq(SYSCTL_HANDLER_ARGS)
{
        int64_t freqdelta;
        int error;

        if (req->newptr != NULL) {
                if (priv_check(curthread, PRIV_ROOT))
                        return (EPERM);
                error = SYSCTL_IN(req, &freqdelta, sizeof(freqdelta));
                if (error)
                        return (error);

                freqdelta /= hz;
                kern_adjfreq(freqdelta);
        }

        if (req->oldptr != NULL)
                freqdelta = ntp_tick_permanent * hz;
        error = SYSCTL_OUT(req, &freqdelta, sizeof(freqdelta));
        if (error)
                return (error);

        return (0);
}

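/*
 * The shifted-left-32 fixed-point format gives sub-nanosecond slew
 * resolution.  Example: to slew at 1000 ns per second (1 ppm), an
 * ntpd-like daemon writes 1000 << 32 to kern.ntp.permanent; with
 * hz = 100 that becomes (1000 << 32) / 100 = 10 << 32, i.e. 10 ns
 * applied per tick.
 */
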
SYSCTL_NODE(_kern, OID_AUTO, ntp, CTLFLAG_RW, 0, "NTP related controls");
SYSCTL_PROC(_kern_ntp, OID_AUTO, permanent,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_adjfreq, "Q", "permanent correction per second");
SYSCTL_PROC(_kern_ntp, OID_AUTO, delta,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_delta, "Q", "one-time delta");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, big_delta, CTLFLAG_RD,
    &ntp_big_delta, sizeof(ntp_big_delta), "Q",
    "threshold for fast adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, tick_delta, CTLFLAG_RD,
    &ntp_tick_delta, sizeof(ntp_tick_delta), "LU",
    "per-tick adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, default_tick_delta, CTLFLAG_RD,
    &ntp_default_tick_delta, sizeof(ntp_default_tick_delta), "LU",
    "default per-tick adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, next_leap_second, CTLFLAG_RW,
    &ntp_leap_second, sizeof(ntp_leap_second), "LU",
    "next leap second");
SYSCTL_INT(_kern_ntp, OID_AUTO, insert_leap_second, CTLFLAG_RW,
    &ntp_leap_insert, 0, "insert or remove leap second");
SYSCTL_PROC(_kern_ntp, OID_AUTO, adjust,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_adjtime, "Q", "relative adjust for delta");

/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept in the p_stats area, since
 * they can be swapped out.  These are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer is kept in the process table slot
 * for the process, and its value (it_value) is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given
 * below) to be delayed in real time past when it is supposed to occur.
 * It does not suffice, therefore, to reload the real timer .it_value
 * from the real time timer's .it_interval.  Rather, we compute the next
 * time in absolute time the timer should go off.
 *
 * MPALMOSTSAFE
 */
int
sys_getitimer(struct getitimer_args *uap)
{
        struct proc *p = curproc;
        struct timeval ctv;
        struct itimerval aitv;

        if (uap->which > ITIMER_PROF)
                return (EINVAL);
        lwkt_gettoken(&p->p_token);
        if (uap->which == ITIMER_REAL) {
                /*
                 * Convert from absolute to relative time in .it_value
                 * part of real time timer.  If time for real time timer
                 * has passed return 0, else return difference between
                 * current time and time for the timer to go off.
                 */
                aitv = p->p_realtimer;
                if (timevalisset(&aitv.it_value)) {
                        getmicrouptime(&ctv);
                        if (timevalcmp(&aitv.it_value, &ctv, <))
                                timevalclear(&aitv.it_value);
                        else
                                timevalsub(&aitv.it_value, &ctv);
                }
        } else {
                aitv = p->p_timer[uap->which];
        }
        lwkt_reltoken(&p->p_token);
        return (copyout(&aitv, uap->itv, sizeof (struct itimerval)));
}

/*
 * MPALMOSTSAFE
 */
int
sys_setitimer(struct setitimer_args *uap)
{
        struct itimerval aitv;
        struct timeval ctv;
        struct itimerval *itvp;
        struct proc *p = curproc;
        int error;

        if (uap->which > ITIMER_PROF)
                return (EINVAL);
        itvp = uap->itv;
        if (itvp && (error = copyin((caddr_t)itvp, (caddr_t)&aitv,
            sizeof(struct itimerval))))
                return (error);
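        /*
         * If the caller wants the old value, fetch it by temporarily
         * repointing uap->itv at the oitv buffer and reusing
         * sys_getitimer() on the same argument structure.
         */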
        if ((uap->itv = uap->oitv) &&
            (error = sys_getitimer((struct getitimer_args *)uap)))
                return (error);
        if (itvp == NULL)
                return (0);
        if (itimerfix(&aitv.it_value))
                return (EINVAL);
        if (!timevalisset(&aitv.it_value))
                timevalclear(&aitv.it_interval);
        else if (itimerfix(&aitv.it_interval))
                return (EINVAL);
        lwkt_gettoken(&p->p_token);
        if (uap->which == ITIMER_REAL) {
                if (timevalisset(&p->p_realtimer.it_value))
                        callout_stop_sync(&p->p_ithandle);
                if (timevalisset(&aitv.it_value))
                        callout_reset(&p->p_ithandle,
                            tvtohz_high(&aitv.it_value), realitexpire, p);
                getmicrouptime(&ctv);
                timevaladd(&aitv.it_value, &ctv);
                p->p_realtimer = aitv;
        } else {
                p->p_timer[uap->which] = aitv;
                switch(uap->which) {
                case ITIMER_VIRTUAL:
                        p->p_flags &= ~P_SIGVTALRM;
                        break;
                case ITIMER_PROF:
                        p->p_flags &= ~P_SIGPROF;
                        break;
                }
        }
        lwkt_reltoken(&p->p_token);
        return (0);
}

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 * tvtohz_high() always adds 1 to allow for the time until the next clock
 * interrupt being strictly less than 1 clock tick, but we don't want
 * that here since we want to appear to be in sync with the clock
 * interrupt even when we're delayed.
 */
void
realitexpire(void *arg)
{
        struct proc *p;
        struct timeval ctv, ntv;

        p = (struct proc *)arg;
        PHOLD(p);
        lwkt_gettoken(&p->p_token);
        ksignal(p, SIGALRM);
        if (!timevalisset(&p->p_realtimer.it_interval)) {
                timevalclear(&p->p_realtimer.it_value);
                goto done;
        }
        for (;;) {
                timevaladd(&p->p_realtimer.it_value,
                           &p->p_realtimer.it_interval);
                getmicrouptime(&ctv);
                if (timevalcmp(&p->p_realtimer.it_value, &ctv, >)) {
                        ntv = p->p_realtimer.it_value;
                        timevalsub(&ntv, &ctv);
                        callout_reset(&p->p_ithandle, tvtohz_low(&ntv),
                                      realitexpire, p);
                        goto done;
                }
        }
done:
        lwkt_reltoken(&p->p_token);
        PRELE(p);
}

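/*
 * Example of the compression described above: with it_interval = 1 s
 * and a callout that fires 3.5 seconds late, the loop advances
 * it_value in one-second steps past the current uptime and a single
 * SIGALRM is delivered rather than four.
 */
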
/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable, and
 * fix it to have at least minimal value (i.e. if it is less
 * than the resolution of the clock, round it up.)
 *
 * MPSAFE
 */
int
itimerfix(struct timeval *tv)
{

        if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
            tv->tv_usec < 0 || tv->tv_usec >= 1000000)
                return (EINVAL);
        if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < ustick)
                tv->tv_usec = ustick;
        return (0);
}

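/*
 * Example: with hz = 100, ustick (microseconds per tick) is 10000, so
 * a requested value of 1 us is rounded up to 10000 us -- the finest
 * granularity the tick-driven interval timers can honor.
 */
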
/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
int
itimerdecr(struct itimerval *itp, int usec)
{

        if (itp->it_value.tv_usec < usec) {
                if (itp->it_value.tv_sec == 0) {
                        /* expired, and already in next interval */
                        usec -= itp->it_value.tv_usec;
                        goto expire;
                }
                itp->it_value.tv_usec += 1000000;
                itp->it_value.tv_sec--;
        }
        itp->it_value.tv_usec -= usec;
        usec = 0;
        if (timevalisset(&itp->it_value))
                return (1);
        /* expired, exactly at end of interval */
expire:
        if (timevalisset(&itp->it_interval)) {
                itp->it_value = itp->it_interval;
                itp->it_value.tv_usec -= usec;
                if (itp->it_value.tv_usec < 0) {
                        itp->it_value.tv_usec += 1000000;
                        itp->it_value.tv_sec--;
                }
        } else
                itp->it_value.tv_usec = 0;              /* sec is already 0 */
        return (0);
}

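/*
 * Example of the carry-over: it_value = { 0, 300 } and usec = 1000
 * means the timer expired 700 us into the next interval, so 700 is
 * subtracted from the reloaded it_interval to keep the period from
 * drifting.
 */
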
/*
 * Add and subtract routines for timevals.
 * N.B.: the subtract routine doesn't deal with
 * results which are before the beginning;
 * it just gets very confused in this case.
 * Caveat emptor.
 */
void
timevaladd(struct timeval *t1, const struct timeval *t2)
{

        t1->tv_sec += t2->tv_sec;
        t1->tv_usec += t2->tv_usec;
        timevalfix(t1);
}

void
timevalsub(struct timeval *t1, const struct timeval *t2)
{

        t1->tv_sec -= t2->tv_sec;
        t1->tv_usec -= t2->tv_usec;
        timevalfix(t1);
}

static void
timevalfix(struct timeval *t1)
{

        if (t1->tv_usec < 0) {
                t1->tv_sec--;
                t1->tv_usec += 1000000;
        }
        if (t1->tv_usec >= 1000000) {
                t1->tv_sec++;
                t1->tv_usec -= 1000000;
        }
}

/*
 * ratecheck(): simple time-based rate-limit checking.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
        struct timeval tv, delta;
        int rv = 0;

        getmicrouptime(&tv);            /* NB: 10ms precision */
        delta = tv;
        timevalsub(&delta, lasttime);

        /*
         * The check for 0,0 is so that the message will be seen at
         * least once, even if the interval is huge.
         */
        if (timevalcmp(&delta, mininterval, >=) ||
            (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
                *lasttime = tv;
                rv = 1;
        }

        return (rv);
}

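/*
 * Usage sketch (hypothetical caller): limit a diagnostic to one
 * message every 10 seconds:
 *
 *      static struct timeval lastmsg;
 *      static const struct timeval interval = { 10, 0 };
 *
 *      if (ratecheck(&lastmsg, &interval))
 *              kprintf("something happened\n");
 */
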
/*
 * ppsratecheck(): packets (or events) per second limitation.
 *
 * Return 0 if the limit is to be enforced (e.g. the caller
 * should drop a packet because of the rate limitation).
 *
 * maxpps of 0 always causes zero to be returned.  maxpps of -1
 * always causes 1 to be returned; this effectively defeats rate
 * limiting.
 *
 * Note that we maintain the struct timeval for compatibility
 * with other BSD systems.  We reuse the storage and just monitor
 * clock ticks for minimal overhead.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
        int now;

        /*
         * Reset the last time and counter if this is the first call
         * or more than a second has passed since the last update of
         * lasttime.
         */
        now = ticks;
        if (lasttime->tv_sec == 0 || (u_int)(now - lasttime->tv_sec) >= hz) {
                lasttime->tv_sec = now;
                *curpps = 1;
                return (maxpps != 0);
        } else {
                (*curpps)++;            /* NB: ignore potential overflow */
                return (maxpps < 0 || *curpps < maxpps);
        }
}
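
/*
 * Usage sketch (hypothetical caller): allow at most 10 events per
 * second, e.g. for an ICMP-style reply limiter:
 *
 *      static struct timeval lasttv;
 *      static int curpps;
 *
 *      if (!ppsratecheck(&lasttv, &curpps, 10))
 *              return;         (over the limit: drop the event)
 */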
