FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_time.c

    1 /*      $OpenBSD: kern_time.c,v 1.161 2023/01/02 23:09:48 guenther Exp $        */
    2 /*      $NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $      */
    3 
    4 /*
    5  * Copyright (c) 1982, 1986, 1989, 1993
    6  *      The Regents of the University of California.  All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. Neither the name of the University nor the names of its contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   30  * SUCH DAMAGE.
   31  *
   32  *      @(#)kern_time.c 8.4 (Berkeley) 5/26/95
   33  */
   34 
   35 #include <sys/param.h>
   36 #include <sys/kernel.h>
   37 #include <sys/systm.h>
   38 #include <sys/mutex.h>
   39 #include <sys/rwlock.h>
   40 #include <sys/proc.h>
   41 #include <sys/ktrace.h>
   42 #include <sys/signalvar.h>
   43 #include <sys/stdint.h>
   44 #include <sys/pledge.h>
   45 #include <sys/task.h>
   46 #include <sys/timeout.h>
   47 #include <sys/timetc.h>
   48 
   49 #include <sys/mount.h>
   50 #include <sys/syscallargs.h>
   51 
   52 #include <dev/clock_subr.h>
   53 
   54 int itimerfix(struct itimerval *);
   55 
   56 /* 
   57  * Time of day and interval timer support.
   58  *
   59  * These routines provide the kernel entry points to get and set
   60  * the time-of-day and per-process interval timers.  Subroutines
   61  * here provide support for adding and subtracting timeval structures
   62  * and decrementing interval timers, optionally reloading the interval
   63  * timers when they expire.
   64  */
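
      /*
       * Editorial note: a minimal sketch of how the timespec macros from
       * <sys/time.h> used throughout this file compose.  The variable
       * names below are illustrative only and do not appear in this file:
       *
       *	struct timespec start, now, elapsed, deadline, interval;
       *
       *	nanouptime(&start);
       *	// ... do some work ...
       *	nanouptime(&now);
       *	timespecsub(&now, &start, &elapsed);		// elapsed = now - start
       *	timespecadd(&now, &interval, &deadline);	// deadline = now + interval
       */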
   65 
   66 /* This function is used by clock_settime and settimeofday */
   67 int
   68 settime(const struct timespec *ts)
   69 {
   70         struct timespec now;
   71 
   72         /*
   73          * Don't allow the time to be set forward so far it will wrap
   74          * and become negative, thus allowing an attacker to bypass
   75          * the next check below.  The cutoff is 1 year before rollover
   76          * occurs, so even if the attacker uses adjtime(2) to move
   77          * the time past the cutoff, it will take a very long time
   78          * to get to the wrap point.
   79          *
   80          * XXX: we check against UINT_MAX until we can figure out
   81          *      how to deal with the hardware RTCs.
   82          */
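              /*
               * Editorial worked example: with UINT_MAX = 4294967295
               * (which as a time_t falls in early 2106) and
               * 365*24*60*60 = 31536000, the cutoff below is
               * 4294967295 - 31536000 = 4263431295, i.e. roughly one
               * year before the 32-bit wrap point.
               */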
   83         if (ts->tv_sec > UINT_MAX - 365*24*60*60) {
   84                 printf("denied attempt to set clock forward to %lld\n",
   85                     (long long)ts->tv_sec);
   86                 return (EPERM);
   87         }
   88         /*
   89          * If the system is secure, we do not allow the time to be
   90          * set to an earlier value (it may be slowed using adjtime,
   91          * but not set back). This feature prevents interlopers from
   92          * setting arbitrary time stamps on files.
   93          */
   94         nanotime(&now);
   95         if (securelevel > 1 && timespeccmp(ts, &now, <=)) {
   96                 printf("denied attempt to set clock back %lld seconds\n",
   97                     (long long)now.tv_sec - ts->tv_sec);
   98                 return (EPERM);
   99         }
  100 
  101         tc_setrealtimeclock(ts);
  102         KERNEL_LOCK();
  103         resettodr();
  104         KERNEL_UNLOCK();
  105 
  106         return (0);
  107 }
  108 
  109 int
  110 clock_gettime(struct proc *p, clockid_t clock_id, struct timespec *tp)
  111 {
  112         struct proc *q;
  113         int error = 0;
  114 
  115         switch (clock_id) {
  116         case CLOCK_REALTIME:
  117                 nanotime(tp);
  118                 break;
  119         case CLOCK_UPTIME:
  120                 nanoruntime(tp);
  121                 break;
  122         case CLOCK_MONOTONIC:
  123         case CLOCK_BOOTTIME:
  124                 nanouptime(tp);
  125                 break;
  126         case CLOCK_PROCESS_CPUTIME_ID:
  127                 nanouptime(tp);
  128                 timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
  129                 timespecadd(tp, &p->p_p->ps_tu.tu_runtime, tp);
  130                 timespecadd(tp, &p->p_rtime, tp);
  131                 break;
  132         case CLOCK_THREAD_CPUTIME_ID:
  133                 nanouptime(tp);
  134                 timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
  135                 timespecadd(tp, &p->p_tu.tu_runtime, tp);
  136                 timespecadd(tp, &p->p_rtime, tp);
  137                 break;
  138         default:
  139                 /* check for clock from pthread_getcpuclockid() */
  140                 if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
  141                         KERNEL_LOCK();
  142                         q = tfind_user(__CLOCK_PTID(clock_id), p->p_p);
  143                         if (q == NULL)
  144                                 error = ESRCH;
  145                         else
  146                                 *tp = q->p_tu.tu_runtime;
  147                         KERNEL_UNLOCK();
  148                 } else
  149                         error = EINVAL;
  150                 break;
  151         }
  152         return (error);
  153 }
  154 
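      /*
       * Editorial note: an illustrative userland sketch (not part of this
       * file) of how the per-thread clocks handled in the default case
       * above are obtained and queried:
       *
       *	clockid_t cid;
       *	struct timespec ts;
       *
       *	pthread_getcpuclockid(pthread_self(), &cid);
       *	clock_gettime(cid, &ts);	// ends up in the default case above
       */
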
  155 int
  156 sys_clock_gettime(struct proc *p, void *v, register_t *retval)
  157 {
  158         struct sys_clock_gettime_args /* {
  159                 syscallarg(clockid_t) clock_id;
  160                 syscallarg(struct timespec *) tp;
  161         } */ *uap = v;
  162         struct timespec ats;
  163         int error;
  164 
  165         memset(&ats, 0, sizeof(ats));
  166         if ((error = clock_gettime(p, SCARG(uap, clock_id), &ats)) != 0)
  167                 return (error);
  168 
  169         error = copyout(&ats, SCARG(uap, tp), sizeof(ats));
  170 #ifdef KTRACE
  171         if (error == 0 && KTRPOINT(p, KTR_STRUCT))
  172                 ktrabstimespec(p, &ats);
  173 #endif
  174         return (error);
  175 }
  176 
  177 int
  178 sys_clock_settime(struct proc *p, void *v, register_t *retval)
  179 {
  180         struct sys_clock_settime_args /* {
  181                 syscallarg(clockid_t) clock_id;
  182                 syscallarg(const struct timespec *) tp;
  183         } */ *uap = v;
  184         struct timespec ats;
  185         clockid_t clock_id;
  186         int error;
  187 
  188         if ((error = suser(p)) != 0)
  189                 return (error);
  190 
  191         if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
  192                 return (error);
  193 
  194         clock_id = SCARG(uap, clock_id);
  195         switch (clock_id) {
  196         case CLOCK_REALTIME:
  197                 if (!timespecisvalid(&ats))
  198                         return (EINVAL);
  199                 if ((error = settime(&ats)) != 0)
  200                         return (error);
  201                 break;
  202         default:        /* Other clocks are read-only */
  203                 return (EINVAL);
  204         }
  205 
  206         return (0);
  207 }
  208 
  209 int
  210 sys_clock_getres(struct proc *p, void *v, register_t *retval)
  211 {
  212         struct sys_clock_getres_args /* {
  213                 syscallarg(clockid_t) clock_id;
  214                 syscallarg(struct timespec *) tp;
  215         } */ *uap = v;
  216         clockid_t clock_id;
  217         struct bintime bt;
  218         struct timespec ts;
  219         struct proc *q;
  220         u_int64_t scale;
  221         int error = 0, realstathz;
  222 
  223         memset(&ts, 0, sizeof(ts));
  224         realstathz = (stathz == 0) ? hz : stathz;
  225         clock_id = SCARG(uap, clock_id);
  226 
  227         switch (clock_id) {
  228         case CLOCK_REALTIME:
  229         case CLOCK_MONOTONIC:
  230         case CLOCK_BOOTTIME:
  231         case CLOCK_UPTIME:
  232                 memset(&bt, 0, sizeof(bt));
  233                 rw_enter_read(&tc_lock);
  234                 scale = ((1ULL << 63) / tc_getfrequency()) * 2;
  235                 bt.frac = tc_getprecision() * scale;
  236                 rw_exit_read(&tc_lock);
  237                 BINTIME_TO_TIMESPEC(&bt, &ts);
  238                 break;
  239         case CLOCK_PROCESS_CPUTIME_ID:
  240         case CLOCK_THREAD_CPUTIME_ID:
  241                 ts.tv_nsec = 1000000000 / realstathz;
  242                 break;
  243         default:
  244                 /* check for clock from pthread_getcpuclockid() */
  245                 if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
  246                         KERNEL_LOCK();
  247                         q = tfind_user(__CLOCK_PTID(clock_id), p->p_p);
  248                         if (q == NULL)
  249                                 error = ESRCH;
  250                         else
  251                                 ts.tv_nsec = 1000000000 / realstathz;
  252                         KERNEL_UNLOCK();
  253                 } else
  254                         error = EINVAL;
  255                 break;
  256         }
  257 
  258         if (error == 0 && SCARG(uap, tp)) {
  259                 ts.tv_nsec = MAX(ts.tv_nsec, 1);
  260                 error = copyout(&ts, SCARG(uap, tp), sizeof(ts));
  261 #ifdef KTRACE
  262                 if (error == 0 && KTRPOINT(p, KTR_STRUCT))
  263                         ktrreltimespec(p, &ts);
  264 #endif
  265         }
  266 
  267         return error;
  268 }
  269 
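      /*
       * Editorial note on the resolution math above: bintime fractions
       * are in units of 2^-64 seconds, so scale = (2^63 / freq) * 2 is
       * simply 2^64 / freq computed without overflowing, and
       * frac = precision * scale represents precision / freq seconds.
       * For example, a hypothetical 1 GHz timecounter with a precision
       * of 1 count yields 2^64 / 10^9 ~= 1.8e10, i.e. one nanosecond.
       */
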
  270 int
  271 sys_nanosleep(struct proc *p, void *v, register_t *retval)
  272 {
  273         static int chan;
  274         struct sys_nanosleep_args/* {
  275                 syscallarg(const struct timespec *) rqtp;
  276                 syscallarg(struct timespec *) rmtp;
  277         } */ *uap = v;
  278         struct timespec elapsed, remainder, request, start, stop;
  279         uint64_t nsecs;
  280         struct timespec *rmtp;
  281         int copyout_error, error;
  282 
  283         rmtp = SCARG(uap, rmtp);
  284         error = copyin(SCARG(uap, rqtp), &request, sizeof(request));
  285         if (error)
  286                 return (error);
  287 #ifdef KTRACE
  288         if (KTRPOINT(p, KTR_STRUCT))
  289                 ktrreltimespec(p, &request);
  290 #endif
  291 
  292         if (request.tv_sec < 0 || !timespecisvalid(&request))
  293                 return (EINVAL);
  294 
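              /*
               * Editorial note: the loop below clamps each sleep to
               * MAXTSLP, the largest finite timeout tsleep_nsec(9)
               * accepts, and recomputes the remaining time from the
               * monotonic clock after every wakeup.  It keeps sleeping
               * only while time remains and the previous sleep simply
               * timed out (EWOULDBLOCK); a signal or other error breaks
               * out with the remainder preserved for rmtp.
               */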
  295         do {
  296                 getnanouptime(&start);
  297                 nsecs = MAX(1, MIN(TIMESPEC_TO_NSEC(&request), MAXTSLP));
  298                 error = tsleep_nsec(&chan, PWAIT | PCATCH, "nanoslp", nsecs);
  299                 getnanouptime(&stop);
  300                 timespecsub(&stop, &start, &elapsed);
  301                 timespecsub(&request, &elapsed, &request);
  302                 if (request.tv_sec < 0)
  303                         timespecclear(&request);
  304                 if (error != EWOULDBLOCK)
  305                         break;
  306         } while (timespecisset(&request));
  307 
  308         if (error == ERESTART)
  309                 error = EINTR;
  310         if (error == EWOULDBLOCK)
  311                 error = 0;
  312 
  313         if (rmtp) {
  314                 memset(&remainder, 0, sizeof(remainder));
  315                 remainder = request;
  316                 copyout_error = copyout(&remainder, rmtp, sizeof(remainder));
  317                 if (copyout_error)
  318                         error = copyout_error;
  319 #ifdef KTRACE
  320                 if (copyout_error == 0 && KTRPOINT(p, KTR_STRUCT))
  321                         ktrreltimespec(p, &remainder);
  322 #endif
  323         }
  324 
  325         return error;
  326 }
  327 
  328 int
  329 sys_gettimeofday(struct proc *p, void *v, register_t *retval)
  330 {
  331         struct sys_gettimeofday_args /* {
  332                 syscallarg(struct timeval *) tp;
  333                 syscallarg(struct timezone *) tzp;
  334         } */ *uap = v;
  335         struct timeval atv;
  336         static const struct timezone zerotz = { 0, 0 };
  337         struct timeval *tp;
  338         struct timezone *tzp;
  339         int error = 0;
  340 
  341         tp = SCARG(uap, tp);
  342         tzp = SCARG(uap, tzp);
  343 
  344         if (tp) {
  345                 memset(&atv, 0, sizeof(atv));
  346                 microtime(&atv);
  347                 if ((error = copyout(&atv, tp, sizeof (atv))))
  348                         return (error);
  349 #ifdef KTRACE
  350                 if (KTRPOINT(p, KTR_STRUCT))
  351                         ktrabstimeval(p, &atv);
  352 #endif
  353         }
  354         if (tzp)
  355                 error = copyout(&zerotz, tzp, sizeof(zerotz));
  356         return (error);
  357 }
  358 
  359 int
  360 sys_settimeofday(struct proc *p, void *v, register_t *retval)
  361 {
  362         struct sys_settimeofday_args /* {
  363                 syscallarg(const struct timeval *) tv;
  364                 syscallarg(const struct timezone *) tzp;
  365         } */ *uap = v;
  366         struct timezone atz;
  367         struct timeval atv;
  368         const struct timeval *tv;
  369         const struct timezone *tzp;
  370         int error;
  371 
  372         tv = SCARG(uap, tv);
  373         tzp = SCARG(uap, tzp);
  374 
  375         if ((error = suser(p)))
  376                 return (error);
  377         /* Verify all parameters before changing time. */
  378         if (tv && (error = copyin(tv, &atv, sizeof(atv))))
  379                 return (error);
  380         if (tzp && (error = copyin(tzp, &atz, sizeof(atz))))
  381                 return (error);
  382         if (tv) {
  383                 struct timespec ts;
  384 
  385 #ifdef KTRACE
  386                 if (KTRPOINT(p, KTR_STRUCT))
  387                         ktrabstimeval(p, &atv);
  388 #endif
  389                 if (!timerisvalid(&atv))
  390                         return (EINVAL);
  391                 TIMEVAL_TO_TIMESPEC(&atv, &ts);
  392                 if ((error = settime(&ts)) != 0)
  393                         return (error);
  394         }
  395 
  396         return (0);
  397 }
  398 
  399 #define ADJFREQ_MAX (500000000LL << 32)
  400 #define ADJFREQ_MIN (-ADJFREQ_MAX)
  401 
  402 int
  403 sys_adjfreq(struct proc *p, void *v, register_t *retval)
  404 {
  405         struct sys_adjfreq_args /* {
  406                 syscallarg(const int64_t *) freq;
  407                 syscallarg(int64_t *) oldfreq;
  408         } */ *uap = v;
  409         int error = 0;
  410         int64_t f, oldf;
  411         const int64_t *freq = SCARG(uap, freq);
  412         int64_t *oldfreq = SCARG(uap, oldfreq);
  413 
  414         if (freq) {
  415                 if ((error = suser(p)))
  416                         return (error);
  417                 if ((error = copyin(freq, &f, sizeof(f))))
  418                         return (error);
  419                 if (f < ADJFREQ_MIN || f > ADJFREQ_MAX)
  420                         return (EINVAL);
  421         }
  422 
  423         rw_enter(&tc_lock, (freq == NULL) ? RW_READ : RW_WRITE);
  424         if (oldfreq) {
  425                 tc_adjfreq(&oldf, NULL);
  426                 if ((error = copyout(&oldf, oldfreq, sizeof(oldf))))
  427                         goto out;
  428         }
  429         if (freq)
  430                 tc_adjfreq(NULL, &f);
  431 out:
  432         rw_exit(&tc_lock);
  433         return (error);
  434 }
  435 
  436 int
  437 sys_adjtime(struct proc *p, void *v, register_t *retval)
  438 {
  439         struct sys_adjtime_args /* {
  440                 syscallarg(const struct timeval *) delta;
  441                 syscallarg(struct timeval *) olddelta;
  442         } */ *uap = v;
  443         struct timeval atv;
  444         const struct timeval *delta = SCARG(uap, delta);
  445         struct timeval *olddelta = SCARG(uap, olddelta);
  446         int64_t adjustment, remaining;
  447         int error;
  448 
  449         error = pledge_adjtime(p, delta);
  450         if (error)
  451                 return error;
  452 
  453         if (delta) {
  454                 if ((error = suser(p)))
  455                         return (error);
  456                 if ((error = copyin(delta, &atv, sizeof(struct timeval))))
  457                         return (error);
  458 #ifdef KTRACE
  459                 if (KTRPOINT(p, KTR_STRUCT))
  460                         ktrreltimeval(p, &atv);
  461 #endif
  462                 if (!timerisvalid(&atv))
  463                         return (EINVAL);
  464 
  465                 if (atv.tv_sec > INT64_MAX / 1000000)
  466                         return EINVAL;
  467                 if (atv.tv_sec < INT64_MIN / 1000000)
  468                         return EINVAL;
  469                 adjustment = atv.tv_sec * 1000000;
  470                 if (adjustment > INT64_MAX - atv.tv_usec)
  471                         return EINVAL;
  472                 adjustment += atv.tv_usec;
  473 
  474                 rw_enter_write(&tc_lock);
  475         }
  476 
  477         if (olddelta) {
  478                 tc_adjtime(&remaining, NULL);
  479                 memset(&atv, 0, sizeof(atv));
  480                 atv.tv_sec =  remaining / 1000000;
  481                 atv.tv_usec = remaining % 1000000;
  482                 if (atv.tv_usec < 0) {
  483                         atv.tv_usec += 1000000;
  484                         atv.tv_sec--;
  485                 }
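                      /*
                       * Editorial worked example: remaining = -1500000
                       * gives tv_sec = -1, tv_usec = -500000 under C's
                       * truncating division; the fixup above turns that
                       * into tv_sec = -2, tv_usec = 500000, the same
                       * -1.5 second delta in canonical form.
                       */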
  486 
  487                 if ((error = copyout(&atv, olddelta, sizeof(struct timeval))))
  488                         goto out;
  489         }
  490 
  491         if (delta)
  492                 tc_adjtime(NULL, &adjustment);
  493 out:
  494         if (delta)
  495                 rw_exit_write(&tc_lock);
  496         return (error);
  497 }
  498 
  499 
  500 struct mutex itimer_mtx = MUTEX_INITIALIZER(IPL_CLOCK);
  501 
  502 /*
  503  * Get or set value of an interval timer.  The process virtual and
  504  * profiling virtual time timers are kept internally in the
  505  * way they are specified externally: in time until they expire.
  506  *
  507  * The real time interval timer's it_value, in contrast, is kept as an 
  508  * absolute time rather than as a delta, so that it is easy to keep
  509  * periodic real-time signals from drifting.
  510  *
  511  * Virtual time timers are processed in the hardclock() routine of
  512  * kern_clock.c.  The real time timer is processed by a timeout
  513  * routine, called from the softclock() routine.  Since a callout
  514  * may be delayed in real time due to interrupt processing in the system,
  515  * it is possible for the real time timeout routine (realitexpire, given below)
  516  * to be delayed in real time past when it is supposed to occur.  It
  517  * does not suffice, therefore, to reload the real timer's .it_value from the
  518  * real timer's .it_interval.  Rather, we compute the next time in
  519  * absolute time the timer should go off.
  520  */
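      /*
       * Editorial note: a minimal sketch of the drift-free reload
       * described above, using the same primitives as realitexpire()
       * below.  Rather than re-arming for "now + it_interval", the
       * absolute expiry is advanced by whole intervals until it lies in
       * the future, so a late callout does not delay later expirations:
       *
       *	nanouptime(&now);
       *	while (timespeccmp(&it_value, &now, <=))
       *		timespecadd(&it_value, &it_interval, &it_value);
       *	timeout_abs_ts(&to, &it_value);
       */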
  521 void
  522 setitimer(int which, const struct itimerval *itv, struct itimerval *olditv)
  523 {
  524         struct itimerspec its, oldits;
  525         struct timespec now;
  526         struct itimerspec *itimer;
  527         struct process *pr;
  528 
  529         KASSERT(which >= ITIMER_REAL && which <= ITIMER_PROF);
  530 
  531         pr = curproc->p_p;
  532         itimer = &pr->ps_timer[which];
  533 
  534         if (itv != NULL) {
  535                 TIMEVAL_TO_TIMESPEC(&itv->it_value, &its.it_value);
  536                 TIMEVAL_TO_TIMESPEC(&itv->it_interval, &its.it_interval);
  537         }
  538 
  539         if (which == ITIMER_REAL) {
  540                 mtx_enter(&pr->ps_mtx);
  541                 nanouptime(&now);
  542         } else
  543                 mtx_enter(&itimer_mtx);
  544 
  545         if (olditv != NULL)
  546                 oldits = *itimer;
  547         if (itv != NULL) {
  548                 if (which == ITIMER_REAL) {
  549                         if (timespecisset(&its.it_value)) {
  550                                 timespecadd(&its.it_value, &now, &its.it_value);
  551                                 timeout_abs_ts(&pr->ps_realit_to,&its.it_value);
  552                         } else
  553                                 timeout_del(&pr->ps_realit_to);
  554                 }
  555                 *itimer = its;
  556         }
  557 
  558         if (which == ITIMER_REAL)
  559                 mtx_leave(&pr->ps_mtx);
  560         else
  561                 mtx_leave(&itimer_mtx);
  562 
  563         if (olditv != NULL) {
  564                 if (which == ITIMER_REAL && timespecisset(&oldits.it_value)) {
  565                         if (timespeccmp(&oldits.it_value, &now, <))
  566                                 timespecclear(&oldits.it_value);
  567                         else {
  568                                 timespecsub(&oldits.it_value, &now,
  569                                     &oldits.it_value);
  570                         }
  571                 }
  572                 TIMESPEC_TO_TIMEVAL(&olditv->it_value, &oldits.it_value);
  573                 TIMESPEC_TO_TIMEVAL(&olditv->it_interval, &oldits.it_interval);
  574         }
  575 }
  576 
  577 void
  578 cancel_all_itimers(void)
  579 {
  580         struct itimerval itv;
  581         int i;
  582 
  583         timerclear(&itv.it_value);
  584         timerclear(&itv.it_interval);
  585 
  586         for (i = 0; i < nitems(curproc->p_p->ps_timer); i++)
  587                 setitimer(i, &itv, NULL);
  588 }
  589 
  590 int
  591 sys_getitimer(struct proc *p, void *v, register_t *retval)
  592 {
  593         struct sys_getitimer_args /* {
  594                 syscallarg(int) which;
  595                 syscallarg(struct itimerval *) itv;
  596         } */ *uap = v;
  597         struct itimerval aitv;
  598         int which;
  599 
  600         which = SCARG(uap, which);
  601         if (which < ITIMER_REAL || which > ITIMER_PROF)
  602                 return EINVAL;
  603 
  604         memset(&aitv, 0, sizeof(aitv));
  605 
  606         setitimer(which, NULL, &aitv);
  607 
  608         return copyout(&aitv, SCARG(uap, itv), sizeof(aitv));
  609 }
  610 
  611 int
  612 sys_setitimer(struct proc *p, void *v, register_t *retval)
  613 {
  614         struct sys_setitimer_args /* {
  615                 syscallarg(int) which;
  616                 syscallarg(const struct itimerval *) itv;
  617                 syscallarg(struct itimerval *) oitv;
  618         } */ *uap = v;
  619         struct itimerval aitv, olditv;
  620         struct itimerval *newitvp, *olditvp;
  621         int error, which;
  622 
  623         which = SCARG(uap, which);
  624         if (which < ITIMER_REAL || which > ITIMER_PROF)
  625                 return EINVAL;
  626 
  627         newitvp = olditvp = NULL;
  628         if (SCARG(uap, itv) != NULL) {
  629                 error = copyin(SCARG(uap, itv), &aitv, sizeof(aitv));
  630                 if (error)
  631                         return error;
  632                 error = itimerfix(&aitv);
  633                 if (error)
  634                         return error;
  635                 newitvp = &aitv;
  636         }
  637         if (SCARG(uap, oitv) != NULL) {
  638                 memset(&olditv, 0, sizeof(olditv));
  639                 olditvp = &olditv;
  640         }
  641         if (newitvp == NULL && olditvp == NULL)
  642                 return 0;
  643 
  644         setitimer(which, newitvp, olditvp);
  645 
  646         if (SCARG(uap, oitv) != NULL)
  647                 return copyout(&olditv, SCARG(uap, oitv), sizeof(olditv));
  648 
  649         return 0;
  650 }
  651 
  652 /*
  653  * Real interval timer expired:
  654  * send the process whose timer expired an alarm signal.
  655  * If the timer is not set up to reload, just return.
  656  * Else compute the next time the timer should go off, which is > current time.
  657  * This is where a delay in processing this timeout causes multiple
  658  * SIGALRM deliveries to be compressed into one.
  659  */
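      /*
       * Editorial note: an illustrative userland sketch (not part of this
       * file) of arming the timer that this handler serves:
       *
       *	struct itimerval itv = {
       *		.it_value    = { .tv_sec = 1, .tv_usec = 0 },
       *		.it_interval = { .tv_sec = 1, .tv_usec = 0 },
       *	};
       *
       *	setitimer(ITIMER_REAL, &itv, NULL);	// SIGALRM once per second
       */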
  660 void
  661 realitexpire(void *arg)
  662 {
  663         struct timespec cts;
  664         struct process *pr = arg;
  665         struct itimerspec *tp = &pr->ps_timer[ITIMER_REAL];
  666         int need_signal = 0;
  667 
  668         mtx_enter(&pr->ps_mtx);
  669 
  670         /*
  671          * Do nothing if the timer was cancelled or rescheduled while we
  672          * were entering the mutex.
  673          */
  674         if (!timespecisset(&tp->it_value) || timeout_pending(&pr->ps_realit_to))
  675                 goto out;
  676 
  677         /* The timer expired.  We need to send the signal. */
  678         need_signal = 1;
  679 
  680         /* One-shot timers are not reloaded. */
  681         if (!timespecisset(&tp->it_interval)) {
  682                 timespecclear(&tp->it_value);
  683                 goto out;
  684         }
  685 
  686         /*
  687          * Find the nearest future expiration point and restart
  688          * the timeout.
  689          */
  690         nanouptime(&cts);
  691         while (timespeccmp(&tp->it_value, &cts, <=))
  692                 timespecadd(&tp->it_value, &tp->it_interval, &tp->it_value);
  693         if ((pr->ps_flags & PS_EXITING) == 0)
  694                 timeout_abs_ts(&pr->ps_realit_to, &tp->it_value);
  695 
  696 out:
  697         mtx_leave(&pr->ps_mtx);
  698 
  699         if (need_signal)
  700                 prsignal(pr, SIGALRM);
  701 }
  702 
  703 /*
  704  * Check if the given setitimer(2) input is valid.  Clear it_interval
  705  * if it_value is unset.  Round it_interval up to the minimum interval
  706  * if necessary.
  707  */
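      /*
       * Editorial example: with hz = 100, the global "tick" is 10000
       * microseconds, so a requested it_interval of, say, 1 microsecond
       * is rounded up to one clock tick (10 ms); a negative it_value is
       * rejected with EINVAL.
       */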
  708 int
  709 itimerfix(struct itimerval *itv)
  710 {
  711         static const struct timeval max = { .tv_sec = UINT_MAX, .tv_usec = 0 };
  712         struct timeval min_interval = { .tv_sec = 0, .tv_usec = tick };
  713 
  714         if (itv->it_value.tv_sec < 0 || !timerisvalid(&itv->it_value))
  715                 return EINVAL;
  716         if (timercmp(&itv->it_value, &max, >))
  717                 return EINVAL;
  718         if (itv->it_interval.tv_sec < 0 || !timerisvalid(&itv->it_interval))
  719                 return EINVAL;
  720         if (timercmp(&itv->it_interval, &max, >))
  721                 return EINVAL;
  722 
  723         if (!timerisset(&itv->it_value))
  724                 timerclear(&itv->it_interval);
  725         if (timerisset(&itv->it_interval)) {
  726                 if (timercmp(&itv->it_interval, &min_interval, <))
  727                         itv->it_interval = min_interval;
  728         }
  729 
  730         return 0;
  731 }
  732 
  733 /*
  734  * Decrement an interval timer by the given number of nanoseconds.
  735  * If the timer expires and it is periodic then reload it.  When reloading
  736  * the timer we subtract any overrun from the next period so that the timer
  737  * does not drift.
  738  */
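      /*
       * Editorial worked example: with it_value = 5 ms,
       * it_interval = 10 ms and a 20 ms decrement, it_value first drops
       * to -15 ms; the reload loop then adds 10 ms twice, leaving
       * it_value = 5 ms.  The 15 ms of overrun is absorbed and the
       * timer stays on its original 10 ms phase.
       */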
  739 int
  740 itimerdecr(struct itimerspec *itp, long nsec)
  741 {
  742         struct timespec decrement;
  743 
  744         NSEC_TO_TIMESPEC(nsec, &decrement);
  745 
  746         mtx_enter(&itimer_mtx);
  747 
  748         /*
  749          * Double-check that the timer is enabled.  A different thread
  750          * in setitimer(2) may have disabled it while we were entering
  751          * the mutex.
  752          */
  753         if (!timespecisset(&itp->it_value)) {
  754                 mtx_leave(&itimer_mtx);
  755                 return (1);
  756         }
  757 
  758         /*
  759          * The timer is enabled.  Update and reload it as needed.
  760          */
  761         timespecsub(&itp->it_value, &decrement, &itp->it_value);
  762         if (itp->it_value.tv_sec >= 0 && timespecisset(&itp->it_value)) {
  763                 mtx_leave(&itimer_mtx);
  764                 return (1);
  765         }
  766         if (!timespecisset(&itp->it_interval)) {
  767                 timespecclear(&itp->it_value);
  768                 mtx_leave(&itimer_mtx);
  769                 return (0);
  770         }
  771         while (itp->it_value.tv_sec < 0 || !timespecisset(&itp->it_value))
  772                 timespecadd(&itp->it_value, &itp->it_interval, &itp->it_value);
  773         mtx_leave(&itimer_mtx);
  774         return (0);
  775 }
  776 
  777 struct mutex ratecheck_mtx = MUTEX_INITIALIZER(IPL_HIGH);
  778 
  779 /*
  780  * ratecheck(): simple time-based rate-limit checking.  see ratecheck(9)
  781  * for usage and rationale.
  782  */
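      /*
       * Editorial note: an illustrative caller (the names are made up),
       * limiting a diagnostic to once every five seconds:
       *
       *	static struct timeval lasterror;
       *	static const struct timeval errinterval = { 5, 0 };
       *
       *	if (ratecheck(&lasterror, &errinterval))
       *		printf("example: dropping frames\n");
       */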
  783 int
  784 ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
  785 {
  786         struct timeval tv, delta;
  787         int rv = 0;
  788 
  789         getmicrouptime(&tv);
  790 
  791         mtx_enter(&ratecheck_mtx);
  792         timersub(&tv, lasttime, &delta);
  793 
  794         /*
  795          * The check for 0,0 ensures that the message will be seen at
  796          * least once, even if the interval is huge.
  797          */
  798         if (timercmp(&delta, mininterval, >=) ||
  799             (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
  800                 *lasttime = tv;
  801                 rv = 1;
  802         }
  803         mtx_leave(&ratecheck_mtx);
  804 
  805         return (rv);
  806 }
  807 
  808 struct mutex ppsratecheck_mtx = MUTEX_INITIALIZER(IPL_HIGH);
  809 
  810 /*
  811  * ppsratecheck(): packets (or events) per second limitation.
  812  */
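      /*
       * Editorial note: an illustrative caller (the names are made up),
       * allowing at most 100 log messages per second:
       *
       *	static struct timeval lastlog;
       *	static int curlogs;
       *
       *	if (ppsratecheck(&lastlog, &curlogs, 100))
       *		printf("example: bad packet\n");
       */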
  813 int
  814 ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
  815 {
  816         struct timeval tv, delta;
  817         int rv;
  818 
  819         microuptime(&tv);
  820 
  821         mtx_enter(&ppsratecheck_mtx);
  822         timersub(&tv, lasttime, &delta);
  823 
  824         /*
  825          * The check for 0,0 ensures that the message will be seen at
  826          * least once.  If more than one second has passed since the
  827          * last update of lasttime, reset the counter.
  828          *
  829          * We increment *curpps even in the *curpps < maxpps case, as
  830          * some callers may use *curpps for statistics as well.
  831          */
  832         if (maxpps == 0)
  833                 rv = 0;
  834         else if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
  835             delta.tv_sec >= 1) {
  836                 *lasttime = tv;
  837                 *curpps = 0;
  838                 rv = 1;
  839         } else if (maxpps < 0)
  840                 rv = 1;
  841         else if (*curpps < maxpps)
  842                 rv = 1;
  843         else
  844                 rv = 0;
  845 
  846         /* be careful about wrap-around */
  847         if (*curpps + 1 > *curpps)
  848                 *curpps = *curpps + 1;
  849 
  850         mtx_leave(&ppsratecheck_mtx);
  851 
  852         return (rv);
  853 }
  854 
  855 todr_chip_handle_t todr_handle;
  856 int inittodr_done;
  857 
  858 #define MINYEAR         ((OpenBSD / 100) - 1)   /* minimum plausible year */
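
      /*
       * Editorial note: the OpenBSD macro from <sys/param.h> encodes the
       * release date as YYYYMM, so OpenBSD / 100 is the release year and
       * MINYEAR is the year before it (e.g. 202305 / 100 - 1 == 2022).
       */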
  859 
  860 /*
  861  * inittodr:
  862  *
  863  *      Initialize time from the time-of-day register.
  864  */
  865 void
  866 inittodr(time_t base)
  867 {
  868         time_t deltat;
  869         struct timeval rtctime;
  870         struct timespec ts;
  871         int badbase;
  872 
  873         inittodr_done = 1;
  874 
  875         if (base < (MINYEAR - 1970) * SECYR) {
  876                 printf("WARNING: preposterous time in file system\n");
  877                 /* read the system clock anyway */
  878                 base = (MINYEAR - 1970) * SECYR;
  879                 badbase = 1;
  880         } else
  881                 badbase = 0;
  882 
  883         rtctime.tv_sec = base;
  884         rtctime.tv_usec = 0;
  885 
  886         if (todr_handle == NULL ||
  887             todr_gettime(todr_handle, &rtctime) != 0 ||
  888             rtctime.tv_sec < (MINYEAR - 1970) * SECYR) {
  889                 /*
  890                  * Believe the time in the file system for lack of
  891                  * anything better, resetting the TODR.
  892                  */
  893                 rtctime.tv_sec = base;
  894                 rtctime.tv_usec = 0;
  895                 if (todr_handle != NULL && !badbase)
  896                         printf("WARNING: bad clock chip time\n");
  897                 ts.tv_sec = rtctime.tv_sec;
  898                 ts.tv_nsec = rtctime.tv_usec * 1000;
  899                 tc_setclock(&ts);
  900                 goto bad;
  901         } else {
  902                 ts.tv_sec = rtctime.tv_sec;
  903                 ts.tv_nsec = rtctime.tv_usec * 1000;
  904                 tc_setclock(&ts);
  905         }
  906 
  907         if (!badbase) {
  908                 /*
  909                  * See if we gained/lost two or more days; if
  910                  * so, assume something is amiss.
  911                  */
  912                 deltat = rtctime.tv_sec - base;
  913                 if (deltat < 0)
  914                         deltat = -deltat;
  915                 if (deltat < 2 * SECDAY)
  916                         return;         /* all is well */
  917 #ifndef SMALL_KERNEL
  918                 printf("WARNING: clock %s %lld days\n",
  919                     rtctime.tv_sec < base ? "lost" : "gained",
  920                     (long long)(deltat / SECDAY));
  921 #endif
  922         }
  923  bad:
  924         printf("WARNING: CHECK AND RESET THE DATE!\n");
  925 }
  926 
  927 /*
  928  * resettodr:
  929  *
  930  *      Reset the time-of-day register with the current time.
  931  */
  932 void
  933 resettodr(void)
  934 {
  935         struct timeval rtctime;
  936 
  937         /*
  938          * Skip writing the RTC if inittodr(9) never ran.  We don't
  939          * want to overwrite a reasonable value with a nonsense value.
  940          */
  941         if (!inittodr_done)
  942                 return;
  943 
  944         microtime(&rtctime);
  945 
  946         if (todr_handle != NULL &&
  947             todr_settime(todr_handle, &rtctime) != 0)
  948                 printf("WARNING: can't update clock chip time\n");
  949 }
  950 
  951 void
  952 todr_attach(struct todr_chip_handle *todr)
  953 {
  954         if (todr_handle == NULL ||
  955             todr->todr_quality > todr_handle->todr_quality)
  956                 todr_handle = todr;
  957 }
  958 
  959 #define RESETTODR_PERIOD        1800
  960 
  961 void periodic_resettodr(void *);
  962 void perform_resettodr(void *);
  963 
  964 struct timeout resettodr_to = TIMEOUT_INITIALIZER(periodic_resettodr, NULL);
  965 struct task resettodr_task = TASK_INITIALIZER(perform_resettodr, NULL);
  966 
  967 void
  968 periodic_resettodr(void *arg __unused)
  969 {
  970         task_add(systq, &resettodr_task);
  971 }
  972 
  973 void
  974 perform_resettodr(void *arg __unused)
  975 {
  976         resettodr();
  977         timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
  978 }
  979 
  980 void
  981 start_periodic_resettodr(void)
  982 {
  983         timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
  984 }
  985 
  986 void
  987 stop_periodic_resettodr(void)
  988 {
  989         timeout_del(&resettodr_to);
  990         task_del(systq, &resettodr_task);
  991 }

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.