FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_time.c
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1982, 1986, 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * @(#)kern_time.c 8.1 (Berkeley) 6/10/93
32 */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 #include "opt_ktrace.h"
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/limits.h>
42 #include <sys/clock.h>
43 #include <sys/lock.h>
44 #include <sys/mutex.h>
45 #include <sys/sysproto.h>
46 #include <sys/resourcevar.h>
47 #include <sys/signalvar.h>
48 #include <sys/kernel.h>
49 #include <sys/sleepqueue.h>
50 #include <sys/syscallsubr.h>
51 #include <sys/sysctl.h>
52 #include <sys/priv.h>
53 #include <sys/proc.h>
54 #include <sys/posix4.h>
55 #include <sys/time.h>
56 #include <sys/timers.h>
57 #include <sys/timetc.h>
58 #include <sys/vnode.h>
59 #ifdef KTRACE
60 #include <sys/ktrace.h>
61 #endif
62
63 #include <vm/vm.h>
64 #include <vm/vm_extern.h>
65 #include <vm/uma.h>
66 #define MAX_CLOCKS (CLOCK_MONOTONIC+1)
67 #define CPUCLOCK_BIT 0x80000000
68 #define CPUCLOCK_PROCESS_BIT 0x40000000
69 #define CPUCLOCK_ID_MASK (~(CPUCLOCK_BIT|CPUCLOCK_PROCESS_BIT))
70 #define MAKE_THREAD_CPUCLOCK(tid) (CPUCLOCK_BIT|(tid))
71 #define MAKE_PROCESS_CPUCLOCK(pid) \
72 (CPUCLOCK_BIT|CPUCLOCK_PROCESS_BIT|(pid))
73
74 #define NS_PER_SEC 1000000000
75
76 static struct kclock posix_clocks[MAX_CLOCKS];
77 static uma_zone_t itimer_zone = NULL;
78
79 /*
80 * Time of day and interval timer support.
81 *
82 * These routines provide the kernel entry points to get and set
83 * the time-of-day and per-process interval timers. Subroutines
84 * here provide support for adding and subtracting timeval structures
85 * and decrementing interval timers, optionally reloading the interval
86 * timers when they expire.
87 */
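/*
 * Illustrative sketch (not part of the kernel source): the entry points
 * below ultimately back the POSIX interval-timer API, so a minimal
 * userland consumer looks roughly like:
 *
 *	#include <sys/time.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	static void handler(int sig) {}
 *
 *	int
 *	main(void)
 *	{
 *		struct itimerval itv = {
 *			.it_interval = { 1, 0 },
 *			.it_value = { 1, 0 },
 *		};
 *
 *		signal(SIGALRM, handler);
 *		setitimer(ITIMER_REAL, &itv, NULL);	(1 s periodic SIGALRM)
 *		for (;;)
 *			pause();
 *	}
 */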
88
89 static int settime(struct thread *, struct timeval *);
90 static void timevalfix(struct timeval *);
91 static int user_clock_nanosleep(struct thread *td, clockid_t clock_id,
92 int flags, const struct timespec *ua_rqtp,
93 struct timespec *ua_rmtp);
94
95 static void itimer_start(void);
96 static int itimer_init(void *, int, int);
97 static void itimer_fini(void *, int);
98 static void itimer_enter(struct itimer *);
99 static void itimer_leave(struct itimer *);
100 static struct itimer *itimer_find(struct proc *, int);
101 static void itimers_alloc(struct proc *);
102 static int realtimer_create(struct itimer *);
103 static int realtimer_gettime(struct itimer *, struct itimerspec *);
104 static int realtimer_settime(struct itimer *, int,
105 struct itimerspec *, struct itimerspec *);
106 static int realtimer_delete(struct itimer *);
107 static void realtimer_clocktime(clockid_t, struct timespec *);
108 static void realtimer_expire(void *);
109 static void realtimer_expire_l(struct itimer *it, bool proc_locked);
110
111 static void realitexpire(void *arg);
112
113 static int register_posix_clock(int, const struct kclock *);
114 static void itimer_fire(struct itimer *it);
115 static int itimespecfix(struct timespec *ts);
116
117 #define CLOCK_CALL(clock, call, arglist) \
118 ((*posix_clocks[clock].call) arglist)
119
120 SYSINIT(posix_timer, SI_SUB_P1003_1B, SI_ORDER_FIRST+4, itimer_start, NULL);
121
122 static int
123 settime(struct thread *td, struct timeval *tv)
124 {
125 struct timeval delta, tv1, tv2;
126 static struct timeval maxtime, laststep;
127 struct timespec ts;
128
129 microtime(&tv1);
130 delta = *tv;
131 timevalsub(&delta, &tv1);
132
133 /*
134 * If the system is secure, we do not allow the time to be
135 * set to a value earlier than 1 second less than the highest
136 * time we have yet seen. The worst a miscreant can do in
137 * this circumstance is "freeze" time. He couldn't go
138 * back to the past.
139 *
140 * We similarly do not allow the clock to be stepped more
141 * than one second, nor more than once per second. This allows
142 * a miscreant to make the clock march double-time, but no worse.
143 */
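	/*
	 * Worked example of the policy above (illustrative): at
	 * securelevel 2, with the current time at 1000 s and maxtime
	 * 1000 s, a request to step back to 990 s gives tv2 = -10 s < -1,
	 * so the target is clamped to maxtime - 1 = 999 s. A request to
	 * step forward to 1005 s gives delta = +5 s > 1 and is clamped to
	 * tv1 + 1 = 1001 s; a second forward step within the same second
	 * fails with EPERM.
	 */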
144 if (securelevel_gt(td->td_ucred, 1) != 0) {
145 if (delta.tv_sec < 0 || delta.tv_usec < 0) {
146 /*
147 * Update maxtime to latest time we've seen.
148 */
149 if (tv1.tv_sec > maxtime.tv_sec)
150 maxtime = tv1;
151 tv2 = *tv;
152 timevalsub(&tv2, &maxtime);
153 if (tv2.tv_sec < -1) {
154 tv->tv_sec = maxtime.tv_sec - 1;
155 printf("Time adjustment clamped to -1 second\n");
156 }
157 } else {
158 if (tv1.tv_sec == laststep.tv_sec)
159 return (EPERM);
160 if (delta.tv_sec > 1) {
161 tv->tv_sec = tv1.tv_sec + 1;
162 printf("Time adjustment clamped to +1 second\n");
163 }
164 laststep = *tv;
165 }
166 }
167
168 ts.tv_sec = tv->tv_sec;
169 ts.tv_nsec = tv->tv_usec * 1000;
170 tc_setclock(&ts);
171 resettodr();
172 return (0);
173 }
174
175 #ifndef _SYS_SYSPROTO_H_
176 struct clock_getcpuclockid2_args {
177 id_t id;
178 int which;
179 clockid_t *clock_id;
180 };
181 #endif
182 /* ARGSUSED */
183 int
184 sys_clock_getcpuclockid2(struct thread *td, struct clock_getcpuclockid2_args *uap)
185 {
186 clockid_t clk_id;
187 int error;
188
189 error = kern_clock_getcpuclockid2(td, uap->id, uap->which, &clk_id);
190 if (error == 0)
191 error = copyout(&clk_id, uap->clock_id, sizeof(clockid_t));
192 return (error);
193 }
194
195 int
196 kern_clock_getcpuclockid2(struct thread *td, id_t id, int which,
197 clockid_t *clk_id)
198 {
199 struct proc *p;
200 pid_t pid;
201 lwpid_t tid;
202 int error;
203
204 switch (which) {
205 case CPUCLOCK_WHICH_PID:
206 if (id != 0) {
207 error = pget(id, PGET_CANSEE | PGET_NOTID, &p);
208 if (error != 0)
209 return (error);
210 PROC_UNLOCK(p);
211 pid = id;
212 } else {
213 pid = td->td_proc->p_pid;
214 }
215 *clk_id = MAKE_PROCESS_CPUCLOCK(pid);
216 return (0);
217 case CPUCLOCK_WHICH_TID:
218 tid = id == 0 ? td->td_tid : id;
219 *clk_id = MAKE_THREAD_CPUCLOCK(tid);
220 return (0);
221 default:
222 return (EINVAL);
223 }
224 }
225
226 #ifndef _SYS_SYSPROTO_H_
227 struct clock_gettime_args {
228 clockid_t clock_id;
229 struct timespec *tp;
230 };
231 #endif
232 /* ARGSUSED */
233 int
234 sys_clock_gettime(struct thread *td, struct clock_gettime_args *uap)
235 {
236 struct timespec ats;
237 int error;
238
239 error = kern_clock_gettime(td, uap->clock_id, &ats);
240 if (error == 0)
241 error = copyout(&ats, uap->tp, sizeof(ats));
242
243 return (error);
244 }
245
246 static inline void
247 cputick2timespec(uint64_t runtime, struct timespec *ats)
248 {
249 runtime = cputick2usec(runtime);
250 ats->tv_sec = runtime / 1000000;
251 ats->tv_nsec = runtime % 1000000 * 1000;
252 }
253
254 void
255 kern_thread_cputime(struct thread *targettd, struct timespec *ats)
256 {
257 uint64_t runtime, curtime, switchtime;
258
259 if (targettd == NULL) { /* current thread */
260 spinlock_enter();
261 switchtime = PCPU_GET(switchtime);
262 curtime = cpu_ticks();
263 runtime = curthread->td_runtime;
264 spinlock_exit();
265 runtime += curtime - switchtime;
266 } else {
267 PROC_LOCK_ASSERT(targettd->td_proc, MA_OWNED);
268 thread_lock(targettd);
269 runtime = targettd->td_runtime;
270 thread_unlock(targettd);
271 }
272 cputick2timespec(runtime, ats);
273 }
274
275 void
276 kern_process_cputime(struct proc *targetp, struct timespec *ats)
277 {
278 uint64_t runtime;
279 struct rusage ru;
280
281 PROC_LOCK_ASSERT(targetp, MA_OWNED);
282 PROC_STATLOCK(targetp);
283 rufetch(targetp, &ru);
284 runtime = targetp->p_rux.rux_runtime;
285 if (curthread->td_proc == targetp)
286 runtime += cpu_ticks() - PCPU_GET(switchtime);
287 PROC_STATUNLOCK(targetp);
288 cputick2timespec(runtime, ats);
289 }
290
291 static int
292 get_cputime(struct thread *td, clockid_t clock_id, struct timespec *ats)
293 {
294 struct proc *p, *p2;
295 struct thread *td2;
296 lwpid_t tid;
297 pid_t pid;
298 int error;
299
300 p = td->td_proc;
301 if ((clock_id & CPUCLOCK_PROCESS_BIT) == 0) {
302 tid = clock_id & CPUCLOCK_ID_MASK;
303 td2 = tdfind(tid, p->p_pid);
304 if (td2 == NULL)
305 return (EINVAL);
306 kern_thread_cputime(td2, ats);
307 PROC_UNLOCK(td2->td_proc);
308 } else {
309 pid = clock_id & CPUCLOCK_ID_MASK;
310 error = pget(pid, PGET_CANSEE, &p2);
311 if (error != 0)
312 return (EINVAL);
313 kern_process_cputime(p2, ats);
314 PROC_UNLOCK(p2);
315 }
316 return (0);
317 }
318
319 int
320 kern_clock_gettime(struct thread *td, clockid_t clock_id, struct timespec *ats)
321 {
322 struct timeval sys, user;
323 struct proc *p;
324
325 p = td->td_proc;
326 switch (clock_id) {
327 case CLOCK_REALTIME: /* Default to precise. */
328 case CLOCK_REALTIME_PRECISE:
329 nanotime(ats);
330 break;
331 case CLOCK_REALTIME_FAST:
332 getnanotime(ats);
333 break;
334 case CLOCK_VIRTUAL:
335 PROC_LOCK(p);
336 PROC_STATLOCK(p);
337 calcru(p, &user, &sys);
338 PROC_STATUNLOCK(p);
339 PROC_UNLOCK(p);
340 TIMEVAL_TO_TIMESPEC(&user, ats);
341 break;
342 case CLOCK_PROF:
343 PROC_LOCK(p);
344 PROC_STATLOCK(p);
345 calcru(p, &user, &sys);
346 PROC_STATUNLOCK(p);
347 PROC_UNLOCK(p);
348 timevaladd(&user, &sys);
349 TIMEVAL_TO_TIMESPEC(&user, ats);
350 break;
351 case CLOCK_MONOTONIC: /* Default to precise. */
352 case CLOCK_MONOTONIC_PRECISE:
353 case CLOCK_UPTIME:
354 case CLOCK_UPTIME_PRECISE:
355 nanouptime(ats);
356 break;
357 case CLOCK_UPTIME_FAST:
358 case CLOCK_MONOTONIC_FAST:
359 getnanouptime(ats);
360 break;
361 case CLOCK_SECOND:
362 ats->tv_sec = time_second;
363 ats->tv_nsec = 0;
364 break;
365 case CLOCK_THREAD_CPUTIME_ID:
366 kern_thread_cputime(NULL, ats);
367 break;
368 case CLOCK_PROCESS_CPUTIME_ID:
369 PROC_LOCK(p);
370 kern_process_cputime(p, ats);
371 PROC_UNLOCK(p);
372 break;
373 default:
374 if ((int)clock_id >= 0)
375 return (EINVAL);
376 return (get_cputime(td, clock_id, ats));
377 }
378 return (0);
379 }
380
381 #ifndef _SYS_SYSPROTO_H_
382 struct clock_settime_args {
383 clockid_t clock_id;
384 const struct timespec *tp;
385 };
386 #endif
387 /* ARGSUSED */
388 int
389 sys_clock_settime(struct thread *td, struct clock_settime_args *uap)
390 {
391 struct timespec ats;
392 int error;
393
394 if ((error = copyin(uap->tp, &ats, sizeof(ats))) != 0)
395 return (error);
396 return (kern_clock_settime(td, uap->clock_id, &ats));
397 }
398
399 static int allow_insane_settime = 0;
400 SYSCTL_INT(_debug, OID_AUTO, allow_insane_settime, CTLFLAG_RWTUN,
401 &allow_insane_settime, 0,
402 "do not perform possibly restrictive checks on settime(2) args");
403
404 int
405 kern_clock_settime(struct thread *td, clockid_t clock_id, struct timespec *ats)
406 {
407 struct timeval atv;
408 int error;
409
410 if ((error = priv_check(td, PRIV_CLOCK_SETTIME)) != 0)
411 return (error);
412 if (clock_id != CLOCK_REALTIME)
413 return (EINVAL);
414 if (!timespecvalid_interval(ats))
415 return (EINVAL);
416 if (!allow_insane_settime &&
417 (ats->tv_sec > 8000ULL * 365 * 24 * 60 * 60 ||
418 ats->tv_sec < utc_offset()))
419 return (EINVAL);
420 /* XXX Don't convert nsec->usec and back */
421 TIMESPEC_TO_TIMEVAL(&atv, ats);
422 error = settime(td, &atv);
423 return (error);
424 }
425
426 #ifndef _SYS_SYSPROTO_H_
427 struct clock_getres_args {
428 clockid_t clock_id;
429 struct timespec *tp;
430 };
431 #endif
432 int
433 sys_clock_getres(struct thread *td, struct clock_getres_args *uap)
434 {
435 struct timespec ts;
436 int error;
437
438 if (uap->tp == NULL)
439 return (0);
440
441 error = kern_clock_getres(td, uap->clock_id, &ts);
442 if (error == 0)
443 error = copyout(&ts, uap->tp, sizeof(ts));
444 return (error);
445 }
446
447 int
448 kern_clock_getres(struct thread *td, clockid_t clock_id, struct timespec *ts)
449 {
450
451 ts->tv_sec = 0;
452 switch (clock_id) {
453 case CLOCK_REALTIME:
454 case CLOCK_REALTIME_FAST:
455 case CLOCK_REALTIME_PRECISE:
456 case CLOCK_MONOTONIC:
457 case CLOCK_MONOTONIC_FAST:
458 case CLOCK_MONOTONIC_PRECISE:
459 case CLOCK_UPTIME:
460 case CLOCK_UPTIME_FAST:
461 case CLOCK_UPTIME_PRECISE:
462 /*
463 * Round up the result of the division cheaply by adding 1.
464 * Rounding up is especially important if rounding down
465 * would give 0. Perfect rounding is unimportant.
466 */
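		/*
		 * E.g. (illustrative): a 1 GHz timecounter reports
		 * 1000000000 / 1000000000 + 1 = 2 ns, and a 3 MHz one
		 * reports 333 + 1 = 334 ns; the +1 also keeps the result
		 * nonzero for frequencies above 1 GHz.
		 */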
467 ts->tv_nsec = NS_PER_SEC / tc_getfrequency() + 1;
468 break;
469 case CLOCK_VIRTUAL:
470 case CLOCK_PROF:
471 /* Accurately round up here because we can do so cheaply. */
472 ts->tv_nsec = howmany(NS_PER_SEC, hz);
473 break;
474 case CLOCK_SECOND:
475 ts->tv_sec = 1;
476 ts->tv_nsec = 0;
477 break;
478 case CLOCK_THREAD_CPUTIME_ID:
479 case CLOCK_PROCESS_CPUTIME_ID:
480 cputime:
481 /* sync with cputick2usec */
482 ts->tv_nsec = 1000000 / cpu_tickrate();
483 if (ts->tv_nsec == 0)
484 ts->tv_nsec = 1000;
485 break;
486 default:
487 if ((int)clock_id < 0)
488 goto cputime;
489 return (EINVAL);
490 }
491 return (0);
492 }
493
494 int
495 kern_nanosleep(struct thread *td, struct timespec *rqt, struct timespec *rmt)
496 {
497
498 return (kern_clock_nanosleep(td, CLOCK_REALTIME, TIMER_RELTIME, rqt,
499 rmt));
500 }
501
502 static uint8_t nanowait[MAXCPU];
503
504 int
505 kern_clock_nanosleep(struct thread *td, clockid_t clock_id, int flags,
506 const struct timespec *rqt, struct timespec *rmt)
507 {
508 struct timespec ts, now;
509 sbintime_t sbt, sbtt, prec, tmp;
510 time_t over;
511 int error;
512 bool is_abs_real;
513
514 if (rqt->tv_nsec < 0 || rqt->tv_nsec >= NS_PER_SEC)
515 return (EINVAL);
516 if ((flags & ~TIMER_ABSTIME) != 0)
517 return (EINVAL);
518 switch (clock_id) {
519 case CLOCK_REALTIME:
520 case CLOCK_REALTIME_PRECISE:
521 case CLOCK_REALTIME_FAST:
522 case CLOCK_SECOND:
523 is_abs_real = (flags & TIMER_ABSTIME) != 0;
524 break;
525 case CLOCK_MONOTONIC:
526 case CLOCK_MONOTONIC_PRECISE:
527 case CLOCK_MONOTONIC_FAST:
528 case CLOCK_UPTIME:
529 case CLOCK_UPTIME_PRECISE:
530 case CLOCK_UPTIME_FAST:
531 is_abs_real = false;
532 break;
533 case CLOCK_VIRTUAL:
534 case CLOCK_PROF:
535 case CLOCK_PROCESS_CPUTIME_ID:
536 return (ENOTSUP);
537 case CLOCK_THREAD_CPUTIME_ID:
538 default:
539 return (EINVAL);
540 }
541 do {
542 ts = *rqt;
543 if ((flags & TIMER_ABSTIME) != 0) {
544 if (is_abs_real)
545 td->td_rtcgen =
546 atomic_load_acq_int(&rtc_generation);
547 error = kern_clock_gettime(td, clock_id, &now);
548 KASSERT(error == 0, ("kern_clock_gettime: %d", error));
549 timespecsub(&ts, &now, &ts);
550 }
551 if (ts.tv_sec < 0 || (ts.tv_sec == 0 && ts.tv_nsec == 0)) {
552 error = EWOULDBLOCK;
553 break;
554 }
555 if (ts.tv_sec > INT32_MAX / 2) {
556 over = ts.tv_sec - INT32_MAX / 2;
557 ts.tv_sec -= over;
558 } else
559 over = 0;
560 tmp = tstosbt(ts);
561 prec = tmp;
562 prec >>= tc_precexp;
563 if (TIMESEL(&sbt, tmp))
564 sbt += tc_tick_sbt;
565 sbt += tmp;
566 error = tsleep_sbt(&nanowait[curcpu], PWAIT | PCATCH, "nanslp",
567 sbt, prec, C_ABSOLUTE);
568 } while (error == 0 && is_abs_real && td->td_rtcgen == 0);
569 td->td_rtcgen = 0;
570 if (error != EWOULDBLOCK) {
571 if (TIMESEL(&sbtt, tmp))
572 sbtt += tc_tick_sbt;
573 if (sbtt >= sbt)
574 return (0);
575 if (error == ERESTART)
576 error = EINTR;
577 if ((flags & TIMER_ABSTIME) == 0 && rmt != NULL) {
578 ts = sbttots(sbt - sbtt);
579 ts.tv_sec += over;
580 if (ts.tv_sec < 0)
581 timespecclear(&ts);
582 *rmt = ts;
583 }
584 return (error);
585 }
586 return (0);
587 }
588
589 #ifndef _SYS_SYSPROTO_H_
590 struct nanosleep_args {
591 struct timespec *rqtp;
592 struct timespec *rmtp;
593 };
594 #endif
595 /* ARGSUSED */
596 int
597 sys_nanosleep(struct thread *td, struct nanosleep_args *uap)
598 {
599
600 return (user_clock_nanosleep(td, CLOCK_REALTIME, TIMER_RELTIME,
601 uap->rqtp, uap->rmtp));
602 }
603
604 #ifndef _SYS_SYSPROTO_H_
605 struct clock_nanosleep_args {
606 clockid_t clock_id;
607 int flags;
608 struct timespec *rqtp;
609 struct timespec *rmtp;
610 };
611 #endif
612 /* ARGSUSED */
613 int
614 sys_clock_nanosleep(struct thread *td, struct clock_nanosleep_args *uap)
615 {
616 int error;
617
618 error = user_clock_nanosleep(td, uap->clock_id, uap->flags, uap->rqtp,
619 uap->rmtp);
620 return (kern_posix_error(td, error));
621 }
622
623 static int
624 user_clock_nanosleep(struct thread *td, clockid_t clock_id, int flags,
625 const struct timespec *ua_rqtp, struct timespec *ua_rmtp)
626 {
627 struct timespec rmt, rqt;
628 int error, error2;
629
630 error = copyin(ua_rqtp, &rqt, sizeof(rqt));
631 if (error)
632 return (error);
633 error = kern_clock_nanosleep(td, clock_id, flags, &rqt, &rmt);
634 if (error == EINTR && ua_rmtp != NULL && (flags & TIMER_ABSTIME) == 0) {
635 error2 = copyout(&rmt, ua_rmtp, sizeof(rmt));
636 if (error2 != 0)
637 error = error2;
638 }
639 return (error);
640 }
641
642 #ifndef _SYS_SYSPROTO_H_
643 struct gettimeofday_args {
644 struct timeval *tp;
645 struct timezone *tzp;
646 };
647 #endif
648 /* ARGSUSED */
649 int
650 sys_gettimeofday(struct thread *td, struct gettimeofday_args *uap)
651 {
652 struct timeval atv;
653 struct timezone rtz;
654 int error = 0;
655
656 if (uap->tp) {
657 microtime(&atv);
658 error = copyout(&atv, uap->tp, sizeof (atv));
659 }
660 if (error == 0 && uap->tzp != NULL) {
661 rtz.tz_minuteswest = 0;
662 rtz.tz_dsttime = 0;
663 error = copyout(&rtz, uap->tzp, sizeof (rtz));
664 }
665 return (error);
666 }
667
668 #ifndef _SYS_SYSPROTO_H_
669 struct settimeofday_args {
670 struct timeval *tv;
671 struct timezone *tzp;
672 };
673 #endif
674 /* ARGSUSED */
675 int
676 sys_settimeofday(struct thread *td, struct settimeofday_args *uap)
677 {
678 struct timeval atv, *tvp;
679 struct timezone atz, *tzp;
680 int error;
681
682 if (uap->tv) {
683 error = copyin(uap->tv, &atv, sizeof(atv));
684 if (error)
685 return (error);
686 tvp = &atv;
687 } else
688 tvp = NULL;
689 if (uap->tzp) {
690 error = copyin(uap->tzp, &atz, sizeof(atz));
691 if (error)
692 return (error);
693 tzp = &atz;
694 } else
695 tzp = NULL;
696 return (kern_settimeofday(td, tvp, tzp));
697 }
698
699 int
700 kern_settimeofday(struct thread *td, struct timeval *tv, struct timezone *tzp)
701 {
702 int error;
703
704 error = priv_check(td, PRIV_SETTIMEOFDAY);
705 if (error)
706 return (error);
707 /* Verify all parameters before changing time. */
708 if (tv) {
709 if (tv->tv_usec < 0 || tv->tv_usec >= 1000000 ||
710 tv->tv_sec < 0)
711 return (EINVAL);
712 error = settime(td, tv);
713 }
714 return (error);
715 }
716
717 /*
718 * Get value of an interval timer. The process virtual and profiling virtual
719 * time timers are kept in the p_stats area, since they can be swapped out.
720 * These are kept internally in the way they are specified externally: in
721 * time until they expire.
722 *
723 * The real time interval timer is kept in the process table slot for the
724 * process, and its value (it_value) is kept as an absolute time rather than
725 * as a delta, so that it is easy to keep periodic real-time signals from
726 * drifting.
727 *
728 * Virtual time timers are processed in the hardclock() routine of
729 * kern_clock.c. The real time timer is processed by a timeout routine,
730 * called from the softclock() routine. Since a callout may be delayed in
731 * real time due to interrupt processing in the system, it is possible for
732 * the real time timeout routine (realitexpire, given below), to be delayed
733 * in real time past when it is supposed to occur. It does not suffice,
734 * therefore, to reload the real timer .it_value from the real time timers
735 * .it_interval. Rather, we compute the next time in absolute time the timer
736 * should go off.
737 */
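/*
 * Worked example of the absolute-time scheme described above
 * (illustrative): a 10 ms periodic ITIMER_REAL armed at uptime 100.000 s
 * stores it_value = 100.010 s. If the callout runs 3 ms late, at
 * 100.013 s, the next deadline is still computed from the stored
 * absolute value as 100.020 s, so the late expiration does not shift
 * later ones; reloading a relative 10 ms at 100.013 s would have
 * drifted the period to 100.023 s.
 */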
738 #ifndef _SYS_SYSPROTO_H_
739 struct getitimer_args {
740 u_int which;
741 struct itimerval *itv;
742 };
743 #endif
744 int
745 sys_getitimer(struct thread *td, struct getitimer_args *uap)
746 {
747 struct itimerval aitv;
748 int error;
749
750 error = kern_getitimer(td, uap->which, &aitv);
751 if (error != 0)
752 return (error);
753 return (copyout(&aitv, uap->itv, sizeof (struct itimerval)));
754 }
755
756 int
757 kern_getitimer(struct thread *td, u_int which, struct itimerval *aitv)
758 {
759 struct proc *p = td->td_proc;
760 struct timeval ctv;
761
762 if (which > ITIMER_PROF)
763 return (EINVAL);
764
765 if (which == ITIMER_REAL) {
766 /*
767 * Convert from absolute to relative time in .it_value
768 * part of real time timer. If time for real time timer
769 * has passed return 0, else return difference between
770 * current time and time for the timer to go off.
771 */
772 PROC_LOCK(p);
773 *aitv = p->p_realtimer;
774 PROC_UNLOCK(p);
775 if (timevalisset(&aitv->it_value)) {
776 microuptime(&ctv);
777 if (timevalcmp(&aitv->it_value, &ctv, <))
778 timevalclear(&aitv->it_value);
779 else
780 timevalsub(&aitv->it_value, &ctv);
781 }
782 } else {
783 PROC_ITIMLOCK(p);
784 *aitv = p->p_stats->p_timer[which];
785 PROC_ITIMUNLOCK(p);
786 }
787 #ifdef KTRACE
788 if (KTRPOINT(td, KTR_STRUCT))
789 ktritimerval(aitv);
790 #endif
791 return (0);
792 }
793
794 #ifndef _SYS_SYSPROTO_H_
795 struct setitimer_args {
796 u_int which;
797 struct itimerval *itv, *oitv;
798 };
799 #endif
800 int
801 sys_setitimer(struct thread *td, struct setitimer_args *uap)
802 {
803 struct itimerval aitv, oitv;
804 int error;
805
806 if (uap->itv == NULL) {
807 uap->itv = uap->oitv;
808 return (sys_getitimer(td, (struct getitimer_args *)uap));
809 }
810
811 if ((error = copyin(uap->itv, &aitv, sizeof(struct itimerval))))
812 return (error);
813 error = kern_setitimer(td, uap->which, &aitv, &oitv);
814 if (error != 0 || uap->oitv == NULL)
815 return (error);
816 return (copyout(&oitv, uap->oitv, sizeof(struct itimerval)));
817 }
818
819 int
820 kern_setitimer(struct thread *td, u_int which, struct itimerval *aitv,
821 struct itimerval *oitv)
822 {
823 struct proc *p = td->td_proc;
824 struct timeval ctv;
825 sbintime_t sbt, pr;
826
827 if (aitv == NULL)
828 return (kern_getitimer(td, which, oitv));
829
830 if (which > ITIMER_PROF)
831 return (EINVAL);
832 #ifdef KTRACE
833 if (KTRPOINT(td, KTR_STRUCT))
834 ktritimerval(aitv);
835 #endif
836 if (itimerfix(&aitv->it_value) ||
837 aitv->it_value.tv_sec > INT32_MAX / 2)
838 return (EINVAL);
839 if (!timevalisset(&aitv->it_value))
840 timevalclear(&aitv->it_interval);
841 else if (itimerfix(&aitv->it_interval) ||
842 aitv->it_interval.tv_sec > INT32_MAX / 2)
843 return (EINVAL);
844
845 if (which == ITIMER_REAL) {
846 PROC_LOCK(p);
847 if (timevalisset(&p->p_realtimer.it_value))
848 callout_stop(&p->p_itcallout);
849 microuptime(&ctv);
850 if (timevalisset(&aitv->it_value)) {
851 pr = tvtosbt(aitv->it_value) >> tc_precexp;
852 timevaladd(&aitv->it_value, &ctv);
853 sbt = tvtosbt(aitv->it_value);
854 callout_reset_sbt(&p->p_itcallout, sbt, pr,
855 realitexpire, p, C_ABSOLUTE);
856 }
857 *oitv = p->p_realtimer;
858 p->p_realtimer = *aitv;
859 PROC_UNLOCK(p);
860 if (timevalisset(&oitv->it_value)) {
861 if (timevalcmp(&oitv->it_value, &ctv, <))
862 timevalclear(&oitv->it_value);
863 else
864 timevalsub(&oitv->it_value, &ctv);
865 }
866 } else {
867 if (aitv->it_interval.tv_sec == 0 &&
868 aitv->it_interval.tv_usec != 0 &&
869 aitv->it_interval.tv_usec < tick)
870 aitv->it_interval.tv_usec = tick;
871 if (aitv->it_value.tv_sec == 0 &&
872 aitv->it_value.tv_usec != 0 &&
873 aitv->it_value.tv_usec < tick)
874 aitv->it_value.tv_usec = tick;
875 PROC_ITIMLOCK(p);
876 *oitv = p->p_stats->p_timer[which];
877 p->p_stats->p_timer[which] = *aitv;
878 PROC_ITIMUNLOCK(p);
879 }
880 #ifdef KTRACE
881 if (KTRPOINT(td, KTR_STRUCT))
882 ktritimerval(oitv);
883 #endif
884 return (0);
885 }
886
887 static void
888 realitexpire_reset_callout(struct proc *p, sbintime_t *isbtp)
889 {
890 sbintime_t prec;
891
892 prec = isbtp == NULL ? tvtosbt(p->p_realtimer.it_interval) : *isbtp;
893 callout_reset_sbt(&p->p_itcallout, tvtosbt(p->p_realtimer.it_value),
894 prec >> tc_precexp, realitexpire, p, C_ABSOLUTE);
895 }
896
897 void
898 itimer_proc_continue(struct proc *p)
899 {
900 struct timeval ctv;
901 struct itimer *it;
902 int id;
903
904 PROC_LOCK_ASSERT(p, MA_OWNED);
905
906 if ((p->p_flag2 & P2_ITSTOPPED) != 0) {
907 p->p_flag2 &= ~P2_ITSTOPPED;
908 microuptime(&ctv);
909 if (timevalcmp(&p->p_realtimer.it_value, &ctv, <=))
910 realitexpire(p);
911 else
912 realitexpire_reset_callout(p, NULL);
913 }
914
915 if (p->p_itimers != NULL) {
916 for (id = 3; id < TIMER_MAX; id++) {
917 it = p->p_itimers->its_timers[id];
918 if (it == NULL)
919 continue;
920 if ((it->it_flags & ITF_PSTOPPED) != 0) {
921 ITIMER_LOCK(it);
922 if ((it->it_flags & ITF_PSTOPPED) != 0) {
923 it->it_flags &= ~ITF_PSTOPPED;
924 if ((it->it_flags & ITF_DELETING) == 0)
925 realtimer_expire_l(it, true);
926 }
927 ITIMER_UNLOCK(it);
928 }
929 }
930 }
931 }
932
933 /*
934 * Real interval timer expired:
935 * send process whose timer expired an alarm signal.
936 * If time is not set up to reload, then just return.
937 * Else compute next time timer should go off which is > current time.
938 * This is where delay in processing this timeout causes multiple
939 * SIGALRM calls to be compressed into one.
940 * tvtohz() always adds 1 to allow for the time until the next clock
941 * interrupt being strictly less than 1 clock tick, but we don't want
942 * that here since we want to appear to be in sync with the clock
943 * interrupt even when we're delayed.
944 */
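/*
 * E.g. (illustrative): with it_interval = 0.5 s, it_value = 10.0 s and
 * the callout delayed until ctv = 11.3 s, the do/while loop below
 * advances it_value to 10.5, 11.0 and then 11.5 s, the first deadline
 * strictly after ctv; the two missed periods are compressed into the
 * single SIGALRM sent on entry.
 */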
945 static void
946 realitexpire(void *arg)
947 {
948 struct proc *p;
949 struct timeval ctv;
950 sbintime_t isbt;
951
952 p = (struct proc *)arg;
953 kern_psignal(p, SIGALRM);
954 if (!timevalisset(&p->p_realtimer.it_interval)) {
955 timevalclear(&p->p_realtimer.it_value);
956 return;
957 }
958
959 isbt = tvtosbt(p->p_realtimer.it_interval);
960 if (isbt >= sbt_timethreshold)
961 getmicrouptime(&ctv);
962 else
963 microuptime(&ctv);
964 do {
965 timevaladd(&p->p_realtimer.it_value,
966 &p->p_realtimer.it_interval);
967 } while (timevalcmp(&p->p_realtimer.it_value, &ctv, <=));
968
969 if (P_SHOULDSTOP(p) || P_KILLED(p)) {
970 p->p_flag2 |= P2_ITSTOPPED;
971 return;
972 }
973
974 p->p_flag2 &= ~P2_ITSTOPPED;
975 realitexpire_reset_callout(p, &isbt);
976 }
977
978 /*
979 * Check that a proposed value to load into the .it_value or
980 * .it_interval part of an interval timer is acceptable, and
981 * fix it to have at least minimal value (i.e. if it is less
982 * than the resolution of the clock, round it up.)
983 */
984 int
985 itimerfix(struct timeval *tv)
986 {
987
988 if (tv->tv_sec < 0 || tv->tv_usec < 0 || tv->tv_usec >= 1000000)
989 return (EINVAL);
990 if (tv->tv_sec == 0 && tv->tv_usec != 0 &&
991 tv->tv_usec < (u_int)tick / 16)
992 tv->tv_usec = (u_int)tick / 16;
993 return (0);
994 }
995
996 /*
997 * Decrement an interval timer by a specified number
998 * of microseconds, which must be less than a second,
999 * i.e. < 1000000. If the timer expires, then reload
1000 * it. In this case, carry over (usec - old value) to
1001 * reduce the value reloaded into the timer so that
1002 * the timer does not drift. This routine assumes
1003 * that it is called in a context where the timers
1004 * on which it is operating cannot change in value.
1005 */
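/*
 * Worked example for the routine below (illustrative): with
 * it_value = {0 s, 300 us} remaining, it_interval = {0 s, 10000 us} and
 * usec = 1000, the timer has expired and is already 700 us into the
 * next period, so it reloads to 10000 - 700 = 9300 us and the routine
 * returns 0 to report the expiration.
 */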
1006 int
1007 itimerdecr(struct itimerval *itp, int usec)
1008 {
1009
1010 if (itp->it_value.tv_usec < usec) {
1011 if (itp->it_value.tv_sec == 0) {
1012 /* expired, and already in next interval */
1013 usec -= itp->it_value.tv_usec;
1014 goto expire;
1015 }
1016 itp->it_value.tv_usec += 1000000;
1017 itp->it_value.tv_sec--;
1018 }
1019 itp->it_value.tv_usec -= usec;
1020 usec = 0;
1021 if (timevalisset(&itp->it_value))
1022 return (1);
1023 /* expired, exactly at end of interval */
1024 expire:
1025 if (timevalisset(&itp->it_interval)) {
1026 itp->it_value = itp->it_interval;
1027 itp->it_value.tv_usec -= usec;
1028 if (itp->it_value.tv_usec < 0) {
1029 itp->it_value.tv_usec += 1000000;
1030 itp->it_value.tv_sec--;
1031 }
1032 } else
1033 itp->it_value.tv_usec = 0; /* sec is already 0 */
1034 return (0);
1035 }
1036
1037 /*
1038 * Add and subtract routines for timevals.
1039 * N.B.: subtract routine doesn't deal with
1040 * results which are before the beginning,
1041 * it just gets very confused in this case.
1042 * Caveat emptor.
1043 */
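/*
 * E.g. (illustrative): {1 s, 700000 us} + {0 s, 600000 us} first yields
 * the denormalized {1 s, 1300000 us}, which timevalfix() carries into
 * {2 s, 300000 us}; the matching borrow handles a negative tv_usec
 * after subtraction.
 */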
1044 void
1045 timevaladd(struct timeval *t1, const struct timeval *t2)
1046 {
1047
1048 t1->tv_sec += t2->tv_sec;
1049 t1->tv_usec += t2->tv_usec;
1050 timevalfix(t1);
1051 }
1052
1053 void
1054 timevalsub(struct timeval *t1, const struct timeval *t2)
1055 {
1056
1057 t1->tv_sec -= t2->tv_sec;
1058 t1->tv_usec -= t2->tv_usec;
1059 timevalfix(t1);
1060 }
1061
1062 static void
1063 timevalfix(struct timeval *t1)
1064 {
1065
1066 if (t1->tv_usec < 0) {
1067 t1->tv_sec--;
1068 t1->tv_usec += 1000000;
1069 }
1070 if (t1->tv_usec >= 1000000) {
1071 t1->tv_sec++;
1072 t1->tv_usec -= 1000000;
1073 }
1074 }
1075
1076 /*
1077 * ratecheck(): simple time-based rate-limit checking.
1078 */
1079 int
1080 ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
1081 {
1082 struct timeval tv, delta;
1083 int rv = 0;
1084
1085 getmicrouptime(&tv); /* NB: 10ms precision */
1086 delta = tv;
1087 timevalsub(&delta, lasttime);
1088
1089 /*
1090 * check for 0,0 is so that the message will be seen at least once,
1091 * even if interval is huge.
1092 */
1093 if (timevalcmp(&delta, mininterval, >=) ||
1094 (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
1095 *lasttime = tv;
1096 rv = 1;
1097 }
1098
1099 return (rv);
1100 }
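/*
 * Typical use of ratecheck() (illustrative sketch; the names are
 * invented):
 *
 *	static struct timeval lastfail;
 *	static const struct timeval onesec = { 1, 0 };
 *
 *	if (ratecheck(&lastfail, &onesec))
 *		printf("mydev0: DMA error\n");
 *
 * limits the message to once per second; because lastfail starts
 * zeroed, the 0,0 check above guarantees the first report is never
 * suppressed.
 */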
1101
1102 /*
1103 * ppsratecheck(): packets (or events) per second limitation.
1104 *
1105 * Return 0 if the limit is to be enforced (e.g. the caller
1106 * should drop a packet because of the rate limitation).
1107 *
1108 * maxpps of 0 always causes zero to be returned. maxpps of -1
1109 * always causes 1 to be returned; this effectively defeats rate
1110 * limiting.
1111 *
1112 * Note that we maintain the struct timeval for compatibility
1113 * with other bsd systems. We reuse the storage and just monitor
1114 * clock ticks for minimal overhead.
1115 */
1116 int
1117 ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
1118 {
1119 int now;
1120
1121 /*
1122 * Reset the last time and counter if this is the first call
1123 * or more than a second has passed since the last update of
1124 * lasttime.
1125 */
1126 now = ticks;
1127 if (lasttime->tv_sec == 0 || (u_int)(now - lasttime->tv_sec) >= hz) {
1128 lasttime->tv_sec = now;
1129 *curpps = 1;
1130 return (maxpps != 0);
1131 } else {
1132 (*curpps)++; /* NB: ignore potential overflow */
1133 return (maxpps < 0 || *curpps <= maxpps);
1134 }
1135 }
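/*
 * Typical use of ppsratecheck() (illustrative sketch; icmp_send_reply
 * is an invented placeholder):
 *
 *	static struct timeval lasttv;
 *	static int curpps;
 *
 *	if (ppsratecheck(&lasttv, &curpps, 100))
 *		icmp_send_reply(m);
 *
 * allows at most 100 replies per second; maxpps = 0 would drop
 * everything and maxpps = -1 would disable the limit, per the rules
 * above.
 */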
1136
1137 static void
1138 itimer_start(void)
1139 {
1140 static const struct kclock rt_clock = {
1141 .timer_create = realtimer_create,
1142 .timer_delete = realtimer_delete,
1143 .timer_settime = realtimer_settime,
1144 .timer_gettime = realtimer_gettime,
1145 };
1146
1147 itimer_zone = uma_zcreate("itimer", sizeof(struct itimer),
1148 NULL, NULL, itimer_init, itimer_fini, UMA_ALIGN_PTR, 0);
1149 register_posix_clock(CLOCK_REALTIME, &rt_clock);
1150 register_posix_clock(CLOCK_MONOTONIC, &rt_clock);
1151 p31b_setcfg(CTL_P1003_1B_TIMERS, 200112L);
1152 p31b_setcfg(CTL_P1003_1B_DELAYTIMER_MAX, INT_MAX);
1153 p31b_setcfg(CTL_P1003_1B_TIMER_MAX, TIMER_MAX);
1154 }
1155
1156 static int
1157 register_posix_clock(int clockid, const struct kclock *clk)
1158 {
1159 if ((unsigned)clockid >= MAX_CLOCKS) {
1160 printf("%s: invalid clockid\n", __func__);
1161 return (0);
1162 }
1163 posix_clocks[clockid] = *clk;
1164 return (1);
1165 }
1166
1167 static int
1168 itimer_init(void *mem, int size, int flags)
1169 {
1170 struct itimer *it;
1171
1172 it = (struct itimer *)mem;
1173 mtx_init(&it->it_mtx, "itimer lock", NULL, MTX_DEF);
1174 return (0);
1175 }
1176
1177 static void
1178 itimer_fini(void *mem, int size)
1179 {
1180 struct itimer *it;
1181
1182 it = (struct itimer *)mem;
1183 mtx_destroy(&it->it_mtx);
1184 }
1185
1186 static void
1187 itimer_enter(struct itimer *it)
1188 {
1189
1190 mtx_assert(&it->it_mtx, MA_OWNED);
1191 it->it_usecount++;
1192 }
1193
1194 static void
1195 itimer_leave(struct itimer *it)
1196 {
1197
1198 mtx_assert(&it->it_mtx, MA_OWNED);
1199 KASSERT(it->it_usecount > 0, ("invalid it_usecount"));
1200
1201 if (--it->it_usecount == 0 && (it->it_flags & ITF_WANTED) != 0)
1202 wakeup(it);
1203 }
1204
1205 #ifndef _SYS_SYSPROTO_H_
1206 struct ktimer_create_args {
1207 clockid_t clock_id;
1208 struct sigevent * evp;
1209 int * timerid;
1210 };
1211 #endif
1212 int
1213 sys_ktimer_create(struct thread *td, struct ktimer_create_args *uap)
1214 {
1215 struct sigevent *evp, ev;
1216 int id;
1217 int error;
1218
1219 if (uap->evp == NULL) {
1220 evp = NULL;
1221 } else {
1222 error = copyin(uap->evp, &ev, sizeof(ev));
1223 if (error != 0)
1224 return (error);
1225 evp = &ev;
1226 }
1227 error = kern_ktimer_create(td, uap->clock_id, evp, &id, -1);
1228 if (error == 0) {
1229 error = copyout(&id, uap->timerid, sizeof(int));
1230 if (error != 0)
1231 kern_ktimer_delete(td, id);
1232 }
1233 return (error);
1234 }
1235
1236 int
1237 kern_ktimer_create(struct thread *td, clockid_t clock_id, struct sigevent *evp,
1238 int *timerid, int preset_id)
1239 {
1240 struct proc *p = td->td_proc;
1241 struct itimer *it;
1242 int id;
1243 int error;
1244
1245 if (clock_id < 0 || clock_id >= MAX_CLOCKS)
1246 return (EINVAL);
1247
1248 if (posix_clocks[clock_id].timer_create == NULL)
1249 return (EINVAL);
1250
1251 if (evp != NULL) {
1252 if (evp->sigev_notify != SIGEV_NONE &&
1253 evp->sigev_notify != SIGEV_SIGNAL &&
1254 evp->sigev_notify != SIGEV_THREAD_ID)
1255 return (EINVAL);
1256 if ((evp->sigev_notify == SIGEV_SIGNAL ||
1257 evp->sigev_notify == SIGEV_THREAD_ID) &&
1258 !_SIG_VALID(evp->sigev_signo))
1259 return (EINVAL);
1260 }
1261
1262 if (p->p_itimers == NULL)
1263 itimers_alloc(p);
1264
1265 it = uma_zalloc(itimer_zone, M_WAITOK);
1266 it->it_flags = 0;
1267 it->it_usecount = 0;
1268 timespecclear(&it->it_time.it_value);
1269 timespecclear(&it->it_time.it_interval);
1270 it->it_overrun = 0;
1271 it->it_overrun_last = 0;
1272 it->it_clockid = clock_id;
1273 it->it_proc = p;
1274 ksiginfo_init(&it->it_ksi);
1275 it->it_ksi.ksi_flags |= KSI_INS | KSI_EXT;
1276 error = CLOCK_CALL(clock_id, timer_create, (it));
1277 if (error != 0)
1278 goto out;
1279
1280 PROC_LOCK(p);
1281 if (preset_id != -1) {
1282 KASSERT(preset_id >= 0 && preset_id < 3, ("invalid preset_id"));
1283 id = preset_id;
1284 if (p->p_itimers->its_timers[id] != NULL) {
1285 PROC_UNLOCK(p);
1286 error = 0;
1287 goto out;
1288 }
1289 } else {
1290 /*
1291 * Find a free timer slot, skipping those reserved
1292 * for setitimer().
1293 */
1294 for (id = 3; id < TIMER_MAX; id++)
1295 if (p->p_itimers->its_timers[id] == NULL)
1296 break;
1297 if (id == TIMER_MAX) {
1298 PROC_UNLOCK(p);
1299 error = EAGAIN;
1300 goto out;
1301 }
1302 }
1303 p->p_itimers->its_timers[id] = it;
1304 if (evp != NULL)
1305 it->it_sigev = *evp;
1306 else {
1307 it->it_sigev.sigev_notify = SIGEV_SIGNAL;
1308 switch (clock_id) {
1309 default:
1310 case CLOCK_REALTIME:
1311 it->it_sigev.sigev_signo = SIGALRM;
1312 break;
1313 case CLOCK_VIRTUAL:
1314 it->it_sigev.sigev_signo = SIGVTALRM;
1315 break;
1316 case CLOCK_PROF:
1317 it->it_sigev.sigev_signo = SIGPROF;
1318 break;
1319 }
1320 it->it_sigev.sigev_value.sival_int = id;
1321 }
1322
1323 if (it->it_sigev.sigev_notify == SIGEV_SIGNAL ||
1324 it->it_sigev.sigev_notify == SIGEV_THREAD_ID) {
1325 it->it_ksi.ksi_signo = it->it_sigev.sigev_signo;
1326 it->it_ksi.ksi_code = SI_TIMER;
1327 it->it_ksi.ksi_value = it->it_sigev.sigev_value;
1328 it->it_ksi.ksi_timerid = id;
1329 }
1330 PROC_UNLOCK(p);
1331 *timerid = id;
1332 return (0);
1333
1334 out:
1335 ITIMER_LOCK(it);
1336 CLOCK_CALL(it->it_clockid, timer_delete, (it));
1337 ITIMER_UNLOCK(it);
1338 uma_zfree(itimer_zone, it);
1339 return (error);
1340 }
1341
1342 #ifndef _SYS_SYSPROTO_H_
1343 struct ktimer_delete_args {
1344 int timerid;
1345 };
1346 #endif
1347 int
1348 sys_ktimer_delete(struct thread *td, struct ktimer_delete_args *uap)
1349 {
1350
1351 return (kern_ktimer_delete(td, uap->timerid));
1352 }
1353
1354 static struct itimer *
1355 itimer_find(struct proc *p, int timerid)
1356 {
1357 struct itimer *it;
1358
1359 PROC_LOCK_ASSERT(p, MA_OWNED);
1360 if ((p->p_itimers == NULL) ||
1361 (timerid < 0) || (timerid >= TIMER_MAX) ||
1362 (it = p->p_itimers->its_timers[timerid]) == NULL) {
1363 return (NULL);
1364 }
1365 ITIMER_LOCK(it);
1366 if ((it->it_flags & ITF_DELETING) != 0) {
1367 ITIMER_UNLOCK(it);
1368 it = NULL;
1369 }
1370 return (it);
1371 }
1372
1373 int
1374 kern_ktimer_delete(struct thread *td, int timerid)
1375 {
1376 struct proc *p = td->td_proc;
1377 struct itimer *it;
1378
1379 PROC_LOCK(p);
1380 it = itimer_find(p, timerid);
1381 if (it == NULL) {
1382 PROC_UNLOCK(p);
1383 return (EINVAL);
1384 }
1385 PROC_UNLOCK(p);
1386
1387 it->it_flags |= ITF_DELETING;
1388 while (it->it_usecount > 0) {
1389 it->it_flags |= ITF_WANTED;
1390 msleep(it, &it->it_mtx, PPAUSE, "itimer", 0);
1391 }
1392 it->it_flags &= ~ITF_WANTED;
1393 CLOCK_CALL(it->it_clockid, timer_delete, (it));
1394 ITIMER_UNLOCK(it);
1395
1396 PROC_LOCK(p);
1397 if (KSI_ONQ(&it->it_ksi))
1398 sigqueue_take(&it->it_ksi);
1399 p->p_itimers->its_timers[timerid] = NULL;
1400 PROC_UNLOCK(p);
1401 uma_zfree(itimer_zone, it);
1402 return (0);
1403 }
1404
1405 #ifndef _SYS_SYSPROTO_H_
1406 struct ktimer_settime_args {
1407 int timerid;
1408 int flags;
1409 const struct itimerspec * value;
1410 struct itimerspec * ovalue;
1411 };
1412 #endif
1413 int
1414 sys_ktimer_settime(struct thread *td, struct ktimer_settime_args *uap)
1415 {
1416 struct itimerspec val, oval, *ovalp;
1417 int error;
1418
1419 error = copyin(uap->value, &val, sizeof(val));
1420 if (error != 0)
1421 return (error);
1422 ovalp = uap->ovalue != NULL ? &oval : NULL;
1423 error = kern_ktimer_settime(td, uap->timerid, uap->flags, &val, ovalp);
1424 if (error == 0 && uap->ovalue != NULL)
1425 error = copyout(ovalp, uap->ovalue, sizeof(*ovalp));
1426 return (error);
1427 }
1428
1429 int
1430 kern_ktimer_settime(struct thread *td, int timer_id, int flags,
1431 struct itimerspec *val, struct itimerspec *oval)
1432 {
1433 struct proc *p;
1434 struct itimer *it;
1435 int error;
1436
1437 p = td->td_proc;
1438 PROC_LOCK(p);
1439 if (timer_id < 3 || (it = itimer_find(p, timer_id)) == NULL) {
1440 PROC_UNLOCK(p);
1441 error = EINVAL;
1442 } else {
1443 PROC_UNLOCK(p);
1444 itimer_enter(it);
1445 error = CLOCK_CALL(it->it_clockid, timer_settime, (it,
1446 flags, val, oval));
1447 itimer_leave(it);
1448 ITIMER_UNLOCK(it);
1449 }
1450 return (error);
1451 }
1452
1453 #ifndef _SYS_SYSPROTO_H_
1454 struct ktimer_gettime_args {
1455 int timerid;
1456 struct itimerspec * value;
1457 };
1458 #endif
1459 int
1460 sys_ktimer_gettime(struct thread *td, struct ktimer_gettime_args *uap)
1461 {
1462 struct itimerspec val;
1463 int error;
1464
1465 error = kern_ktimer_gettime(td, uap->timerid, &val);
1466 if (error == 0)
1467 error = copyout(&val, uap->value, sizeof(val));
1468 return (error);
1469 }
1470
1471 int
1472 kern_ktimer_gettime(struct thread *td, int timer_id, struct itimerspec *val)
1473 {
1474 struct proc *p;
1475 struct itimer *it;
1476 int error;
1477
1478 p = td->td_proc;
1479 PROC_LOCK(p);
1480 if (timer_id < 3 || (it = itimer_find(p, timer_id)) == NULL) {
1481 PROC_UNLOCK(p);
1482 error = EINVAL;
1483 } else {
1484 PROC_UNLOCK(p);
1485 itimer_enter(it);
1486 error = CLOCK_CALL(it->it_clockid, timer_gettime, (it, val));
1487 itimer_leave(it);
1488 ITIMER_UNLOCK(it);
1489 }
1490 return (error);
1491 }
1492
1493 #ifndef _SYS_SYSPROTO_H_
1494 struct timer_getoverrun_args {
1495 int timerid;
1496 };
1497 #endif
1498 int
1499 sys_ktimer_getoverrun(struct thread *td, struct ktimer_getoverrun_args *uap)
1500 {
1501
1502 return (kern_ktimer_getoverrun(td, uap->timerid));
1503 }
1504
1505 int
1506 kern_ktimer_getoverrun(struct thread *td, int timer_id)
1507 {
1508 struct proc *p = td->td_proc;
1509 struct itimer *it;
1510 int error;
1511
1512 PROC_LOCK(p);
1513 if (timer_id < 3 ||
1514 (it = itimer_find(p, timer_id)) == NULL) {
1515 PROC_UNLOCK(p);
1516 error = EINVAL;
1517 } else {
1518 td->td_retval[0] = it->it_overrun_last;
1519 ITIMER_UNLOCK(it);
1520 PROC_UNLOCK(p);
1521 error = 0;
1522 }
1523 return (error);
1524 }
1525
1526 static int
1527 realtimer_create(struct itimer *it)
1528 {
1529 callout_init_mtx(&it->it_callout, &it->it_mtx, 0);
1530 return (0);
1531 }
1532
1533 static int
1534 realtimer_delete(struct itimer *it)
1535 {
1536 mtx_assert(&it->it_mtx, MA_OWNED);
1537
1538 /*
1539 * clear timer's value and interval to tell realtimer_expire
1540 * to not rearm the timer.
1541 */
1542 timespecclear(&it->it_time.it_value);
1543 timespecclear(&it->it_time.it_interval);
1544 ITIMER_UNLOCK(it);
1545 callout_drain(&it->it_callout);
1546 ITIMER_LOCK(it);
1547 return (0);
1548 }
1549
1550 static int
1551 realtimer_gettime(struct itimer *it, struct itimerspec *ovalue)
1552 {
1553 struct timespec cts;
1554
1555 mtx_assert(&it->it_mtx, MA_OWNED);
1556
1557 realtimer_clocktime(it->it_clockid, &cts);
1558 *ovalue = it->it_time;
1559 if (ovalue->it_value.tv_sec != 0 || ovalue->it_value.tv_nsec != 0) {
1560 timespecsub(&ovalue->it_value, &cts, &ovalue->it_value);
1561 if (ovalue->it_value.tv_sec < 0 ||
1562 (ovalue->it_value.tv_sec == 0 &&
1563 ovalue->it_value.tv_nsec == 0)) {
1564 ovalue->it_value.tv_sec = 0;
1565 ovalue->it_value.tv_nsec = 1;
1566 }
1567 }
1568 return (0);
1569 }
1570
1571 static int
1572 realtimer_settime(struct itimer *it, int flags, struct itimerspec *value,
1573 struct itimerspec *ovalue)
1574 {
1575 struct timespec cts, ts;
1576 struct timeval tv;
1577 struct itimerspec val;
1578
1579 mtx_assert(&it->it_mtx, MA_OWNED);
1580
1581 val = *value;
1582 if (itimespecfix(&val.it_value))
1583 return (EINVAL);
1584
1585 if (timespecisset(&val.it_value)) {
1586 if (itimespecfix(&val.it_interval))
1587 return (EINVAL);
1588 } else {
1589 timespecclear(&val.it_interval);
1590 }
1591
1592 if (ovalue != NULL)
1593 realtimer_gettime(it, ovalue);
1594
1595 it->it_time = val;
1596 if (timespecisset(&val.it_value)) {
1597 realtimer_clocktime(it->it_clockid, &cts);
1598 ts = val.it_value;
1599 if ((flags & TIMER_ABSTIME) == 0) {
1600 /* Convert to absolute time. */
1601 timespecadd(&it->it_time.it_value, &cts,
1602 &it->it_time.it_value);
1603 } else {
1604 timespecsub(&ts, &cts, &ts);
1605 /*
1606 * We don't care if ts is negative, tvtohz() will
1607 * fix it.
1608 */
1609 }
1610 TIMESPEC_TO_TIMEVAL(&tv, &ts);
1611 callout_reset(&it->it_callout, tvtohz(&tv), realtimer_expire,
1612 it);
1613 } else {
1614 callout_stop(&it->it_callout);
1615 }
1616
1617 return (0);
1618 }
1619
1620 static void
1621 realtimer_clocktime(clockid_t id, struct timespec *ts)
1622 {
1623 if (id == CLOCK_REALTIME)
1624 getnanotime(ts);
1625 else /* CLOCK_MONOTONIC */
1626 getnanouptime(ts);
1627 }
1628
1629 int
1630 itimer_accept(struct proc *p, int timerid, ksiginfo_t *ksi)
1631 {
1632 struct itimer *it;
1633
1634 PROC_LOCK_ASSERT(p, MA_OWNED);
1635 it = itimer_find(p, timerid);
1636 if (it != NULL) {
1637 ksi->ksi_overrun = it->it_overrun;
1638 it->it_overrun_last = it->it_overrun;
1639 it->it_overrun = 0;
1640 ITIMER_UNLOCK(it);
1641 return (0);
1642 }
1643 return (EINVAL);
1644 }
1645
1646 static int
1647 itimespecfix(struct timespec *ts)
1648 {
1649
1650 if (!timespecvalid_interval(ts))
1651 return (EINVAL);
1652 if ((UINT64_MAX - ts->tv_nsec) / NS_PER_SEC < ts->tv_sec)
1653 return (EINVAL);
1654 if (ts->tv_sec == 0 && ts->tv_nsec != 0 && ts->tv_nsec < tick * 1000)
1655 ts->tv_nsec = tick * 1000;
1656 return (0);
1657 }
1658
1659 #define timespectons(tsp) \
1660 ((uint64_t)(tsp)->tv_sec * NS_PER_SEC + (tsp)->tv_nsec)
1661 #define timespecfromns(ns) (struct timespec){ \
1662 .tv_sec = (ns) / NS_PER_SEC, \
1663 .tv_nsec = (ns) % NS_PER_SEC \
1664 }
1665
1666 static void
1667 realtimer_expire_l(struct itimer *it, bool proc_locked)
1668 {
1669 struct timespec cts, ts;
1670 struct timeval tv;
1671 struct proc *p;
1672 uint64_t interval, now, overruns, value;
1673
1674 realtimer_clocktime(it->it_clockid, &cts);
1675 /* Only fire if time is reached. */
1676 if (timespeccmp(&cts, &it->it_time.it_value, >=)) {
1677 if (timespecisset(&it->it_time.it_interval)) {
1678 timespecadd(&it->it_time.it_value,
1679 &it->it_time.it_interval,
1680 &it->it_time.it_value);
1681
1682 interval = timespectons(&it->it_time.it_interval);
1683 value = timespectons(&it->it_time.it_value);
1684 now = timespectons(&cts);
1685
1686 if (now >= value) {
1687 /*
1688 * We missed at least one period.
1689 */
1690 overruns = howmany(now - value + 1, interval);
1691 if (it->it_overrun + overruns >=
1692 it->it_overrun &&
1693 it->it_overrun + overruns <= INT_MAX) {
1694 it->it_overrun += (int)overruns;
1695 } else {
1696 it->it_overrun = INT_MAX;
1697 it->it_ksi.ksi_errno = ERANGE;
1698 }
1699 value =
1700 now + interval - (now - value) % interval;
1701 it->it_time.it_value = timespecfromns(value);
1702 }
1703 } else {
1704 /* single shot timer ? */
1705 timespecclear(&it->it_time.it_value);
1706 }
1707
1708 p = it->it_proc;
1709 if (timespecisset(&it->it_time.it_value)) {
1710 if (P_SHOULDSTOP(p) || P_KILLED(p)) {
1711 it->it_flags |= ITF_PSTOPPED;
1712 } else {
1713 timespecsub(&it->it_time.it_value, &cts, &ts);
1714 TIMESPEC_TO_TIMEVAL(&tv, &ts);
1715 callout_reset(&it->it_callout, tvtohz(&tv),
1716 realtimer_expire, it);
1717 }
1718 }
1719
1720 itimer_enter(it);
1721 ITIMER_UNLOCK(it);
1722 if (proc_locked)
1723 PROC_UNLOCK(p);
1724 itimer_fire(it);
1725 if (proc_locked)
1726 PROC_LOCK(p);
1727 ITIMER_LOCK(it);
1728 itimer_leave(it);
1729 } else if (timespecisset(&it->it_time.it_value)) {
1730 p = it->it_proc;
1731 if (P_SHOULDSTOP(p) || P_KILLED(p)) {
1732 it->it_flags |= ITF_PSTOPPED;
1733 } else {
1734 ts = it->it_time.it_value;
1735 timespecsub(&ts, &cts, &ts);
1736 TIMESPEC_TO_TIMEVAL(&tv, &ts);
1737 callout_reset(&it->it_callout, tvtohz(&tv),
1738 realtimer_expire, it);
1739 }
1740 }
1741 }
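/*
 * Worked example of the overrun accounting in realtimer_expire_l()
 * above (illustrative): with interval = 10 ms and the just-advanced
 * deadline value = 100 ms while the clock reads now = 134 ms, we have
 * now >= value, so overruns = howmany(34 ms + 1 ns, 10 ms) = 4 missed
 * periods are added to it_overrun, and the next deadline becomes
 * now + interval - ((now - value) % interval) = 144 - 4 = 140 ms,
 * keeping the timer phase-aligned with its original schedule.
 */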
1742
1743 /* Timeout callback for realtime timer */
1744 static void
1745 realtimer_expire(void *arg)
1746 {
1747 realtimer_expire_l(arg, false);
1748 }
1749
1750 static void
1751 itimer_fire(struct itimer *it)
1752 {
1753 struct proc *p = it->it_proc;
1754 struct thread *td;
1755
1756 if (it->it_sigev.sigev_notify == SIGEV_SIGNAL ||
1757 it->it_sigev.sigev_notify == SIGEV_THREAD_ID) {
1758 if (sigev_findtd(p, &it->it_sigev, &td) != 0) {
1759 ITIMER_LOCK(it);
1760 timespecclear(&it->it_time.it_value);
1761 timespecclear(&it->it_time.it_interval);
1762 callout_stop(&it->it_callout);
1763 ITIMER_UNLOCK(it);
1764 return;
1765 }
1766 if (!KSI_ONQ(&it->it_ksi)) {
1767 it->it_ksi.ksi_errno = 0;
1768 ksiginfo_set_sigev(&it->it_ksi, &it->it_sigev);
1769 tdsendsignal(p, td, it->it_ksi.ksi_signo, &it->it_ksi);
1770 } else {
1771 if (it->it_overrun < INT_MAX)
1772 it->it_overrun++;
1773 else
1774 it->it_ksi.ksi_errno = ERANGE;
1775 }
1776 PROC_UNLOCK(p);
1777 }
1778 }
1779
1780 static void
1781 itimers_alloc(struct proc *p)
1782 {
1783 struct itimers *its;
1784
1785 its = malloc(sizeof (struct itimers), M_SUBPROC, M_WAITOK | M_ZERO);
1786 PROC_LOCK(p);
1787 if (p->p_itimers == NULL) {
1788 p->p_itimers = its;
1789 PROC_UNLOCK(p);
1790 }
1791 else {
1792 PROC_UNLOCK(p);
1793 free(its, M_SUBPROC);
1794 }
1795 }
1796
1797 /* Clean up timers when some process events are being triggered. */
1798 static void
1799 itimers_event_exit_exec(int start_idx, struct proc *p)
1800 {
1801 struct itimers *its;
1802 struct itimer *it;
1803 int i;
1804
1805 its = p->p_itimers;
1806 if (its == NULL)
1807 return;
1808
1809 for (i = start_idx; i < TIMER_MAX; ++i) {
1810 if ((it = its->its_timers[i]) != NULL)
1811 kern_ktimer_delete(curthread, i);
1812 }
1813 if (its->its_timers[0] == NULL && its->its_timers[1] == NULL &&
1814 its->its_timers[2] == NULL) {
1815 /* Synchronize with itimer_proc_continue(). */
1816 PROC_LOCK(p);
1817 p->p_itimers = NULL;
1818 PROC_UNLOCK(p);
1819 free(its, M_SUBPROC);
1820 }
1821 }
1822
1823 void
1824 itimers_exec(struct proc *p)
1825 {
1826 /*
1827 * According to susv3, XSI interval timers should be inherited
1828 * by the new image.
1829 */
1830 itimers_event_exit_exec(3, p);
1831 }
1832
1833 void
1834 itimers_exit(struct proc *p)
1835 {
1836 itimers_event_exit_exec(0, p);
1837 }