sys/kern/kern_time.c
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1982, 1986, 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * @(#)kern_time.c 8.1 (Berkeley) 6/10/93
32 */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 #include "opt_ktrace.h"
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/limits.h>
42 #include <sys/clock.h>
43 #include <sys/lock.h>
44 #include <sys/mutex.h>
45 #include <sys/sysproto.h>
46 #include <sys/resourcevar.h>
47 #include <sys/signalvar.h>
48 #include <sys/kernel.h>
49 #include <sys/sleepqueue.h>
50 #include <sys/syscallsubr.h>
51 #include <sys/sysctl.h>
52 #include <sys/sysent.h>
53 #include <sys/priv.h>
54 #include <sys/proc.h>
55 #include <sys/posix4.h>
56 #include <sys/time.h>
57 #include <sys/timers.h>
58 #include <sys/timetc.h>
59 #include <sys/vnode.h>
60 #ifdef KTRACE
61 #include <sys/ktrace.h>
62 #endif
63
64 #include <vm/vm.h>
65 #include <vm/vm_extern.h>
#include <vm/uma.h>
66
67 #define MAX_CLOCKS (CLOCK_MONOTONIC+1)
68 #define CPUCLOCK_BIT 0x80000000
69 #define CPUCLOCK_PROCESS_BIT 0x40000000
70 #define CPUCLOCK_ID_MASK (~(CPUCLOCK_BIT|CPUCLOCK_PROCESS_BIT))
71 #define MAKE_THREAD_CPUCLOCK(tid) (CPUCLOCK_BIT|(tid))
72 #define MAKE_PROCESS_CPUCLOCK(pid) \
73 (CPUCLOCK_BIT|CPUCLOCK_PROCESS_BIT|(pid))
74
75 #define NS_PER_SEC 1000000000
76
77 static struct kclock posix_clocks[MAX_CLOCKS];
78 static uma_zone_t itimer_zone = NULL;
79
80 /*
81 * Time of day and interval timer support.
82 *
83 * These routines provide the kernel entry points to get and set
84 * the time-of-day and per-process interval timers. Subroutines
85 * here provide support for adding and subtracting timeval structures
86 * and decrementing interval timers, optionally reloading the interval
87 * timers when they expire.
88 */
89
90 static int settime(struct thread *, struct timeval *);
91 static void timevalfix(struct timeval *);
92 static int user_clock_nanosleep(struct thread *td, clockid_t clock_id,
93 int flags, const struct timespec *ua_rqtp,
94 struct timespec *ua_rmtp);
95
96 static void itimer_start(void);
97 static int itimer_init(void *, int, int);
98 static void itimer_fini(void *, int);
99 static void itimer_enter(struct itimer *);
100 static void itimer_leave(struct itimer *);
101 static struct itimer *itimer_find(struct proc *, int);
102 static void itimers_alloc(struct proc *);
103 static int realtimer_create(struct itimer *);
104 static int realtimer_gettime(struct itimer *, struct itimerspec *);
105 static int realtimer_settime(struct itimer *, int,
106 struct itimerspec *, struct itimerspec *);
107 static int realtimer_delete(struct itimer *);
108 static void realtimer_clocktime(clockid_t, struct timespec *);
109 static void realtimer_expire(void *);
110 static void realtimer_expire_l(struct itimer *it, bool proc_locked);
111
112 static int register_posix_clock(int, const struct kclock *);
113 static void itimer_fire(struct itimer *it);
114 static int itimespecfix(struct timespec *ts);
115
116 #define CLOCK_CALL(clock, call, arglist) \
117 ((*posix_clocks[clock].call) arglist)
118
119 SYSINIT(posix_timer, SI_SUB_P1003_1B, SI_ORDER_FIRST+4, itimer_start, NULL);
120
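/*
 * Step the system wall clock to *tv, then push the new time into the
 * timecounter layer and the battery-backed clock via resettodr().
 */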
121 static int
122 settime(struct thread *td, struct timeval *tv)
123 {
124 struct timeval delta, tv1, tv2;
125 static struct timeval maxtime, laststep;
126 struct timespec ts;
127
128 microtime(&tv1);
129 delta = *tv;
130 timevalsub(&delta, &tv1);
131
132 /*
133 * If the system is secure, we do not allow the time to be
134 * set to a value earlier than 1 second less than the highest
135 * time we have yet seen. The worst a miscreant can do in
136 * this circumstance is "freeze" time. He couldn't go
137 * back to the past.
138 *
139 * We similarly do not allow the clock to be stepped more
140 * than one second, nor more than once per second. This allows
141 * a miscreant to make the clock march double-time, but no worse.
142 */
143 if (securelevel_gt(td->td_ucred, 1) != 0) {
144 if (delta.tv_sec < 0 || delta.tv_usec < 0) {
145 /*
146 * Update maxtime to latest time we've seen.
147 */
148 if (tv1.tv_sec > maxtime.tv_sec)
149 maxtime = tv1;
150 tv2 = *tv;
151 timevalsub(&tv2, &maxtime);
152 if (tv2.tv_sec < -1) {
153 tv->tv_sec = maxtime.tv_sec - 1;
154 printf("Time adjustment clamped to -1 second\n");
155 }
156 } else {
157 if (tv1.tv_sec == laststep.tv_sec)
158 return (EPERM);
159 if (delta.tv_sec > 1) {
160 tv->tv_sec = tv1.tv_sec + 1;
161 printf("Time adjustment clamped to +1 second\n");
162 }
163 laststep = *tv;
164 }
165 }
166
167 ts.tv_sec = tv->tv_sec;
168 ts.tv_nsec = tv->tv_usec * 1000;
169 tc_setclock(&ts);
170 resettodr();
171 return (0);
172 }
173
174 #ifndef _SYS_SYSPROTO_H_
175 struct clock_getcpuclockid2_args {
176 id_t id;
177 int which;
178 clockid_t *clock_id;
179 };
180 #endif
181 /* ARGSUSED */
182 int
183 sys_clock_getcpuclockid2(struct thread *td, struct clock_getcpuclockid2_args *uap)
184 {
185 clockid_t clk_id;
186 int error;
187
188 error = kern_clock_getcpuclockid2(td, uap->id, uap->which, &clk_id);
189 if (error == 0)
190 error = copyout(&clk_id, uap->clock_id, sizeof(clockid_t));
191 return (error);
192 }
193
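/*
 * Build a CPU-time clock id for a process or thread by encoding the pid
 * or tid with CPUCLOCK_BIT (plus CPUCLOCK_PROCESS_BIT for processes).
 * An id of 0 refers to the calling process or thread.
 */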
194 int
195 kern_clock_getcpuclockid2(struct thread *td, id_t id, int which,
196 clockid_t *clk_id)
197 {
198 struct proc *p;
199 pid_t pid;
200 lwpid_t tid;
201 int error;
202
203 switch (which) {
204 case CPUCLOCK_WHICH_PID:
205 if (id != 0) {
206 error = pget(id, PGET_CANSEE | PGET_NOTID, &p);
207 if (error != 0)
208 return (error);
209 PROC_UNLOCK(p);
210 pid = id;
211 } else {
212 pid = td->td_proc->p_pid;
213 }
214 *clk_id = MAKE_PROCESS_CPUCLOCK(pid);
215 return (0);
216 case CPUCLOCK_WHICH_TID:
217 tid = id == 0 ? td->td_tid : id;
218 *clk_id = MAKE_THREAD_CPUCLOCK(tid);
219 return (0);
220 default:
221 return (EINVAL);
222 }
223 }
224
225 #ifndef _SYS_SYSPROTO_H_
226 struct clock_gettime_args {
227 clockid_t clock_id;
228 struct timespec *tp;
229 };
230 #endif
231 /* ARGSUSED */
232 int
233 sys_clock_gettime(struct thread *td, struct clock_gettime_args *uap)
234 {
235 struct timespec ats;
236 int error;
237
238 error = kern_clock_gettime(td, uap->clock_id, &ats);
239 if (error == 0)
240 error = copyout(&ats, uap->tp, sizeof(ats));
241
242 return (error);
243 }
244
245 static inline void
246 cputick2timespec(uint64_t runtime, struct timespec *ats)
247 {
248 runtime = cputick2usec(runtime);
249 ats->tv_sec = runtime / 1000000;
250 ats->tv_nsec = runtime % 1000000 * 1000;
251 }
252
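/*
 * Return the CPU time consumed by a thread.  For the current thread
 * (targettd == NULL) the time accumulated since the last context switch
 * is included; for any other thread the caller must hold its proc lock.
 */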
253 void
254 kern_thread_cputime(struct thread *targettd, struct timespec *ats)
255 {
256 uint64_t runtime, curtime, switchtime;
257
258 if (targettd == NULL) { /* current thread */
259 critical_enter();
260 switchtime = PCPU_GET(switchtime);
261 curtime = cpu_ticks();
262 runtime = curthread->td_runtime;
263 critical_exit();
264 runtime += curtime - switchtime;
265 } else {
266 PROC_LOCK_ASSERT(targettd->td_proc, MA_OWNED);
267 thread_lock(targettd);
268 runtime = targettd->td_runtime;
269 thread_unlock(targettd);
270 }
271 cputick2timespec(runtime, ats);
272 }
273
274 void
275 kern_process_cputime(struct proc *targetp, struct timespec *ats)
276 {
277 uint64_t runtime;
278 struct rusage ru;
279
280 PROC_LOCK_ASSERT(targetp, MA_OWNED);
281 PROC_STATLOCK(targetp);
282 rufetch(targetp, &ru);
283 runtime = targetp->p_rux.rux_runtime;
284 if (curthread->td_proc == targetp)
285 runtime += cpu_ticks() - PCPU_GET(switchtime);
286 PROC_STATUNLOCK(targetp);
287 cputick2timespec(runtime, ats);
288 }
289
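/*
 * Decode a CPU-time clock id (see kern_clock_getcpuclockid2()) and return
 * the accumulated runtime of the thread or process it names.
 */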
290 static int
291 get_cputime(struct thread *td, clockid_t clock_id, struct timespec *ats)
292 {
293 struct proc *p, *p2;
294 struct thread *td2;
295 lwpid_t tid;
296 pid_t pid;
297 int error;
298
299 p = td->td_proc;
300 if ((clock_id & CPUCLOCK_PROCESS_BIT) == 0) {
301 tid = clock_id & CPUCLOCK_ID_MASK;
302 td2 = tdfind(tid, p->p_pid);
303 if (td2 == NULL)
304 return (EINVAL);
305 kern_thread_cputime(td2, ats);
306 PROC_UNLOCK(td2->td_proc);
307 } else {
308 pid = clock_id & CPUCLOCK_ID_MASK;
309 error = pget(pid, PGET_CANSEE, &p2);
310 if (error != 0)
311 return (EINVAL);
312 kern_process_cputime(p2, ats);
313 PROC_UNLOCK(p2);
314 }
315 return (0);
316 }
317
318 int
319 kern_clock_gettime(struct thread *td, clockid_t clock_id, struct timespec *ats)
320 {
321 struct timeval sys, user;
322 struct proc *p;
323
324 p = td->td_proc;
325 switch (clock_id) {
326 case CLOCK_REALTIME: /* Default to precise. */
327 case CLOCK_REALTIME_PRECISE:
328 nanotime(ats);
329 break;
330 case CLOCK_REALTIME_FAST:
331 getnanotime(ats);
332 break;
333 case CLOCK_VIRTUAL:
334 PROC_LOCK(p);
335 PROC_STATLOCK(p);
336 calcru(p, &user, &sys);
337 PROC_STATUNLOCK(p);
338 PROC_UNLOCK(p);
339 TIMEVAL_TO_TIMESPEC(&user, ats);
340 break;
341 case CLOCK_PROF:
342 PROC_LOCK(p);
343 PROC_STATLOCK(p);
344 calcru(p, &user, &sys);
345 PROC_STATUNLOCK(p);
346 PROC_UNLOCK(p);
347 timevaladd(&user, &sys);
348 TIMEVAL_TO_TIMESPEC(&user, ats);
349 break;
350 case CLOCK_MONOTONIC: /* Default to precise. */
351 case CLOCK_MONOTONIC_PRECISE:
352 case CLOCK_UPTIME:
353 case CLOCK_UPTIME_PRECISE:
354 nanouptime(ats);
355 break;
356 case CLOCK_UPTIME_FAST:
357 case CLOCK_MONOTONIC_FAST:
358 getnanouptime(ats);
359 break;
360 case CLOCK_SECOND:
361 ats->tv_sec = time_second;
362 ats->tv_nsec = 0;
363 break;
364 case CLOCK_THREAD_CPUTIME_ID:
365 kern_thread_cputime(NULL, ats);
366 break;
367 case CLOCK_PROCESS_CPUTIME_ID:
368 PROC_LOCK(p);
369 kern_process_cputime(p, ats);
370 PROC_UNLOCK(p);
371 break;
372 default:
373 if ((int)clock_id >= 0)
374 return (EINVAL);
375 return (get_cputime(td, clock_id, ats));
376 }
377 return (0);
378 }
379
380 #ifndef _SYS_SYSPROTO_H_
381 struct clock_settime_args {
382 clockid_t clock_id;
383 const struct timespec *tp;
384 };
385 #endif
386 /* ARGSUSED */
387 int
388 sys_clock_settime(struct thread *td, struct clock_settime_args *uap)
389 {
390 struct timespec ats;
391 int error;
392
393 if ((error = copyin(uap->tp, &ats, sizeof(ats))) != 0)
394 return (error);
395 return (kern_clock_settime(td, uap->clock_id, &ats));
396 }
397
398 static int allow_insane_settime = 0;
399 SYSCTL_INT(_debug, OID_AUTO, allow_insane_settime, CTLFLAG_RWTUN,
400 &allow_insane_settime, 0,
401 "do not perform possibly restrictive checks on settime(2) args");
402
403 int
404 kern_clock_settime(struct thread *td, clockid_t clock_id, struct timespec *ats)
405 {
406 struct timeval atv;
407 int error;
408
409 if ((error = priv_check(td, PRIV_CLOCK_SETTIME)) != 0)
410 return (error);
411 if (clock_id != CLOCK_REALTIME)
412 return (EINVAL);
413 if (ats->tv_nsec < 0 || ats->tv_nsec >= NS_PER_SEC || ats->tv_sec < 0)
414 return (EINVAL);
415 if (!allow_insane_settime &&
416 (ats->tv_sec > 8000ULL * 365 * 24 * 60 * 60 ||
417 ats->tv_sec < utc_offset()))
418 return (EINVAL);
419 /* XXX Don't convert nsec->usec and back */
420 TIMESPEC_TO_TIMEVAL(&atv, ats);
421 error = settime(td, &atv);
422 return (error);
423 }
424
425 #ifndef _SYS_SYSPROTO_H_
426 struct clock_getres_args {
427 clockid_t clock_id;
428 struct timespec *tp;
429 };
430 #endif
431 int
432 sys_clock_getres(struct thread *td, struct clock_getres_args *uap)
433 {
434 struct timespec ts;
435 int error;
436
437 if (uap->tp == NULL)
438 return (0);
439
440 error = kern_clock_getres(td, uap->clock_id, &ts);
441 if (error == 0)
442 error = copyout(&ts, uap->tp, sizeof(ts));
443 return (error);
444 }
445
446 int
447 kern_clock_getres(struct thread *td, clockid_t clock_id, struct timespec *ts)
448 {
449
450 ts->tv_sec = 0;
451 switch (clock_id) {
452 case CLOCK_REALTIME:
453 case CLOCK_REALTIME_FAST:
454 case CLOCK_REALTIME_PRECISE:
455 case CLOCK_MONOTONIC:
456 case CLOCK_MONOTONIC_FAST:
457 case CLOCK_MONOTONIC_PRECISE:
458 case CLOCK_UPTIME:
459 case CLOCK_UPTIME_FAST:
460 case CLOCK_UPTIME_PRECISE:
461 /*
462 * Round up the result of the division cheaply by adding 1.
463 * Rounding up is especially important if rounding down
464 * would give 0. Perfect rounding is unimportant.
465 */
466 ts->tv_nsec = NS_PER_SEC / tc_getfrequency() + 1;
467 break;
468 case CLOCK_VIRTUAL:
469 case CLOCK_PROF:
470 /* Accurately round up here because we can do so cheaply. */
471 ts->tv_nsec = howmany(NS_PER_SEC, hz);
472 break;
473 case CLOCK_SECOND:
474 ts->tv_sec = 1;
475 ts->tv_nsec = 0;
476 break;
477 case CLOCK_THREAD_CPUTIME_ID:
478 case CLOCK_PROCESS_CPUTIME_ID:
479 cputime:
480 /* sync with cputick2usec */
481 ts->tv_nsec = 1000000 / cpu_tickrate();
482 if (ts->tv_nsec == 0)
483 ts->tv_nsec = 1000;
484 break;
485 default:
486 if ((int)clock_id < 0)
487 goto cputime;
488 return (EINVAL);
489 }
490 return (0);
491 }
492
493 int
494 kern_nanosleep(struct thread *td, struct timespec *rqt, struct timespec *rmt)
495 {
496
497 return (kern_clock_nanosleep(td, CLOCK_REALTIME, TIMER_RELTIME, rqt,
498 rmt));
499 }
500
501 static uint8_t nanowait[MAXCPU];
502
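/*
 * Common back end for nanosleep(2) and clock_nanosleep(2).  The request is
 * converted to an absolute sbintime_t deadline; absolute sleeps on the
 * realtime clocks are restarted when the clock is stepped (tracked via
 * td_rtcgen), and for interrupted relative sleeps the time remaining is
 * reported through *rmt.
 */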
503 int
504 kern_clock_nanosleep(struct thread *td, clockid_t clock_id, int flags,
505 const struct timespec *rqt, struct timespec *rmt)
506 {
507 struct timespec ts, now;
508 sbintime_t sbt, sbtt, prec, tmp;
509 time_t over;
510 int error;
511 bool is_abs_real;
512
513 if (rqt->tv_nsec < 0 || rqt->tv_nsec >= NS_PER_SEC)
514 return (EINVAL);
515 if ((flags & ~TIMER_ABSTIME) != 0)
516 return (EINVAL);
517 switch (clock_id) {
518 case CLOCK_REALTIME:
519 case CLOCK_REALTIME_PRECISE:
520 case CLOCK_REALTIME_FAST:
521 case CLOCK_SECOND:
522 is_abs_real = (flags & TIMER_ABSTIME) != 0;
523 break;
524 case CLOCK_MONOTONIC:
525 case CLOCK_MONOTONIC_PRECISE:
526 case CLOCK_MONOTONIC_FAST:
527 case CLOCK_UPTIME:
528 case CLOCK_UPTIME_PRECISE:
529 case CLOCK_UPTIME_FAST:
530 is_abs_real = false;
531 break;
532 case CLOCK_VIRTUAL:
533 case CLOCK_PROF:
534 case CLOCK_PROCESS_CPUTIME_ID:
535 return (ENOTSUP);
536 case CLOCK_THREAD_CPUTIME_ID:
537 default:
538 return (EINVAL);
539 }
540 do {
541 ts = *rqt;
542 if ((flags & TIMER_ABSTIME) != 0) {
543 if (is_abs_real)
544 td->td_rtcgen =
545 atomic_load_acq_int(&rtc_generation);
546 error = kern_clock_gettime(td, clock_id, &now);
547 KASSERT(error == 0, ("kern_clock_gettime: %d", error));
548 timespecsub(&ts, &now, &ts);
549 }
550 if (ts.tv_sec < 0 || (ts.tv_sec == 0 && ts.tv_nsec == 0)) {
551 error = EWOULDBLOCK;
552 break;
553 }
554 if (ts.tv_sec > INT32_MAX / 2) {
555 over = ts.tv_sec - INT32_MAX / 2;
556 ts.tv_sec -= over;
557 } else
558 over = 0;
559 tmp = tstosbt(ts);
560 prec = tmp;
561 prec >>= tc_precexp;
562 if (TIMESEL(&sbt, tmp))
563 sbt += tc_tick_sbt;
564 sbt += tmp;
565 error = tsleep_sbt(&nanowait[curcpu], PWAIT | PCATCH, "nanslp",
566 sbt, prec, C_ABSOLUTE);
567 } while (error == 0 && is_abs_real && td->td_rtcgen == 0);
568 td->td_rtcgen = 0;
569 if (error != EWOULDBLOCK) {
570 if (TIMESEL(&sbtt, tmp))
571 sbtt += tc_tick_sbt;
572 if (sbtt >= sbt)
573 return (0);
574 if (error == ERESTART)
575 error = EINTR;
576 if ((flags & TIMER_ABSTIME) == 0 && rmt != NULL) {
577 ts = sbttots(sbt - sbtt);
578 ts.tv_sec += over;
579 if (ts.tv_sec < 0)
580 timespecclear(&ts);
581 *rmt = ts;
582 }
583 return (error);
584 }
585 return (0);
586 }
587
588 #ifndef _SYS_SYSPROTO_H_
589 struct nanosleep_args {
590 struct timespec *rqtp;
591 struct timespec *rmtp;
592 };
593 #endif
594 /* ARGSUSED */
595 int
596 sys_nanosleep(struct thread *td, struct nanosleep_args *uap)
597 {
598
599 return (user_clock_nanosleep(td, CLOCK_REALTIME, TIMER_RELTIME,
600 uap->rqtp, uap->rmtp));
601 }
602
603 #ifndef _SYS_SYSPROTO_H_
604 struct clock_nanosleep_args {
605 clockid_t clock_id;
606 int flags;
607 struct timespec *rqtp;
608 struct timespec *rmtp;
609 };
610 #endif
611 /* ARGSUSED */
612 int
613 sys_clock_nanosleep(struct thread *td, struct clock_nanosleep_args *uap)
614 {
615 int error;
616
617 error = user_clock_nanosleep(td, uap->clock_id, uap->flags, uap->rqtp,
618 uap->rmtp);
619 return (kern_posix_error(td, error));
620 }
621
622 static int
623 user_clock_nanosleep(struct thread *td, clockid_t clock_id, int flags,
624 const struct timespec *ua_rqtp, struct timespec *ua_rmtp)
625 {
626 struct timespec rmt, rqt;
627 int error, error2;
628
629 error = copyin(ua_rqtp, &rqt, sizeof(rqt));
630 if (error)
631 return (error);
632 error = kern_clock_nanosleep(td, clock_id, flags, &rqt, &rmt);
633 if (error == EINTR && ua_rmtp != NULL && (flags & TIMER_ABSTIME) == 0) {
634 error2 = copyout(&rmt, ua_rmtp, sizeof(rmt));
635 if (error2 != 0)
636 error = error2;
637 }
638 return (error);
639 }
640
641 #ifndef _SYS_SYSPROTO_H_
642 struct gettimeofday_args {
643 struct timeval *tp;
644 struct timezone *tzp;
645 };
646 #endif
647 /* ARGSUSED */
648 int
649 sys_gettimeofday(struct thread *td, struct gettimeofday_args *uap)
650 {
651 struct timeval atv;
652 struct timezone rtz;
653 int error = 0;
654
655 if (uap->tp) {
656 microtime(&atv);
657 error = copyout(&atv, uap->tp, sizeof (atv));
658 }
659 if (error == 0 && uap->tzp != NULL) {
660 rtz.tz_minuteswest = 0;
661 rtz.tz_dsttime = 0;
662 error = copyout(&rtz, uap->tzp, sizeof (rtz));
663 }
664 return (error);
665 }
666
667 #ifndef _SYS_SYSPROTO_H_
668 struct settimeofday_args {
669 struct timeval *tv;
670 struct timezone *tzp;
671 };
672 #endif
673 /* ARGSUSED */
674 int
675 sys_settimeofday(struct thread *td, struct settimeofday_args *uap)
676 {
677 struct timeval atv, *tvp;
678 struct timezone atz, *tzp;
679 int error;
680
681 if (uap->tv) {
682 error = copyin(uap->tv, &atv, sizeof(atv));
683 if (error)
684 return (error);
685 tvp = &atv;
686 } else
687 tvp = NULL;
688 if (uap->tzp) {
689 error = copyin(uap->tzp, &atz, sizeof(atz));
690 if (error)
691 return (error);
692 tzp = &atz;
693 } else
694 tzp = NULL;
695 return (kern_settimeofday(td, tvp, tzp));
696 }
697
698 int
699 kern_settimeofday(struct thread *td, struct timeval *tv, struct timezone *tzp)
700 {
701 int error;
702
703 error = priv_check(td, PRIV_SETTIMEOFDAY);
704 if (error)
705 return (error);
706 /* Verify all parameters before changing time. */
707 if (tv) {
708 if (tv->tv_usec < 0 || tv->tv_usec >= 1000000 ||
709 tv->tv_sec < 0)
710 return (EINVAL);
711 error = settime(td, tv);
712 }
713 return (error);
714 }
715
716 /*
717 * Get value of an interval timer. The process virtual and profiling virtual
718 * time timers are kept in the p_stats area, since they can be swapped out.
719 * These are kept internally in the way they are specified externally: in
720 * time until they expire.
721 *
722 * The real time interval timer is kept in the process table slot for the
723 * process, and its value (it_value) is kept as an absolute time rather than
724 * as a delta, so that it is easy to keep periodic real-time signals from
725 * drifting.
726 *
727 * Virtual time timers are processed in the hardclock() routine of
728 * kern_clock.c. The real time timer is processed by a timeout routine,
729 * called from the softclock() routine. Since a callout may be delayed in
730 * real time due to interrupt processing in the system, it is possible for
731 * the real time timeout routine (realitexpire, given below), to be delayed
732 * in real time past when it is supposed to occur. It does not suffice,
733 * therefore, to reload the real timer .it_value from the real time timers
734 * .it_interval. Rather, we compute the next time in absolute time the timer
735 * should go off.
736 */
737 #ifndef _SYS_SYSPROTO_H_
738 struct getitimer_args {
739 u_int which;
740 struct itimerval *itv;
741 };
742 #endif
743 int
744 sys_getitimer(struct thread *td, struct getitimer_args *uap)
745 {
746 struct itimerval aitv;
747 int error;
748
749 error = kern_getitimer(td, uap->which, &aitv);
750 if (error != 0)
751 return (error);
752 return (copyout(&aitv, uap->itv, sizeof (struct itimerval)));
753 }
754
755 int
756 kern_getitimer(struct thread *td, u_int which, struct itimerval *aitv)
757 {
758 struct proc *p = td->td_proc;
759 struct timeval ctv;
760
761 if (which > ITIMER_PROF)
762 return (EINVAL);
763
764 if (which == ITIMER_REAL) {
765 /*
766 * Convert from absolute to relative time in .it_value
767 * part of real time timer. If time for real time timer
768 * has passed return 0, else return difference between
769 * current time and time for the timer to go off.
770 */
771 PROC_LOCK(p);
772 *aitv = p->p_realtimer;
773 PROC_UNLOCK(p);
774 if (timevalisset(&aitv->it_value)) {
775 microuptime(&ctv);
776 if (timevalcmp(&aitv->it_value, &ctv, <))
777 timevalclear(&aitv->it_value);
778 else
779 timevalsub(&aitv->it_value, &ctv);
780 }
781 } else {
782 PROC_ITIMLOCK(p);
783 *aitv = p->p_stats->p_timer[which];
784 PROC_ITIMUNLOCK(p);
785 }
786 #ifdef KTRACE
787 if (KTRPOINT(td, KTR_STRUCT))
788 ktritimerval(aitv);
789 #endif
790 return (0);
791 }
792
793 #ifndef _SYS_SYSPROTO_H_
794 struct setitimer_args {
795 u_int which;
796 struct itimerval *itv, *oitv;
797 };
798 #endif
799 int
800 sys_setitimer(struct thread *td, struct setitimer_args *uap)
801 {
802 struct itimerval aitv, oitv;
803 int error;
804
805 if (uap->itv == NULL) {
806 uap->itv = uap->oitv;
807 return (sys_getitimer(td, (struct getitimer_args *)uap));
808 }
809
810 if ((error = copyin(uap->itv, &aitv, sizeof(struct itimerval))))
811 return (error);
812 error = kern_setitimer(td, uap->which, &aitv, &oitv);
813 if (error != 0 || uap->oitv == NULL)
814 return (error);
815 return (copyout(&oitv, uap->oitv, sizeof(struct itimerval)));
816 }
817
818 int
819 kern_setitimer(struct thread *td, u_int which, struct itimerval *aitv,
820 struct itimerval *oitv)
821 {
822 struct proc *p = td->td_proc;
823 struct timeval ctv;
824 sbintime_t sbt, pr;
825
826 if (aitv == NULL)
827 return (kern_getitimer(td, which, oitv));
828
829 if (which > ITIMER_PROF)
830 return (EINVAL);
831 #ifdef KTRACE
832 if (KTRPOINT(td, KTR_STRUCT))
833 ktritimerval(aitv);
834 #endif
835 if (itimerfix(&aitv->it_value) ||
836 aitv->it_value.tv_sec > INT32_MAX / 2)
837 return (EINVAL);
838 if (!timevalisset(&aitv->it_value))
839 timevalclear(&aitv->it_interval);
840 else if (itimerfix(&aitv->it_interval) ||
841 aitv->it_interval.tv_sec > INT32_MAX / 2)
842 return (EINVAL);
843
844 if (which == ITIMER_REAL) {
845 PROC_LOCK(p);
846 if (timevalisset(&p->p_realtimer.it_value))
847 callout_stop(&p->p_itcallout);
848 microuptime(&ctv);
849 if (timevalisset(&aitv->it_value)) {
850 pr = tvtosbt(aitv->it_value) >> tc_precexp;
851 timevaladd(&aitv->it_value, &ctv);
852 sbt = tvtosbt(aitv->it_value);
853 callout_reset_sbt(&p->p_itcallout, sbt, pr,
854 realitexpire, p, C_ABSOLUTE);
855 }
856 *oitv = p->p_realtimer;
857 p->p_realtimer = *aitv;
858 PROC_UNLOCK(p);
859 if (timevalisset(&oitv->it_value)) {
860 if (timevalcmp(&oitv->it_value, &ctv, <))
861 timevalclear(&oitv->it_value);
862 else
863 timevalsub(&oitv->it_value, &ctv);
864 }
865 } else {
866 if (aitv->it_interval.tv_sec == 0 &&
867 aitv->it_interval.tv_usec != 0 &&
868 aitv->it_interval.tv_usec < tick)
869 aitv->it_interval.tv_usec = tick;
870 if (aitv->it_value.tv_sec == 0 &&
871 aitv->it_value.tv_usec != 0 &&
872 aitv->it_value.tv_usec < tick)
873 aitv->it_value.tv_usec = tick;
874 PROC_ITIMLOCK(p);
875 *oitv = p->p_stats->p_timer[which];
876 p->p_stats->p_timer[which] = *aitv;
877 PROC_ITIMUNLOCK(p);
878 }
879 #ifdef KTRACE
880 if (KTRPOINT(td, KTR_STRUCT))
881 ktritimerval(oitv);
882 #endif
883 return (0);
884 }
885
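/*
 * Re-arm the ITIMER_REAL callout for the absolute deadline kept in
 * p_realtimer.it_value, deriving the callout precision from *isbtp or,
 * if that is NULL, from the timer's interval.
 */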
886 static void
887 realitexpire_reset_callout(struct proc *p, sbintime_t *isbtp)
888 {
889 sbintime_t prec;
890
891 prec = isbtp == NULL ? tvtosbt(p->p_realtimer.it_interval) : *isbtp;
892 callout_reset_sbt(&p->p_itcallout, tvtosbt(p->p_realtimer.it_value),
893 prec >> tc_precexp, realitexpire, p, C_ABSOLUTE);
894 }
895
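/*
 * A stopped process is being continued: deliver or re-arm the real
 * interval timer and any POSIX timers whose expiration was deferred
 * while the process was stopped (P2_ITSTOPPED / ITF_PSTOPPED).
 */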
896 void
897 itimer_proc_continue(struct proc *p)
898 {
899 struct timeval ctv;
900 struct itimer *it;
901 int id;
902
903 PROC_LOCK_ASSERT(p, MA_OWNED);
904
905 if ((p->p_flag2 & P2_ITSTOPPED) != 0) {
906 p->p_flag2 &= ~P2_ITSTOPPED;
907 microuptime(&ctv);
908 if (timevalcmp(&p->p_realtimer.it_value, &ctv, <=))
909 realitexpire(p);
910 else
911 realitexpire_reset_callout(p, NULL);
912 }
913
914 if (p->p_itimers != NULL) {
915 for (id = 3; id < TIMER_MAX; id++) {
916 it = p->p_itimers->its_timers[id];
917 if (it == NULL)
918 continue;
919 if ((it->it_flags & ITF_PSTOPPED) != 0) {
920 ITIMER_LOCK(it);
921 if ((it->it_flags & ITF_PSTOPPED) != 0) {
922 it->it_flags &= ~ITF_PSTOPPED;
923 if ((it->it_flags & ITF_DELETING) == 0)
924 realtimer_expire_l(it, true);
925 }
926 ITIMER_UNLOCK(it);
927 }
928 }
929 }
930 }
931
932 /*
933 * Real interval timer expired:
934 * send process whose timer expired an alarm signal.
935 * If time is not set up to reload, then just return.
936 * Else compute next time timer should go off which is > current time.
937 * This is where delay in processing this timeout causes multiple
938 * SIGALRM calls to be compressed into one.
939 * tvtohz() always adds 1 to allow for the time until the next clock
940 * interrupt being strictly less than 1 clock tick, but we don't want
941 * that here since we want to appear to be in sync with the clock
942 * interrupt even when we're delayed.
943 */
944 void
945 realitexpire(void *arg)
946 {
947 struct proc *p;
948 struct timeval ctv;
949 sbintime_t isbt;
950
951 p = (struct proc *)arg;
952 kern_psignal(p, SIGALRM);
953 if (!timevalisset(&p->p_realtimer.it_interval)) {
954 timevalclear(&p->p_realtimer.it_value);
955 if (p->p_flag & P_WEXIT)
956 wakeup(&p->p_itcallout);
957 return;
958 }
959
960 isbt = tvtosbt(p->p_realtimer.it_interval);
961 if (isbt >= sbt_timethreshold)
962 getmicrouptime(&ctv);
963 else
964 microuptime(&ctv);
965 do {
966 timevaladd(&p->p_realtimer.it_value,
967 &p->p_realtimer.it_interval);
968 } while (timevalcmp(&p->p_realtimer.it_value, &ctv, <=));
969
970 if (P_SHOULDSTOP(p) || P_KILLED(p)) {
971 p->p_flag2 |= P2_ITSTOPPED;
972 return;
973 }
974
975 p->p_flag2 &= ~P2_ITSTOPPED;
976 realitexpire_reset_callout(p, &isbt);
977 }
978
979 /*
980 * Check that a proposed value to load into the .it_value or
981 * .it_interval part of an interval timer is acceptable, and
982 * fix it to have at least minimal value (i.e. if it is less
983 * than the resolution of the clock, round it up.)
984 */
985 int
986 itimerfix(struct timeval *tv)
987 {
988
989 if (tv->tv_sec < 0 || tv->tv_usec < 0 || tv->tv_usec >= 1000000)
990 return (EINVAL);
991 if (tv->tv_sec == 0 && tv->tv_usec != 0 &&
992 tv->tv_usec < (u_int)tick / 16)
993 tv->tv_usec = (u_int)tick / 16;
994 return (0);
995 }
996
997 /*
998 * Decrement an interval timer by a specified number
999 * of microseconds, which must be less than a second,
1000 * i.e. < 1000000. If the timer expires, then reload
1001 * it. In this case, carry over (usec - old value) to
1002 * reduce the value reloaded into the timer so that
1003 * the timer does not drift. This routine assumes
1004 * that it is called in a context where the timers
1005 * on which it is operating cannot change in value.
1006 */
1007 int
1008 itimerdecr(struct itimerval *itp, int usec)
1009 {
1010
1011 if (itp->it_value.tv_usec < usec) {
1012 if (itp->it_value.tv_sec == 0) {
1013 /* expired, and already in next interval */
1014 usec -= itp->it_value.tv_usec;
1015 goto expire;
1016 }
1017 itp->it_value.tv_usec += 1000000;
1018 itp->it_value.tv_sec--;
1019 }
1020 itp->it_value.tv_usec -= usec;
1021 usec = 0;
1022 if (timevalisset(&itp->it_value))
1023 return (1);
1024 /* expired, exactly at end of interval */
1025 expire:
1026 if (timevalisset(&itp->it_interval)) {
1027 itp->it_value = itp->it_interval;
1028 itp->it_value.tv_usec -= usec;
1029 if (itp->it_value.tv_usec < 0) {
1030 itp->it_value.tv_usec += 1000000;
1031 itp->it_value.tv_sec--;
1032 }
1033 } else
1034 itp->it_value.tv_usec = 0; /* sec is already 0 */
1035 return (0);
1036 }
1037
1038 /*
1039 * Add and subtract routines for timevals.
1040 * N.B.: subtract routine doesn't deal with
1041 * results which are before the beginning,
1042 * it just gets very confused in this case.
1043 * Caveat emptor.
1044 */
1045 void
1046 timevaladd(struct timeval *t1, const struct timeval *t2)
1047 {
1048
1049 t1->tv_sec += t2->tv_sec;
1050 t1->tv_usec += t2->tv_usec;
1051 timevalfix(t1);
1052 }
1053
1054 void
1055 timevalsub(struct timeval *t1, const struct timeval *t2)
1056 {
1057
1058 t1->tv_sec -= t2->tv_sec;
1059 t1->tv_usec -= t2->tv_usec;
1060 timevalfix(t1);
1061 }
1062
1063 static void
1064 timevalfix(struct timeval *t1)
1065 {
1066
1067 if (t1->tv_usec < 0) {
1068 t1->tv_sec--;
1069 t1->tv_usec += 1000000;
1070 }
1071 if (t1->tv_usec >= 1000000) {
1072 t1->tv_sec++;
1073 t1->tv_usec -= 1000000;
1074 }
1075 }
1076
1077 /*
1078 * ratecheck(): simple time-based rate-limit checking.
1079 */
1080 int
1081 ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
1082 {
1083 struct timeval tv, delta;
1084 int rv = 0;
1085
1086 getmicrouptime(&tv); /* NB: 10ms precision */
1087 delta = tv;
1088 timevalsub(&delta, lasttime);
1089
1090 /*
1091 * check for 0,0 is so that the message will be seen at least once,
1092 * even if interval is huge.
1093 */
1094 if (timevalcmp(&delta, mininterval, >=) ||
1095 (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
1096 *lasttime = tv;
1097 rv = 1;
1098 }
1099
1100 return (rv);
1101 }
1102
1103 /*
1104 * ppsratecheck(): packets (or events) per second limitation.
1105 *
1106 * Return 0 if the limit is to be enforced (e.g. the caller
1107 * should drop a packet because of the rate limitation).
1108 *
1109 * maxpps of 0 always causes zero to be returned. maxpps of -1
1110 * always causes 1 to be returned; this effectively defeats rate
1111 * limiting.
1112 *
1113 * Note that we maintain the struct timeval for compatibility
1114 * with other bsd systems. We reuse the storage and just monitor
1115 * clock ticks for minimal overhead.
1116 */
1117 int
1118 ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
1119 {
1120 int now;
1121
1122 /*
1123 * Reset the last time and counter if this is the first call
1124 * or more than a second has passed since the last update of
1125 * lasttime.
1126 */
1127 now = ticks;
1128 if (lasttime->tv_sec == 0 || (u_int)(now - lasttime->tv_sec) >= hz) {
1129 lasttime->tv_sec = now;
1130 *curpps = 1;
1131 return (maxpps != 0);
1132 } else {
1133 (*curpps)++; /* NB: ignore potential overflow */
1134 return (maxpps < 0 || *curpps <= maxpps);
1135 }
1136 }
1137
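/*
 * SYSINIT hook: create the UMA zone backing struct itimer, register the
 * kclock operations for CLOCK_REALTIME and CLOCK_MONOTONIC, and publish
 * the POSIX timer limits via p31b_setcfg().
 */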
1138 static void
1139 itimer_start(void)
1140 {
1141 static const struct kclock rt_clock = {
1142 .timer_create = realtimer_create,
1143 .timer_delete = realtimer_delete,
1144 .timer_settime = realtimer_settime,
1145 .timer_gettime = realtimer_gettime,
1146 };
1147
1148 itimer_zone = uma_zcreate("itimer", sizeof(struct itimer),
1149 NULL, NULL, itimer_init, itimer_fini, UMA_ALIGN_PTR, 0);
1150 register_posix_clock(CLOCK_REALTIME, &rt_clock);
1151 register_posix_clock(CLOCK_MONOTONIC, &rt_clock);
1152 p31b_setcfg(CTL_P1003_1B_TIMERS, 200112L);
1153 p31b_setcfg(CTL_P1003_1B_DELAYTIMER_MAX, INT_MAX);
1154 p31b_setcfg(CTL_P1003_1B_TIMER_MAX, TIMER_MAX);
1155 }
1156
1157 static int
1158 register_posix_clock(int clockid, const struct kclock *clk)
1159 {
1160 if ((unsigned)clockid >= MAX_CLOCKS) {
1161 printf("%s: invalid clockid\n", __func__);
1162 return (0);
1163 }
1164 posix_clocks[clockid] = *clk;
1165 return (1);
1166 }
1167
1168 static int
1169 itimer_init(void *mem, int size, int flags)
1170 {
1171 struct itimer *it;
1172
1173 it = (struct itimer *)mem;
1174 mtx_init(&it->it_mtx, "itimer lock", NULL, MTX_DEF);
1175 return (0);
1176 }
1177
1178 static void
1179 itimer_fini(void *mem, int size)
1180 {
1181 struct itimer *it;
1182
1183 it = (struct itimer *)mem;
1184 mtx_destroy(&it->it_mtx);
1185 }
1186
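/*
 * itimer_enter() and itimer_leave() bracket regions that use a timer
 * while its mutex may be dropped.  The use count keeps kern_ktimer_delete()
 * from tearing the timer down until every such region has finished.
 */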
1187 static void
1188 itimer_enter(struct itimer *it)
1189 {
1190
1191 mtx_assert(&it->it_mtx, MA_OWNED);
1192 it->it_usecount++;
1193 }
1194
1195 static void
1196 itimer_leave(struct itimer *it)
1197 {
1198
1199 mtx_assert(&it->it_mtx, MA_OWNED);
1200 KASSERT(it->it_usecount > 0, ("invalid it_usecount"));
1201
1202 if (--it->it_usecount == 0 && (it->it_flags & ITF_WANTED) != 0)
1203 wakeup(it);
1204 }
1205
1206 #ifndef _SYS_SYSPROTO_H_
1207 struct ktimer_create_args {
1208 clockid_t clock_id;
1209 struct sigevent * evp;
1210 int * timerid;
1211 };
1212 #endif
1213 int
1214 sys_ktimer_create(struct thread *td, struct ktimer_create_args *uap)
1215 {
1216 struct sigevent *evp, ev;
1217 int id;
1218 int error;
1219
1220 if (uap->evp == NULL) {
1221 evp = NULL;
1222 } else {
1223 error = copyin(uap->evp, &ev, sizeof(ev));
1224 if (error != 0)
1225 return (error);
1226 evp = &ev;
1227 }
1228 error = kern_ktimer_create(td, uap->clock_id, evp, &id, -1);
1229 if (error == 0) {
1230 error = copyout(&id, uap->timerid, sizeof(int));
1231 if (error != 0)
1232 kern_ktimer_delete(td, id);
1233 }
1234 return (error);
1235 }
1236
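/*
 * Create a POSIX timer.  Slots 0-2 of its_timers[] are reserved for
 * setitimer(); preset_id == -1 requests the first free slot at index 3
 * or above.  When no sigevent is supplied, a default signal is chosen
 * based on the clock id.
 */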
1237 int
1238 kern_ktimer_create(struct thread *td, clockid_t clock_id, struct sigevent *evp,
1239 int *timerid, int preset_id)
1240 {
1241 struct proc *p = td->td_proc;
1242 struct itimer *it;
1243 int id;
1244 int error;
1245
1246 if (clock_id < 0 || clock_id >= MAX_CLOCKS)
1247 return (EINVAL);
1248
1249 if (posix_clocks[clock_id].timer_create == NULL)
1250 return (EINVAL);
1251
1252 if (evp != NULL) {
1253 if (evp->sigev_notify != SIGEV_NONE &&
1254 evp->sigev_notify != SIGEV_SIGNAL &&
1255 evp->sigev_notify != SIGEV_THREAD_ID)
1256 return (EINVAL);
1257 if ((evp->sigev_notify == SIGEV_SIGNAL ||
1258 evp->sigev_notify == SIGEV_THREAD_ID) &&
1259 !_SIG_VALID(evp->sigev_signo))
1260 return (EINVAL);
1261 }
1262
1263 if (p->p_itimers == NULL)
1264 itimers_alloc(p);
1265
1266 it = uma_zalloc(itimer_zone, M_WAITOK);
1267 it->it_flags = 0;
1268 it->it_usecount = 0;
1269 it->it_active = 0;
1270 timespecclear(&it->it_time.it_value);
1271 timespecclear(&it->it_time.it_interval);
1272 it->it_overrun = 0;
1273 it->it_overrun_last = 0;
1274 it->it_clockid = clock_id;
1275 it->it_timerid = -1;
1276 it->it_proc = p;
1277 ksiginfo_init(&it->it_ksi);
1278 it->it_ksi.ksi_flags |= KSI_INS | KSI_EXT;
1279 error = CLOCK_CALL(clock_id, timer_create, (it));
1280 if (error != 0)
1281 goto out;
1282
1283 PROC_LOCK(p);
1284 if (preset_id != -1) {
1285 KASSERT(preset_id >= 0 && preset_id < 3, ("invalid preset_id"));
1286 id = preset_id;
1287 if (p->p_itimers->its_timers[id] != NULL) {
1288 PROC_UNLOCK(p);
1289 error = 0;
1290 goto out;
1291 }
1292 } else {
1293 /*
1294 * Find a free timer slot, skipping those reserved
1295 * for setitimer().
1296 */
1297 for (id = 3; id < TIMER_MAX; id++)
1298 if (p->p_itimers->its_timers[id] == NULL)
1299 break;
1300 if (id == TIMER_MAX) {
1301 PROC_UNLOCK(p);
1302 error = EAGAIN;
1303 goto out;
1304 }
1305 }
1306 it->it_timerid = id;
1307 p->p_itimers->its_timers[id] = it;
1308 if (evp != NULL)
1309 it->it_sigev = *evp;
1310 else {
1311 it->it_sigev.sigev_notify = SIGEV_SIGNAL;
1312 switch (clock_id) {
1313 default:
1314 case CLOCK_REALTIME:
1315 it->it_sigev.sigev_signo = SIGALRM;
1316 break;
1317 case CLOCK_VIRTUAL:
1318 it->it_sigev.sigev_signo = SIGVTALRM;
1319 break;
1320 case CLOCK_PROF:
1321 it->it_sigev.sigev_signo = SIGPROF;
1322 break;
1323 }
1324 it->it_sigev.sigev_value.sival_int = id;
1325 }
1326
1327 if (it->it_sigev.sigev_notify == SIGEV_SIGNAL ||
1328 it->it_sigev.sigev_notify == SIGEV_THREAD_ID) {
1329 it->it_ksi.ksi_signo = it->it_sigev.sigev_signo;
1330 it->it_ksi.ksi_code = SI_TIMER;
1331 it->it_ksi.ksi_value = it->it_sigev.sigev_value;
1332 it->it_ksi.ksi_timerid = id;
1333 }
1334 PROC_UNLOCK(p);
1335 *timerid = id;
1336 return (0);
1337
1338 out:
1339 ITIMER_LOCK(it);
1340 CLOCK_CALL(it->it_clockid, timer_delete, (it));
1341 ITIMER_UNLOCK(it);
1342 uma_zfree(itimer_zone, it);
1343 return (error);
1344 }
1345
1346 #ifndef _SYS_SYSPROTO_H_
1347 struct ktimer_delete_args {
1348 int timerid;
1349 };
1350 #endif
1351 int
1352 sys_ktimer_delete(struct thread *td, struct ktimer_delete_args *uap)
1353 {
1354
1355 return (kern_ktimer_delete(td, uap->timerid));
1356 }
1357
1358 static struct itimer *
1359 itimer_find(struct proc *p, int timerid)
1360 {
1361 struct itimer *it;
1362
1363 PROC_LOCK_ASSERT(p, MA_OWNED);
1364 if ((p->p_itimers == NULL) ||
1365 (timerid < 0) || (timerid >= TIMER_MAX) ||
1366 (it = p->p_itimers->its_timers[timerid]) == NULL) {
1367 return (NULL);
1368 }
1369 ITIMER_LOCK(it);
1370 if ((it->it_flags & ITF_DELETING) != 0) {
1371 ITIMER_UNLOCK(it);
1372 it = NULL;
1373 }
1374 return (it);
1375 }
1376
1377 int
1378 kern_ktimer_delete(struct thread *td, int timerid)
1379 {
1380 struct proc *p = td->td_proc;
1381 struct itimer *it;
1382
1383 PROC_LOCK(p);
1384 it = itimer_find(p, timerid);
1385 if (it == NULL) {
1386 PROC_UNLOCK(p);
1387 return (EINVAL);
1388 }
1389 PROC_UNLOCK(p);
1390
1391 it->it_flags |= ITF_DELETING;
1392 while (it->it_usecount > 0) {
1393 it->it_flags |= ITF_WANTED;
1394 msleep(it, &it->it_mtx, PPAUSE, "itimer", 0);
1395 }
1396 it->it_flags &= ~ITF_WANTED;
1397 CLOCK_CALL(it->it_clockid, timer_delete, (it));
1398 ITIMER_UNLOCK(it);
1399
1400 PROC_LOCK(p);
1401 if (KSI_ONQ(&it->it_ksi))
1402 sigqueue_take(&it->it_ksi);
1403 p->p_itimers->its_timers[timerid] = NULL;
1404 PROC_UNLOCK(p);
1405 uma_zfree(itimer_zone, it);
1406 return (0);
1407 }
1408
1409 #ifndef _SYS_SYSPROTO_H_
1410 struct ktimer_settime_args {
1411 int timerid;
1412 int flags;
1413 const struct itimerspec * value;
1414 struct itimerspec * ovalue;
1415 };
1416 #endif
1417 int
1418 sys_ktimer_settime(struct thread *td, struct ktimer_settime_args *uap)
1419 {
1420 struct itimerspec val, oval, *ovalp;
1421 int error;
1422
1423 error = copyin(uap->value, &val, sizeof(val));
1424 if (error != 0)
1425 return (error);
1426 ovalp = uap->ovalue != NULL ? &oval : NULL;
1427 error = kern_ktimer_settime(td, uap->timerid, uap->flags, &val, ovalp);
1428 if (error == 0 && uap->ovalue != NULL)
1429 error = copyout(ovalp, uap->ovalue, sizeof(*ovalp));
1430 return (error);
1431 }
1432
1433 int
1434 kern_ktimer_settime(struct thread *td, int timer_id, int flags,
1435 struct itimerspec *val, struct itimerspec *oval)
1436 {
1437 struct proc *p;
1438 struct itimer *it;
1439 int error;
1440
1441 p = td->td_proc;
1442 PROC_LOCK(p);
1443 if (timer_id < 3 || (it = itimer_find(p, timer_id)) == NULL) {
1444 PROC_UNLOCK(p);
1445 error = EINVAL;
1446 } else {
1447 PROC_UNLOCK(p);
1448 itimer_enter(it);
1449 error = CLOCK_CALL(it->it_clockid, timer_settime, (it,
1450 flags, val, oval));
1451 itimer_leave(it);
1452 ITIMER_UNLOCK(it);
1453 }
1454 return (error);
1455 }
1456
1457 #ifndef _SYS_SYSPROTO_H_
1458 struct ktimer_gettime_args {
1459 int timerid;
1460 struct itimerspec * value;
1461 };
1462 #endif
1463 int
1464 sys_ktimer_gettime(struct thread *td, struct ktimer_gettime_args *uap)
1465 {
1466 struct itimerspec val;
1467 int error;
1468
1469 error = kern_ktimer_gettime(td, uap->timerid, &val);
1470 if (error == 0)
1471 error = copyout(&val, uap->value, sizeof(val));
1472 return (error);
1473 }
1474
1475 int
1476 kern_ktimer_gettime(struct thread *td, int timer_id, struct itimerspec *val)
1477 {
1478 struct proc *p;
1479 struct itimer *it;
1480 int error;
1481
1482 p = td->td_proc;
1483 PROC_LOCK(p);
1484 if (timer_id < 3 || (it = itimer_find(p, timer_id)) == NULL) {
1485 PROC_UNLOCK(p);
1486 error = EINVAL;
1487 } else {
1488 PROC_UNLOCK(p);
1489 itimer_enter(it);
1490 error = CLOCK_CALL(it->it_clockid, timer_gettime, (it, val));
1491 itimer_leave(it);
1492 ITIMER_UNLOCK(it);
1493 }
1494 return (error);
1495 }
1496
1497 #ifndef _SYS_SYSPROTO_H_
1498 struct ktimer_getoverrun_args {
1499 int timerid;
1500 };
1501 #endif
1502 int
1503 sys_ktimer_getoverrun(struct thread *td, struct ktimer_getoverrun_args *uap)
1504 {
1505
1506 return (kern_ktimer_getoverrun(td, uap->timerid));
1507 }
1508
1509 int
1510 kern_ktimer_getoverrun(struct thread *td, int timer_id)
1511 {
1512 struct proc *p = td->td_proc;
1513 struct itimer *it;
1514 int error;
1515
1516 PROC_LOCK(p);
1517 if (timer_id < 3 ||
1518 (it = itimer_find(p, timer_id)) == NULL) {
1519 PROC_UNLOCK(p);
1520 error = EINVAL;
1521 } else {
1522 td->td_retval[0] = it->it_overrun_last;
1523 ITIMER_UNLOCK(it);
1524 PROC_UNLOCK(p);
1525 error = 0;
1526 }
1527 return (error);
1528 }
1529
1530 static int
1531 realtimer_create(struct itimer *it)
1532 {
1533 callout_init_mtx(&it->it_callout, &it->it_mtx, 0);
1534 return (0);
1535 }
1536
1537 static int
1538 realtimer_delete(struct itimer *it)
1539 {
1540 mtx_assert(&it->it_mtx, MA_OWNED);
1541
1542 /*
1543 * clear timer's value and interval to tell realtimer_expire
1544 * to not rearm the timer.
1545 */
1546 timespecclear(&it->it_time.it_value);
1547 timespecclear(&it->it_time.it_interval);
1548 ITIMER_UNLOCK(it);
1549 callout_drain(&it->it_callout);
1550 ITIMER_LOCK(it);
1551 return (0);
1552 }
1553
1554 static int
1555 realtimer_gettime(struct itimer *it, struct itimerspec *ovalue)
1556 {
1557 struct timespec cts;
1558
1559 mtx_assert(&it->it_mtx, MA_OWNED);
1560
1561 realtimer_clocktime(it->it_clockid, &cts);
1562 *ovalue = it->it_time;
1563 if (ovalue->it_value.tv_sec != 0 || ovalue->it_value.tv_nsec != 0) {
1564 timespecsub(&ovalue->it_value, &cts, &ovalue->it_value);
1565 if (ovalue->it_value.tv_sec < 0 ||
1566 (ovalue->it_value.tv_sec == 0 &&
1567 ovalue->it_value.tv_nsec == 0)) {
1568 ovalue->it_value.tv_sec = 0;
1569 ovalue->it_value.tv_nsec = 1;
1570 }
1571 }
1572 return (0);
1573 }
1574
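/*
 * Arm or disarm a realtime/monotonic POSIX timer.  The request is
 * normalized by itimespecfix(), stored as an absolute expiration time and
 * scheduled with callout_reset(); a zero value simply stops the callout.
 */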
1575 static int
1576 realtimer_settime(struct itimer *it, int flags, struct itimerspec *value,
1577 struct itimerspec *ovalue)
1578 {
1579 struct timespec cts, ts;
1580 struct timeval tv;
1581 struct itimerspec val;
1582
1583 mtx_assert(&it->it_mtx, MA_OWNED);
1584
1585 val = *value;
1586 if (itimespecfix(&val.it_value))
1587 return (EINVAL);
1588
1589 if (timespecisset(&val.it_value)) {
1590 if (itimespecfix(&val.it_interval))
1591 return (EINVAL);
1592 } else {
1593 timespecclear(&val.it_interval);
1594 }
1595
1596 if (ovalue != NULL)
1597 realtimer_gettime(it, ovalue);
1598
1599 it->it_time = val;
1600 if (timespecisset(&val.it_value)) {
1601 realtimer_clocktime(it->it_clockid, &cts);
1602 ts = val.it_value;
1603 if ((flags & TIMER_ABSTIME) == 0) {
1604 /* Convert to absolute time. */
1605 timespecadd(&it->it_time.it_value, &cts,
1606 &it->it_time.it_value);
1607 } else {
1608 timespecsub(&ts, &cts, &ts);
1609 /*
1610 * We don't care if ts is negative, tvtohz will
1611 * fix it.
1612 */
1613 }
1614 TIMESPEC_TO_TIMEVAL(&tv, &ts);
1615 callout_reset(&it->it_callout, tvtohz(&tv), realtimer_expire,
1616 it);
1617 } else {
1618 callout_stop(&it->it_callout);
1619 }
1620
1621 return (0);
1622 }
1623
1624 static void
1625 realtimer_clocktime(clockid_t id, struct timespec *ts)
1626 {
1627 if (id == CLOCK_REALTIME)
1628 getnanotime(ts);
1629 else /* CLOCK_MONOTONIC */
1630 getnanouptime(ts);
1631 }
1632
1633 int
1634 itimer_accept(struct proc *p, int timerid, ksiginfo_t *ksi)
1635 {
1636 struct itimer *it;
1637
1638 PROC_LOCK_ASSERT(p, MA_OWNED);
1639 it = itimer_find(p, timerid);
1640 if (it != NULL) {
1641 ksi->ksi_overrun = it->it_overrun;
1642 it->it_overrun_last = it->it_overrun;
1643 it->it_overrun = 0;
1644 ITIMER_UNLOCK(it);
1645 return (0);
1646 }
1647 return (EINVAL);
1648 }
1649
1650 static int
1651 itimespecfix(struct timespec *ts)
1652 {
1653
1654 if (ts->tv_sec < 0 || ts->tv_nsec < 0 || ts->tv_nsec >= NS_PER_SEC)
1655 return (EINVAL);
1656 if ((UINT64_MAX - ts->tv_nsec) / NS_PER_SEC < ts->tv_sec)
1657 return (EINVAL);
1658 if (ts->tv_sec == 0 && ts->tv_nsec != 0 && ts->tv_nsec < tick * 1000)
1659 ts->tv_nsec = tick * 1000;
1660 return (0);
1661 }
1662
1663 #define timespectons(tsp) \
1664 ((uint64_t)(tsp)->tv_sec * NS_PER_SEC + (tsp)->tv_nsec)
1665 #define timespecfromns(ns) (struct timespec){ \
1666 .tv_sec = (ns) / NS_PER_SEC, \
1667 .tv_nsec = (ns) % NS_PER_SEC \
1668 }
1669
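/*
 * Common body of the realtime timer callout.  Once the deadline has been
 * reached, a periodic timer is advanced by whole intervals (accumulating
 * the overrun count) and re-armed, and the notification is delivered via
 * itimer_fire().  If the deadline has not yet arrived the callout is just
 * re-armed for the time remaining.
 */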
1670 static void
1671 realtimer_expire_l(struct itimer *it, bool proc_locked)
1672 {
1673 struct timespec cts, ts;
1674 struct timeval tv;
1675 struct proc *p;
1676 uint64_t interval, now, overruns, value;
1677
1678 realtimer_clocktime(it->it_clockid, &cts);
1679 /* Only fire if time is reached. */
1680 if (timespeccmp(&cts, &it->it_time.it_value, >=)) {
1681 if (timespecisset(&it->it_time.it_interval)) {
1682 timespecadd(&it->it_time.it_value,
1683 &it->it_time.it_interval,
1684 &it->it_time.it_value);
1685
1686 interval = timespectons(&it->it_time.it_interval);
1687 value = timespectons(&it->it_time.it_value);
1688 now = timespectons(&cts);
1689
1690 if (now >= value) {
1691 /*
1692 * We missed at least one period.
1693 */
1694 overruns = howmany(now - value + 1, interval);
1695 if (it->it_overrun + overruns >=
1696 it->it_overrun &&
1697 it->it_overrun + overruns <= INT_MAX) {
1698 it->it_overrun += (int)overruns;
1699 } else {
1700 it->it_overrun = INT_MAX;
1701 it->it_ksi.ksi_errno = ERANGE;
1702 }
1703 value =
1704 now + interval - (now - value) % interval;
1705 it->it_time.it_value = timespecfromns(value);
1706 }
1707 } else {
1708 /* single shot timer ? */
1709 timespecclear(&it->it_time.it_value);
1710 }
1711
1712 p = it->it_proc;
1713 if (timespecisset(&it->it_time.it_value)) {
1714 if (P_SHOULDSTOP(p) || P_KILLED(p)) {
1715 it->it_flags |= ITF_PSTOPPED;
1716 } else {
1717 timespecsub(&it->it_time.it_value, &cts, &ts);
1718 TIMESPEC_TO_TIMEVAL(&tv, &ts);
1719 callout_reset(&it->it_callout, tvtohz(&tv),
1720 realtimer_expire, it);
1721 }
1722 }
1723
1724 itimer_enter(it);
1725 ITIMER_UNLOCK(it);
1726 if (proc_locked)
1727 PROC_UNLOCK(p);
1728 itimer_fire(it);
1729 if (proc_locked)
1730 PROC_LOCK(p);
1731 ITIMER_LOCK(it);
1732 itimer_leave(it);
1733 } else if (timespecisset(&it->it_time.it_value)) {
1734 p = it->it_proc;
1735 if (P_SHOULDSTOP(p) || P_KILLED(p)) {
1736 it->it_flags |= ITF_PSTOPPED;
1737 } else {
1738 ts = it->it_time.it_value;
1739 timespecsub(&ts, &cts, &ts);
1740 TIMESPEC_TO_TIMEVAL(&tv, &ts);
1741 callout_reset(&it->it_callout, tvtohz(&tv),
1742 realtimer_expire, it);
1743 }
1744 }
1745 }
1746
1747 /* Timeout callback for realtime timer */
1748 static void
1749 realtimer_expire(void *arg)
1750 {
1751 realtimer_expire_l(arg, false);
1752 }
1753
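/*
 * Deliver a timer's notification: queue its ksiginfo to the target thread
 * or process, or record an overrun if the previous signal is still queued.
 */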
1754 static void
1755 itimer_fire(struct itimer *it)
1756 {
1757 struct proc *p = it->it_proc;
1758 struct thread *td;
1759
1760 if (it->it_sigev.sigev_notify == SIGEV_SIGNAL ||
1761 it->it_sigev.sigev_notify == SIGEV_THREAD_ID) {
1762 if (sigev_findtd(p, &it->it_sigev, &td) != 0) {
1763 ITIMER_LOCK(it);
1764 timespecclear(&it->it_time.it_value);
1765 timespecclear(&it->it_time.it_interval);
1766 callout_stop(&it->it_callout);
1767 ITIMER_UNLOCK(it);
1768 return;
1769 }
1770 if (!KSI_ONQ(&it->it_ksi)) {
1771 it->it_ksi.ksi_errno = 0;
1772 ksiginfo_set_sigev(&it->it_ksi, &it->it_sigev);
1773 tdsendsignal(p, td, it->it_ksi.ksi_signo, &it->it_ksi);
1774 } else {
1775 if (it->it_overrun < INT_MAX)
1776 it->it_overrun++;
1777 else
1778 it->it_ksi.ksi_errno = ERANGE;
1779 }
1780 PROC_UNLOCK(p);
1781 }
1782 }
1783
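/*
 * Lazily allocate the per-process itimers structure, discarding our copy
 * if another thread installed one first.
 */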
1784 static void
1785 itimers_alloc(struct proc *p)
1786 {
1787 struct itimers *its;
1788 int i;
1789
1790 its = malloc(sizeof (struct itimers), M_SUBPROC, M_WAITOK | M_ZERO);
1791 LIST_INIT(&its->its_virtual);
1792 LIST_INIT(&its->its_prof);
1793 TAILQ_INIT(&its->its_worklist);
1794 for (i = 0; i < TIMER_MAX; i++)
1795 its->its_timers[i] = NULL;
1796 PROC_LOCK(p);
1797 if (p->p_itimers == NULL) {
1798 p->p_itimers = its;
1799 PROC_UNLOCK(p);
1800 }
1801 else {
1802 PROC_UNLOCK(p);
1803 free(its, M_SUBPROC);
1804 }
1805 }
1806
1807 /* Clean up timers when some process events are being triggered. */
1808 static void
1809 itimers_event_exit_exec(int start_idx, struct proc *p)
1810 {
1811 struct itimers *its;
1812 struct itimer *it;
1813 int i;
1814
1815 its = p->p_itimers;
1816 if (its == NULL)
1817 return;
1818
1819 for (i = start_idx; i < TIMER_MAX; ++i) {
1820 if ((it = its->its_timers[i]) != NULL)
1821 kern_ktimer_delete(curthread, i);
1822 }
1823 if (its->its_timers[0] == NULL && its->its_timers[1] == NULL &&
1824 its->its_timers[2] == NULL) {
1825 /* Synchronize with itimer_proc_continue(). */
1826 PROC_LOCK(p);
1827 p->p_itimers = NULL;
1828 PROC_UNLOCK(p);
1829 free(its, M_SUBPROC);
1830 }
1831 }
1832
1833 void
1834 itimers_exec(struct proc *p)
1835 {
1836 /*
1837 * According to susv3, XSI interval timers should be inherited
1838 * by new image.
1839 */
1840 itimers_event_exit_exec(3, p);
1841 }
1842
1843 void
1844 itimers_exit(struct proc *p)
1845 {
1846 itimers_event_exit_exec(0, p);
1847 }