FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_time.c
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1982, 1986, 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * @(#)kern_time.c 8.1 (Berkeley) 6/10/93
32 */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 #include "opt_ktrace.h"
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/limits.h>
42 #include <sys/clock.h>
43 #include <sys/lock.h>
44 #include <sys/mutex.h>
45 #include <sys/sysproto.h>
46 #include <sys/resourcevar.h>
47 #include <sys/signalvar.h>
48 #include <sys/kernel.h>
49 #include <sys/sleepqueue.h>
50 #include <sys/syscallsubr.h>
51 #include <sys/sysctl.h>
52 #include <sys/priv.h>
53 #include <sys/proc.h>
54 #include <sys/posix4.h>
55 #include <sys/time.h>
56 #include <sys/timers.h>
57 #include <sys/timetc.h>
58 #include <sys/vnode.h>
59 #ifdef KTRACE
60 #include <sys/ktrace.h>
61 #endif
62
63 #include <vm/vm.h>
64 #include <vm/vm_extern.h>
65
66 #define MAX_CLOCKS (CLOCK_MONOTONIC+1)
67 #define CPUCLOCK_BIT 0x80000000
68 #define CPUCLOCK_PROCESS_BIT 0x40000000
69 #define CPUCLOCK_ID_MASK (~(CPUCLOCK_BIT|CPUCLOCK_PROCESS_BIT))
70 #define MAKE_THREAD_CPUCLOCK(tid) (CPUCLOCK_BIT|(tid))
71 #define MAKE_PROCESS_CPUCLOCK(pid) \
72 (CPUCLOCK_BIT|CPUCLOCK_PROCESS_BIT|(pid))
73
74 #define NS_PER_SEC 1000000000
75
76 static struct kclock posix_clocks[MAX_CLOCKS];
77 static uma_zone_t itimer_zone = NULL;
78
79 /*
80 * Time of day and interval timer support.
81 *
82 * These routines provide the kernel entry points to get and set
83 * the time-of-day and per-process interval timers. Subroutines
84 * here provide support for adding and subtracting timeval structures
85 * and decrementing interval timers, optionally reloading the interval
86 * timers when they expire.
87 */
88
89 static int settime(struct thread *, struct timeval *);
90 static void timevalfix(struct timeval *);
91 static int user_clock_nanosleep(struct thread *td, clockid_t clock_id,
92 int flags, const struct timespec *ua_rqtp,
93 struct timespec *ua_rmtp);
94
95 static void itimer_start(void);
96 static int itimer_init(void *, int, int);
97 static void itimer_fini(void *, int);
98 static void itimer_enter(struct itimer *);
99 static void itimer_leave(struct itimer *);
100 static struct itimer *itimer_find(struct proc *, int);
101 static void itimers_alloc(struct proc *);
102 static int realtimer_create(struct itimer *);
103 static int realtimer_gettime(struct itimer *, struct itimerspec *);
104 static int realtimer_settime(struct itimer *, int,
105 struct itimerspec *, struct itimerspec *);
106 static int realtimer_delete(struct itimer *);
107 static void realtimer_clocktime(clockid_t, struct timespec *);
108 static void realtimer_expire(void *);
109 static void realtimer_expire_l(struct itimer *it, bool proc_locked);
110
111 static void realitexpire(void *arg);
112
113 static int register_posix_clock(int, const struct kclock *);
114 static void itimer_fire(struct itimer *it);
115 static int itimespecfix(struct timespec *ts);
116
117 #define CLOCK_CALL(clock, call, arglist) \
118 ((*posix_clocks[clock].call) arglist)
119
120 SYSINIT(posix_timer, SI_SUB_P1003_1B, SI_ORDER_FIRST+4, itimer_start, NULL);
121
122 static int
123 settime(struct thread *td, struct timeval *tv)
124 {
125 struct timeval delta, tv1, tv2;
126 static struct timeval maxtime, laststep;
127 struct timespec ts;
128
129 microtime(&tv1);
130 delta = *tv;
131 timevalsub(&delta, &tv1);
132
133 /*
134 * If the system is secure, we do not allow the time to be
135 * set to a value earlier than 1 second less than the highest
136 * time we have yet seen. The worst a miscreant can do in
137 	 * this circumstance is "freeze" time.  He cannot go
138 * back to the past.
139 *
140 * We similarly do not allow the clock to be stepped more
141 * than one second, nor more than once per second. This allows
142 * a miscreant to make the clock march double-time, but no worse.
143 */
144 if (securelevel_gt(td->td_ucred, 1) != 0) {
145 if (delta.tv_sec < 0 || delta.tv_usec < 0) {
146 /*
147 * Update maxtime to latest time we've seen.
148 */
149 if (tv1.tv_sec > maxtime.tv_sec)
150 maxtime = tv1;
151 tv2 = *tv;
152 timevalsub(&tv2, &maxtime);
153 if (tv2.tv_sec < -1) {
154 tv->tv_sec = maxtime.tv_sec - 1;
155 printf("Time adjustment clamped to -1 second\n");
156 }
157 } else {
158 if (tv1.tv_sec == laststep.tv_sec)
159 return (EPERM);
160 if (delta.tv_sec > 1) {
161 tv->tv_sec = tv1.tv_sec + 1;
162 printf("Time adjustment clamped to +1 second\n");
163 }
164 laststep = *tv;
165 }
166 }
167
168 ts.tv_sec = tv->tv_sec;
169 ts.tv_nsec = tv->tv_usec * 1000;
170 tc_setclock(&ts);
171 resettodr();
172 return (0);
173 }
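/*
 * Worked example of the securelevel > 1 policy above (illustrative):
 * with maxtime = 1000s, a request to step back to 990s gives
 * tv2.tv_sec = -10 < -1, so the new time is clamped to 999s (at most
 * one second before the highest time seen).  A forward step from
 * tv1 = 1000s to 1005s (delta > 1s) is clamped to 1001s, and a second
 * step within the same second fails with EPERM.
 */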
174
175 #ifndef _SYS_SYSPROTO_H_
176 struct clock_getcpuclockid2_args {
177 id_t id;
178 	int	which;
179 clockid_t *clock_id;
180 };
181 #endif
182 /* ARGSUSED */
183 int
184 sys_clock_getcpuclockid2(struct thread *td, struct clock_getcpuclockid2_args *uap)
185 {
186 clockid_t clk_id;
187 int error;
188
189 error = kern_clock_getcpuclockid2(td, uap->id, uap->which, &clk_id);
190 if (error == 0)
191 error = copyout(&clk_id, uap->clock_id, sizeof(clockid_t));
192 return (error);
193 }
194
195 int
196 kern_clock_getcpuclockid2(struct thread *td, id_t id, int which,
197 clockid_t *clk_id)
198 {
199 struct proc *p;
200 pid_t pid;
201 lwpid_t tid;
202 int error;
203
204 switch (which) {
205 case CPUCLOCK_WHICH_PID:
206 if (id != 0) {
207 error = pget(id, PGET_CANSEE | PGET_NOTID, &p);
208 if (error != 0)
209 return (error);
210 PROC_UNLOCK(p);
211 pid = id;
212 } else {
213 pid = td->td_proc->p_pid;
214 }
215 *clk_id = MAKE_PROCESS_CPUCLOCK(pid);
216 return (0);
217 case CPUCLOCK_WHICH_TID:
218 tid = id == 0 ? td->td_tid : id;
219 *clk_id = MAKE_THREAD_CPUCLOCK(tid);
220 return (0);
221 default:
222 return (EINVAL);
223 }
224 }
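/*
 * The macros above tag the target id in the high bits, e.g.
 * MAKE_PROCESS_CPUCLOCK(1234) == 0xC0000000 | 1234.  Minimal userland
 * sketch (illustrative, error handling omitted) using the POSIX
 * clock_getcpuclockid(3) wrapper, which is typically implemented via
 * this syscall on FreeBSD:
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *	#include <unistd.h>
 *
 *	clockid_t cid;
 *	struct timespec ts;
 *
 *	if (clock_getcpuclockid(getpid(), &cid) == 0 &&
 *	    clock_gettime(cid, &ts) == 0)
 *		printf("CPU time used: %jd.%09ld\n",
 *		    (intmax_t)ts.tv_sec, ts.tv_nsec);
 */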
225
226 #ifndef _SYS_SYSPROTO_H_
227 struct clock_gettime_args {
228 clockid_t clock_id;
229 struct timespec *tp;
230 };
231 #endif
232 /* ARGSUSED */
233 int
234 sys_clock_gettime(struct thread *td, struct clock_gettime_args *uap)
235 {
236 struct timespec ats;
237 int error;
238
239 error = kern_clock_gettime(td, uap->clock_id, &ats);
240 if (error == 0)
241 error = copyout(&ats, uap->tp, sizeof(ats));
242
243 return (error);
244 }
245
246 static inline void
247 cputick2timespec(uint64_t runtime, struct timespec *ats)
248 {
249 uint64_t tr;
250 tr = cpu_tickrate();
251 ats->tv_sec = runtime / tr;
252 ats->tv_nsec = ((runtime % tr) * 1000000000ULL) / tr;
253 }
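/*
 * Example of the conversion above (illustrative): with a tick rate of
 * tr = 1000000000 (a 1 GHz cpu_ticks() source) and runtime =
 * 2500000000 ticks, tv_sec = 2500000000 / 1000000000 = 2 and tv_nsec =
 * (500000000 * 1000000000) / 1000000000 = 500000000, i.e. 2.5 seconds
 * of CPU time.
 */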
254
255 void
256 kern_thread_cputime(struct thread *targettd, struct timespec *ats)
257 {
258 uint64_t runtime, curtime, switchtime;
259
260 if (targettd == NULL) { /* current thread */
261 spinlock_enter();
262 switchtime = PCPU_GET(switchtime);
263 curtime = cpu_ticks();
264 runtime = curthread->td_runtime;
265 spinlock_exit();
266 runtime += curtime - switchtime;
267 } else {
268 PROC_LOCK_ASSERT(targettd->td_proc, MA_OWNED);
269 thread_lock(targettd);
270 runtime = targettd->td_runtime;
271 thread_unlock(targettd);
272 }
273 cputick2timespec(runtime, ats);
274 }
275
276 void
277 kern_process_cputime(struct proc *targetp, struct timespec *ats)
278 {
279 uint64_t runtime;
280 struct rusage ru;
281
282 PROC_LOCK_ASSERT(targetp, MA_OWNED);
283 PROC_STATLOCK(targetp);
284 rufetch(targetp, &ru);
285 runtime = targetp->p_rux.rux_runtime;
286 if (curthread->td_proc == targetp)
287 runtime += cpu_ticks() - PCPU_GET(switchtime);
288 PROC_STATUNLOCK(targetp);
289 cputick2timespec(runtime, ats);
290 }
291
292 static int
293 get_cputime(struct thread *td, clockid_t clock_id, struct timespec *ats)
294 {
295 struct proc *p, *p2;
296 struct thread *td2;
297 lwpid_t tid;
298 pid_t pid;
299 int error;
300
301 p = td->td_proc;
302 if ((clock_id & CPUCLOCK_PROCESS_BIT) == 0) {
303 tid = clock_id & CPUCLOCK_ID_MASK;
304 td2 = tdfind(tid, p->p_pid);
305 if (td2 == NULL)
306 return (EINVAL);
307 kern_thread_cputime(td2, ats);
308 PROC_UNLOCK(td2->td_proc);
309 } else {
310 pid = clock_id & CPUCLOCK_ID_MASK;
311 error = pget(pid, PGET_CANSEE, &p2);
312 if (error != 0)
313 return (EINVAL);
314 kern_process_cputime(p2, ats);
315 PROC_UNLOCK(p2);
316 }
317 return (0);
318 }
319
320 int
321 kern_clock_gettime(struct thread *td, clockid_t clock_id, struct timespec *ats)
322 {
323 struct timeval sys, user;
324 struct proc *p;
325
326 p = td->td_proc;
327 switch (clock_id) {
328 case CLOCK_REALTIME: /* Default to precise. */
329 case CLOCK_REALTIME_PRECISE:
330 nanotime(ats);
331 break;
332 case CLOCK_REALTIME_FAST:
333 getnanotime(ats);
334 break;
335 case CLOCK_VIRTUAL:
336 PROC_LOCK(p);
337 PROC_STATLOCK(p);
338 calcru(p, &user, &sys);
339 PROC_STATUNLOCK(p);
340 PROC_UNLOCK(p);
341 TIMEVAL_TO_TIMESPEC(&user, ats);
342 break;
343 case CLOCK_PROF:
344 PROC_LOCK(p);
345 PROC_STATLOCK(p);
346 calcru(p, &user, &sys);
347 PROC_STATUNLOCK(p);
348 PROC_UNLOCK(p);
349 timevaladd(&user, &sys);
350 TIMEVAL_TO_TIMESPEC(&user, ats);
351 break;
352 case CLOCK_MONOTONIC: /* Default to precise. */
353 case CLOCK_MONOTONIC_PRECISE:
354 case CLOCK_UPTIME:
355 case CLOCK_UPTIME_PRECISE:
356 nanouptime(ats);
357 break;
358 case CLOCK_UPTIME_FAST:
359 case CLOCK_MONOTONIC_FAST:
360 getnanouptime(ats);
361 break;
362 case CLOCK_SECOND:
363 ats->tv_sec = time_second;
364 ats->tv_nsec = 0;
365 break;
366 case CLOCK_THREAD_CPUTIME_ID:
367 kern_thread_cputime(NULL, ats);
368 break;
369 case CLOCK_PROCESS_CPUTIME_ID:
370 PROC_LOCK(p);
371 kern_process_cputime(p, ats);
372 PROC_UNLOCK(p);
373 break;
374 default:
375 if ((int)clock_id >= 0)
376 return (EINVAL);
377 return (get_cputime(td, clock_id, ats));
378 }
379 return (0);
380 }
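/*
 * Minimal userland sketch of this interface (illustrative; do_work()
 * is a placeholder).  The *_FAST variants return the cached
 * getnano*time() values and are cheaper but coarser than the default
 * precise clocks:
 *
 *	#include <time.h>
 *
 *	struct timespec t0, t1;
 *
 *	clock_gettime(CLOCK_MONOTONIC, &t0);
 *	do_work();
 *	clock_gettime(CLOCK_MONOTONIC, &t1);
 *
 * The difference t1 - t0 is the elapsed monotonic (uptime) interval.
 */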
381
382 #ifndef _SYS_SYSPROTO_H_
383 struct clock_settime_args {
384 clockid_t clock_id;
385 const struct timespec *tp;
386 };
387 #endif
388 /* ARGSUSED */
389 int
390 sys_clock_settime(struct thread *td, struct clock_settime_args *uap)
391 {
392 struct timespec ats;
393 int error;
394
395 if ((error = copyin(uap->tp, &ats, sizeof(ats))) != 0)
396 return (error);
397 return (kern_clock_settime(td, uap->clock_id, &ats));
398 }
399
400 static int allow_insane_settime = 0;
401 SYSCTL_INT(_debug, OID_AUTO, allow_insane_settime, CTLFLAG_RWTUN,
402 &allow_insane_settime, 0,
403 "do not perform possibly restrictive checks on settime(2) args");
404
405 int
406 kern_clock_settime(struct thread *td, clockid_t clock_id, struct timespec *ats)
407 {
408 struct timeval atv;
409 int error;
410
411 if ((error = priv_check(td, PRIV_CLOCK_SETTIME)) != 0)
412 return (error);
413 if (clock_id != CLOCK_REALTIME)
414 return (EINVAL);
415 if (!timespecvalid_interval(ats))
416 return (EINVAL);
417 if (!allow_insane_settime &&
418 (ats->tv_sec > 8000ULL * 365 * 24 * 60 * 60 ||
419 ats->tv_sec < utc_offset()))
420 return (EINVAL);
421 /* XXX Don't convert nsec->usec and back */
422 TIMESPEC_TO_TIMEVAL(&atv, ats);
423 error = settime(td, &atv);
424 return (error);
425 }
426
427 #ifndef _SYS_SYSPROTO_H_
428 struct clock_getres_args {
429 clockid_t clock_id;
430 struct timespec *tp;
431 };
432 #endif
433 int
434 sys_clock_getres(struct thread *td, struct clock_getres_args *uap)
435 {
436 struct timespec ts;
437 int error;
438
439 if (uap->tp == NULL)
440 return (0);
441
442 error = kern_clock_getres(td, uap->clock_id, &ts);
443 if (error == 0)
444 error = copyout(&ts, uap->tp, sizeof(ts));
445 return (error);
446 }
447
448 int
449 kern_clock_getres(struct thread *td, clockid_t clock_id, struct timespec *ts)
450 {
451
452 ts->tv_sec = 0;
453 switch (clock_id) {
454 case CLOCK_REALTIME:
455 case CLOCK_REALTIME_FAST:
456 case CLOCK_REALTIME_PRECISE:
457 case CLOCK_MONOTONIC:
458 case CLOCK_MONOTONIC_FAST:
459 case CLOCK_MONOTONIC_PRECISE:
460 case CLOCK_UPTIME:
461 case CLOCK_UPTIME_FAST:
462 case CLOCK_UPTIME_PRECISE:
463 /*
464 * Round up the result of the division cheaply by adding 1.
465 * Rounding up is especially important if rounding down
466 * would give 0. Perfect rounding is unimportant.
467 */
468 ts->tv_nsec = NS_PER_SEC / tc_getfrequency() + 1;
469 break;
470 case CLOCK_VIRTUAL:
471 case CLOCK_PROF:
472 /* Accurately round up here because we can do so cheaply. */
473 ts->tv_nsec = howmany(NS_PER_SEC, hz);
474 break;
475 case CLOCK_SECOND:
476 ts->tv_sec = 1;
477 ts->tv_nsec = 0;
478 break;
479 case CLOCK_THREAD_CPUTIME_ID:
480 case CLOCK_PROCESS_CPUTIME_ID:
481 cputime:
482 		ts->tv_nsec = NS_PER_SEC / cpu_tickrate() + 1;
483 break;
484 default:
485 if ((int)clock_id < 0)
486 goto cputime;
487 return (EINVAL);
488 }
489 return (0);
490 }
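/*
 * Worked example (illustrative): with tc_getfrequency() = 1000000
 * (a 1 MHz timecounter) the reported realtime/monotonic resolution is
 * 1000000000 / 1000000 + 1 = 1001 ns; with hz = 1000 the
 * CLOCK_VIRTUAL/CLOCK_PROF resolution is howmany(1000000000, 1000) =
 * 1000000 ns, i.e. one hz tick.
 */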
491
492 int
493 kern_nanosleep(struct thread *td, struct timespec *rqt, struct timespec *rmt)
494 {
495
496 return (kern_clock_nanosleep(td, CLOCK_REALTIME, TIMER_RELTIME, rqt,
497 rmt));
498 }
499
500 static uint8_t nanowait[MAXCPU];
501
502 int
503 kern_clock_nanosleep(struct thread *td, clockid_t clock_id, int flags,
504 const struct timespec *rqt, struct timespec *rmt)
505 {
506 struct timespec ts, now;
507 sbintime_t sbt, sbtt, prec, tmp;
508 time_t over;
509 int error;
510 bool is_abs_real;
511
512 if (rqt->tv_nsec < 0 || rqt->tv_nsec >= NS_PER_SEC)
513 return (EINVAL);
514 if ((flags & ~TIMER_ABSTIME) != 0)
515 return (EINVAL);
516 switch (clock_id) {
517 case CLOCK_REALTIME:
518 case CLOCK_REALTIME_PRECISE:
519 case CLOCK_REALTIME_FAST:
520 case CLOCK_SECOND:
521 is_abs_real = (flags & TIMER_ABSTIME) != 0;
522 break;
523 case CLOCK_MONOTONIC:
524 case CLOCK_MONOTONIC_PRECISE:
525 case CLOCK_MONOTONIC_FAST:
526 case CLOCK_UPTIME:
527 case CLOCK_UPTIME_PRECISE:
528 case CLOCK_UPTIME_FAST:
529 is_abs_real = false;
530 break;
531 case CLOCK_VIRTUAL:
532 case CLOCK_PROF:
533 case CLOCK_PROCESS_CPUTIME_ID:
534 return (ENOTSUP);
535 case CLOCK_THREAD_CPUTIME_ID:
536 default:
537 return (EINVAL);
538 }
539 do {
540 ts = *rqt;
541 if ((flags & TIMER_ABSTIME) != 0) {
542 if (is_abs_real)
543 td->td_rtcgen =
544 atomic_load_acq_int(&rtc_generation);
545 error = kern_clock_gettime(td, clock_id, &now);
546 KASSERT(error == 0, ("kern_clock_gettime: %d", error));
547 timespecsub(&ts, &now, &ts);
548 }
549 if (ts.tv_sec < 0 || (ts.tv_sec == 0 && ts.tv_nsec == 0)) {
550 error = EWOULDBLOCK;
551 break;
552 }
553 if (ts.tv_sec > INT32_MAX / 2) {
554 over = ts.tv_sec - INT32_MAX / 2;
555 ts.tv_sec -= over;
556 } else
557 over = 0;
558 tmp = tstosbt(ts);
559 prec = tmp;
560 prec >>= tc_precexp;
561 if (TIMESEL(&sbt, tmp))
562 sbt += tc_tick_sbt;
563 sbt += tmp;
564 error = tsleep_sbt(&nanowait[curcpu], PWAIT | PCATCH, "nanslp",
565 sbt, prec, C_ABSOLUTE);
566 } while (error == 0 && is_abs_real && td->td_rtcgen == 0);
567 td->td_rtcgen = 0;
568 if (error != EWOULDBLOCK) {
569 if (TIMESEL(&sbtt, tmp))
570 sbtt += tc_tick_sbt;
571 if (sbtt >= sbt)
572 return (0);
573 if (error == ERESTART)
574 error = EINTR;
575 if ((flags & TIMER_ABSTIME) == 0 && rmt != NULL) {
576 ts = sbttots(sbt - sbtt);
577 ts.tv_sec += over;
578 if (ts.tv_sec < 0)
579 timespecclear(&ts);
580 *rmt = ts;
581 }
582 return (error);
583 }
584 return (0);
585 }
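/*
 * Userland sketch of a drift-free periodic sleep over this path
 * (illustrative; do_periodic_work() is a placeholder, error handling
 * abbreviated).  With TIMER_ABSTIME the deadline is fixed, so delays
 * in the caller do not accumulate; for absolute CLOCK_REALTIME sleeps
 * the loop above also re-targets the deadline when the wall clock is
 * stepped (rtc_generation):
 *
 *	#include <errno.h>
 *	#include <time.h>
 *
 *	struct timespec next;
 *
 *	clock_gettime(CLOCK_MONOTONIC, &next);
 *	for (;;) {
 *		next.tv_sec += 1;
 *		while (clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
 *		    &next, NULL) == EINTR)
 *			;
 *		do_periodic_work();
 *	}
 */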
586
587 #ifndef _SYS_SYSPROTO_H_
588 struct nanosleep_args {
589 struct timespec *rqtp;
590 struct timespec *rmtp;
591 };
592 #endif
593 /* ARGSUSED */
594 int
595 sys_nanosleep(struct thread *td, struct nanosleep_args *uap)
596 {
597
598 return (user_clock_nanosleep(td, CLOCK_REALTIME, TIMER_RELTIME,
599 uap->rqtp, uap->rmtp));
600 }
601
602 #ifndef _SYS_SYSPROTO_H_
603 struct clock_nanosleep_args {
604 clockid_t clock_id;
605 int flags;
606 struct timespec *rqtp;
607 struct timespec *rmtp;
608 };
609 #endif
610 /* ARGSUSED */
611 int
612 sys_clock_nanosleep(struct thread *td, struct clock_nanosleep_args *uap)
613 {
614 int error;
615
616 error = user_clock_nanosleep(td, uap->clock_id, uap->flags, uap->rqtp,
617 uap->rmtp);
618 return (kern_posix_error(td, error));
619 }
620
621 static int
622 user_clock_nanosleep(struct thread *td, clockid_t clock_id, int flags,
623 const struct timespec *ua_rqtp, struct timespec *ua_rmtp)
624 {
625 struct timespec rmt, rqt;
626 int error, error2;
627
628 error = copyin(ua_rqtp, &rqt, sizeof(rqt));
629 if (error)
630 return (error);
631 error = kern_clock_nanosleep(td, clock_id, flags, &rqt, &rmt);
632 if (error == EINTR && ua_rmtp != NULL && (flags & TIMER_ABSTIME) == 0) {
633 error2 = copyout(&rmt, ua_rmtp, sizeof(rmt));
634 if (error2 != 0)
635 error = error2;
636 }
637 return (error);
638 }
639
640 #ifndef _SYS_SYSPROTO_H_
641 struct gettimeofday_args {
642 struct timeval *tp;
643 struct timezone *tzp;
644 };
645 #endif
646 /* ARGSUSED */
647 int
648 sys_gettimeofday(struct thread *td, struct gettimeofday_args *uap)
649 {
650 struct timeval atv;
651 struct timezone rtz;
652 int error = 0;
653
654 if (uap->tp) {
655 microtime(&atv);
656 error = copyout(&atv, uap->tp, sizeof (atv));
657 }
658 if (error == 0 && uap->tzp != NULL) {
659 rtz.tz_minuteswest = 0;
660 rtz.tz_dsttime = 0;
661 error = copyout(&rtz, uap->tzp, sizeof (rtz));
662 }
663 return (error);
664 }
665
666 #ifndef _SYS_SYSPROTO_H_
667 struct settimeofday_args {
668 struct timeval *tv;
669 struct timezone *tzp;
670 };
671 #endif
672 /* ARGSUSED */
673 int
674 sys_settimeofday(struct thread *td, struct settimeofday_args *uap)
675 {
676 struct timeval atv, *tvp;
677 struct timezone atz, *tzp;
678 int error;
679
680 if (uap->tv) {
681 error = copyin(uap->tv, &atv, sizeof(atv));
682 if (error)
683 return (error);
684 tvp = &atv;
685 } else
686 tvp = NULL;
687 if (uap->tzp) {
688 error = copyin(uap->tzp, &atz, sizeof(atz));
689 if (error)
690 return (error);
691 tzp = &atz;
692 } else
693 tzp = NULL;
694 return (kern_settimeofday(td, tvp, tzp));
695 }
696
697 int
698 kern_settimeofday(struct thread *td, struct timeval *tv, struct timezone *tzp)
699 {
700 int error;
701
702 error = priv_check(td, PRIV_SETTIMEOFDAY);
703 if (error)
704 return (error);
705 /* Verify all parameters before changing time. */
706 if (tv) {
707 if (tv->tv_usec < 0 || tv->tv_usec >= 1000000 ||
708 tv->tv_sec < 0)
709 return (EINVAL);
710 error = settime(td, tv);
711 }
712 return (error);
713 }
714
715 /*
716 * Get value of an interval timer. The process virtual and profiling virtual
717 * time timers are kept in the p_stats area, since they can be swapped out.
718 * These are kept internally in the way they are specified externally: in
719 * time until they expire.
720 *
721 * The real time interval timer is kept in the process table slot for the
722 * process, and its value (it_value) is kept as an absolute time rather than
723 * as a delta, so that it is easy to keep periodic real-time signals from
724 * drifting.
725 *
726 * Virtual time timers are processed in the hardclock() routine of
727 * kern_clock.c. The real time timer is processed by a timeout routine,
728 * called from the softclock() routine. Since a callout may be delayed in
729 * real time due to interrupt processing in the system, it is possible for
730 * the real time timeout routine (realitexpire, given below), to be delayed
731 * in real time past when it is supposed to occur. It does not suffice,
732 * therefore, to reload the real timer .it_value from the real time timers
733 * .it_interval. Rather, we compute the next time in absolute time the timer
734 * should go off.
735 */
736 #ifndef _SYS_SYSPROTO_H_
737 struct getitimer_args {
738 u_int which;
739 struct itimerval *itv;
740 };
741 #endif
742 int
743 sys_getitimer(struct thread *td, struct getitimer_args *uap)
744 {
745 struct itimerval aitv;
746 int error;
747
748 error = kern_getitimer(td, uap->which, &aitv);
749 if (error != 0)
750 return (error);
751 return (copyout(&aitv, uap->itv, sizeof (struct itimerval)));
752 }
753
754 int
755 kern_getitimer(struct thread *td, u_int which, struct itimerval *aitv)
756 {
757 struct proc *p = td->td_proc;
758 struct timeval ctv;
759
760 if (which > ITIMER_PROF)
761 return (EINVAL);
762
763 if (which == ITIMER_REAL) {
764 /*
765 * Convert from absolute to relative time in .it_value
766 * part of real time timer. If time for real time timer
767 * has passed return 0, else return difference between
768 * current time and time for the timer to go off.
769 */
770 PROC_LOCK(p);
771 *aitv = p->p_realtimer;
772 PROC_UNLOCK(p);
773 if (timevalisset(&aitv->it_value)) {
774 microuptime(&ctv);
775 if (timevalcmp(&aitv->it_value, &ctv, <))
776 timevalclear(&aitv->it_value);
777 else
778 timevalsub(&aitv->it_value, &ctv);
779 }
780 } else {
781 PROC_ITIMLOCK(p);
782 *aitv = p->p_stats->p_timer[which];
783 PROC_ITIMUNLOCK(p);
784 }
785 #ifdef KTRACE
786 if (KTRPOINT(td, KTR_STRUCT))
787 ktritimerval(aitv);
788 #endif
789 return (0);
790 }
791
792 #ifndef _SYS_SYSPROTO_H_
793 struct setitimer_args {
794 u_int which;
795 struct itimerval *itv, *oitv;
796 };
797 #endif
798 int
799 sys_setitimer(struct thread *td, struct setitimer_args *uap)
800 {
801 struct itimerval aitv, oitv;
802 int error;
803
804 if (uap->itv == NULL) {
805 uap->itv = uap->oitv;
806 return (sys_getitimer(td, (struct getitimer_args *)uap));
807 }
808
809 if ((error = copyin(uap->itv, &aitv, sizeof(struct itimerval))))
810 return (error);
811 error = kern_setitimer(td, uap->which, &aitv, &oitv);
812 if (error != 0 || uap->oitv == NULL)
813 return (error);
814 return (copyout(&oitv, uap->oitv, sizeof(struct itimerval)));
815 }
816
817 int
818 kern_setitimer(struct thread *td, u_int which, struct itimerval *aitv,
819 struct itimerval *oitv)
820 {
821 struct proc *p = td->td_proc;
822 struct timeval ctv;
823 sbintime_t sbt, pr;
824
825 if (aitv == NULL)
826 return (kern_getitimer(td, which, oitv));
827
828 if (which > ITIMER_PROF)
829 return (EINVAL);
830 #ifdef KTRACE
831 if (KTRPOINT(td, KTR_STRUCT))
832 ktritimerval(aitv);
833 #endif
834 if (itimerfix(&aitv->it_value) ||
835 aitv->it_value.tv_sec > INT32_MAX / 2)
836 return (EINVAL);
837 if (!timevalisset(&aitv->it_value))
838 timevalclear(&aitv->it_interval);
839 else if (itimerfix(&aitv->it_interval) ||
840 aitv->it_interval.tv_sec > INT32_MAX / 2)
841 return (EINVAL);
842
843 if (which == ITIMER_REAL) {
844 PROC_LOCK(p);
845 if (timevalisset(&p->p_realtimer.it_value))
846 callout_stop(&p->p_itcallout);
847 microuptime(&ctv);
848 if (timevalisset(&aitv->it_value)) {
849 pr = tvtosbt(aitv->it_value) >> tc_precexp;
850 timevaladd(&aitv->it_value, &ctv);
851 sbt = tvtosbt(aitv->it_value);
852 callout_reset_sbt(&p->p_itcallout, sbt, pr,
853 realitexpire, p, C_ABSOLUTE);
854 }
855 *oitv = p->p_realtimer;
856 p->p_realtimer = *aitv;
857 PROC_UNLOCK(p);
858 if (timevalisset(&oitv->it_value)) {
859 if (timevalcmp(&oitv->it_value, &ctv, <))
860 timevalclear(&oitv->it_value);
861 else
862 timevalsub(&oitv->it_value, &ctv);
863 }
864 } else {
865 if (aitv->it_interval.tv_sec == 0 &&
866 aitv->it_interval.tv_usec != 0 &&
867 aitv->it_interval.tv_usec < tick)
868 aitv->it_interval.tv_usec = tick;
869 if (aitv->it_value.tv_sec == 0 &&
870 aitv->it_value.tv_usec != 0 &&
871 aitv->it_value.tv_usec < tick)
872 aitv->it_value.tv_usec = tick;
873 PROC_ITIMLOCK(p);
874 *oitv = p->p_stats->p_timer[which];
875 p->p_stats->p_timer[which] = *aitv;
876 PROC_ITIMUNLOCK(p);
877 }
878 #ifdef KTRACE
879 if (KTRPOINT(td, KTR_STRUCT))
880 ktritimerval(oitv);
881 #endif
882 return (0);
883 }
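/*
 * Userland sketch of the ITIMER_REAL path above (illustrative;
 * handle_alarm() is a placeholder).  Note that the kernel stores
 * it_value as an absolute uptime (microuptime() is added above), which
 * is what keeps periodic SIGALRM delivery from drifting:
 *
 *	#include <sys/time.h>
 *	#include <signal.h>
 *
 *	struct itimerval itv = {
 *		.it_value    = { .tv_sec = 1, .tv_usec = 0 },
 *		.it_interval = { .tv_sec = 0, .tv_usec = 250000 },
 *	};
 *
 *	signal(SIGALRM, handle_alarm);
 *	setitimer(ITIMER_REAL, &itv, NULL);
 */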
884
885 static void
886 realitexpire_reset_callout(struct proc *p, sbintime_t *isbtp)
887 {
888 sbintime_t prec;
889
890 prec = isbtp == NULL ? tvtosbt(p->p_realtimer.it_interval) : *isbtp;
891 callout_reset_sbt(&p->p_itcallout, tvtosbt(p->p_realtimer.it_value),
892 prec >> tc_precexp, realitexpire, p, C_ABSOLUTE);
893 }
894
895 void
896 itimer_proc_continue(struct proc *p)
897 {
898 struct timeval ctv;
899 struct itimer *it;
900 int id;
901
902 PROC_LOCK_ASSERT(p, MA_OWNED);
903
904 if ((p->p_flag2 & P2_ITSTOPPED) != 0) {
905 p->p_flag2 &= ~P2_ITSTOPPED;
906 microuptime(&ctv);
907 if (timevalcmp(&p->p_realtimer.it_value, &ctv, >=))
908 realitexpire(p);
909 else
910 realitexpire_reset_callout(p, NULL);
911 }
912
913 if (p->p_itimers != NULL) {
914 for (id = 3; id < TIMER_MAX; id++) {
915 it = p->p_itimers->its_timers[id];
916 if (it == NULL)
917 continue;
918 if ((it->it_flags & ITF_PSTOPPED) != 0) {
919 ITIMER_LOCK(it);
920 if ((it->it_flags & ITF_PSTOPPED) != 0) {
921 it->it_flags &= ~ITF_PSTOPPED;
922 if ((it->it_flags & ITF_DELETING) == 0)
923 realtimer_expire_l(it, true);
924 }
925 ITIMER_UNLOCK(it);
926 }
927 }
928 }
929 }
930
931 /*
932 * Real interval timer expired:
933 * send process whose timer expired an alarm signal.
934 * If time is not set up to reload, then just return.
935 * Else compute next time timer should go off which is > current time.
936 * This is where delay in processing this timeout causes multiple
937 * SIGALRM calls to be compressed into one.
938 * tvtohz() always adds 1 to allow for the time until the next clock
939 * interrupt being strictly less than 1 clock tick, but we don't want
940 * that here since we want to appear to be in sync with the clock
941 * interrupt even when we're delayed.
942 */
943 static void
944 realitexpire(void *arg)
945 {
946 struct proc *p;
947 struct timeval ctv;
948 sbintime_t isbt;
949
950 p = (struct proc *)arg;
951 kern_psignal(p, SIGALRM);
952 if (!timevalisset(&p->p_realtimer.it_interval)) {
953 timevalclear(&p->p_realtimer.it_value);
954 return;
955 }
956
957 isbt = tvtosbt(p->p_realtimer.it_interval);
958 if (isbt >= sbt_timethreshold)
959 getmicrouptime(&ctv);
960 else
961 microuptime(&ctv);
962 do {
963 timevaladd(&p->p_realtimer.it_value,
964 &p->p_realtimer.it_interval);
965 } while (timevalcmp(&p->p_realtimer.it_value, &ctv, <=));
966
967 if (P_SHOULDSTOP(p) || P_KILLED(p)) {
968 p->p_flag2 |= P2_ITSTOPPED;
969 return;
970 }
971
972 p->p_flag2 &= ~P2_ITSTOPPED;
973 realitexpire_reset_callout(p, &isbt);
974 }
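/*
 * Worked example of the catch-up loop above (illustrative): with
 * it_interval = 10 ms and a callout that runs 35 ms after it_value,
 * the do/while advances it_value by 40 ms before rearming, so the
 * three missed periods are compressed into the single SIGALRM that
 * was just delivered and the timer stays in phase.
 */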
975
976 /*
977 * Check that a proposed value to load into the .it_value or
978 * .it_interval part of an interval timer is acceptable, and
979 * fix it to have at least minimal value (i.e. if it is less
980 * than the resolution of the clock, round it up.)
981 */
982 int
983 itimerfix(struct timeval *tv)
984 {
985
986 if (tv->tv_sec < 0 || tv->tv_usec < 0 || tv->tv_usec >= 1000000)
987 return (EINVAL);
988 if (tv->tv_sec == 0 && tv->tv_usec != 0 &&
989 tv->tv_usec < (u_int)tick / 16)
990 tv->tv_usec = (u_int)tick / 16;
991 return (0);
992 }
993
994 /*
995 * Decrement an interval timer by a specified number
996 * of microseconds, which must be less than a second,
997 * i.e. < 1000000. If the timer expires, then reload
998 * it. In this case, carry over (usec - old value) to
999 * reduce the value reloaded into the timer so that
1000 * the timer does not drift. This routine assumes
1001 * that it is called in a context where the timers
1002 * on which it is operating cannot change in value.
1003 */
1004 int
1005 itimerdecr(struct itimerval *itp, int usec)
1006 {
1007
1008 if (itp->it_value.tv_usec < usec) {
1009 if (itp->it_value.tv_sec == 0) {
1010 /* expired, and already in next interval */
1011 usec -= itp->it_value.tv_usec;
1012 goto expire;
1013 }
1014 itp->it_value.tv_usec += 1000000;
1015 itp->it_value.tv_sec--;
1016 }
1017 itp->it_value.tv_usec -= usec;
1018 usec = 0;
1019 if (timevalisset(&itp->it_value))
1020 return (1);
1021 /* expired, exactly at end of interval */
1022 expire:
1023 if (timevalisset(&itp->it_interval)) {
1024 itp->it_value = itp->it_interval;
1025 itp->it_value.tv_usec -= usec;
1026 if (itp->it_value.tv_usec < 0) {
1027 itp->it_value.tv_usec += 1000000;
1028 itp->it_value.tv_sec--;
1029 }
1030 } else
1031 itp->it_value.tv_usec = 0; /* sec is already 0 */
1032 return (0);
1033 }
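/*
 * Worked example (illustrative): it_value = {0, 300}, it_interval =
 * {0, 10000}, usec = 1000 (one 1 kHz tick).  The timer expires 700 us
 * into the tick, so after reload it_value becomes 10000 - 700 = 9300
 * us; the overshoot is carried into the next period and the timer
 * does not drift.
 */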
1034
1035 /*
1036 * Add and subtract routines for timevals.
1037 * N.B.: subtract routine doesn't deal with
1038 * results which are before the beginning,
1039 * it just gets very confused in this case.
1040 * Caveat emptor.
1041 */
1042 void
1043 timevaladd(struct timeval *t1, const struct timeval *t2)
1044 {
1045
1046 t1->tv_sec += t2->tv_sec;
1047 t1->tv_usec += t2->tv_usec;
1048 timevalfix(t1);
1049 }
1050
1051 void
1052 timevalsub(struct timeval *t1, const struct timeval *t2)
1053 {
1054
1055 t1->tv_sec -= t2->tv_sec;
1056 t1->tv_usec -= t2->tv_usec;
1057 timevalfix(t1);
1058 }
1059
1060 static void
1061 timevalfix(struct timeval *t1)
1062 {
1063
1064 if (t1->tv_usec < 0) {
1065 t1->tv_sec--;
1066 t1->tv_usec += 1000000;
1067 }
1068 if (t1->tv_usec >= 1000000) {
1069 t1->tv_sec++;
1070 t1->tv_usec -= 1000000;
1071 }
1072 }
1073
1074 /*
1075 * ratecheck(): simple time-based rate-limit checking.
1076 */
1077 int
1078 ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
1079 {
1080 struct timeval tv, delta;
1081 int rv = 0;
1082
1083 getmicrouptime(&tv); /* NB: 10ms precision */
1084 delta = tv;
1085 timevalsub(&delta, lasttime);
1086
1087 /*
1088 	 * The check for 0,0 is so that the message will be seen at least once,
1089 * even if interval is huge.
1090 */
1091 if (timevalcmp(&delta, mininterval, >=) ||
1092 (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
1093 *lasttime = tv;
1094 rv = 1;
1095 }
1096
1097 return (rv);
1098 }
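/*
 * Typical use (illustrative): emit a diagnostic at most once every
 * ten seconds.
 *
 *	static struct timeval lasttime;
 *	static const struct timeval interval = { 10, 0 };
 *
 *	if (ratecheck(&lasttime, &interval))
 *		printf("queue overflow\n");
 */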
1099
1100 /*
1101 * ppsratecheck(): packets (or events) per second limitation.
1102 *
1103 * Return 0 if the limit is to be enforced (e.g. the caller
1104 * should drop a packet because of the rate limitation).
1105 *
1106 * maxpps of 0 always causes zero to be returned. maxpps of -1
1107 * always causes 1 to be returned; this effectively defeats rate
1108 * limiting.
1109 *
1110 * Note that we maintain the struct timeval for compatibility
1111 * with other bsd systems. We reuse the storage and just monitor
1112 * clock ticks for minimal overhead.
1113 */
1114 int
1115 ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
1116 {
1117 int now;
1118
1119 /*
1120 * Reset the last time and counter if this is the first call
1121 * or more than a second has passed since the last update of
1122 * lasttime.
1123 */
1124 now = ticks;
1125 if (lasttime->tv_sec == 0 || (u_int)(now - lasttime->tv_sec) >= hz) {
1126 lasttime->tv_sec = now;
1127 *curpps = 1;
1128 return (maxpps != 0);
1129 } else {
1130 (*curpps)++; /* NB: ignore potential overflow */
1131 return (maxpps < 0 || *curpps <= maxpps);
1132 }
1133 }
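/*
 * Typical use (illustrative): allow at most ten such messages per
 * second; ppsratecheck() returns 0 when the event should be
 * suppressed.
 *
 *	static struct timeval lasttv;
 *	static int curpps;
 *
 *	if (ppsratecheck(&lasttv, &curpps, 10))
 *		printf("dropping packet\n");
 */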
1134
1135 static void
1136 itimer_start(void)
1137 {
1138 static const struct kclock rt_clock = {
1139 .timer_create = realtimer_create,
1140 .timer_delete = realtimer_delete,
1141 .timer_settime = realtimer_settime,
1142 .timer_gettime = realtimer_gettime,
1143 };
1144
1145 itimer_zone = uma_zcreate("itimer", sizeof(struct itimer),
1146 NULL, NULL, itimer_init, itimer_fini, UMA_ALIGN_PTR, 0);
1147 register_posix_clock(CLOCK_REALTIME, &rt_clock);
1148 register_posix_clock(CLOCK_MONOTONIC, &rt_clock);
1149 p31b_setcfg(CTL_P1003_1B_TIMERS, 200112L);
1150 p31b_setcfg(CTL_P1003_1B_DELAYTIMER_MAX, INT_MAX);
1151 p31b_setcfg(CTL_P1003_1B_TIMER_MAX, TIMER_MAX);
1152 }
1153
1154 static int
1155 register_posix_clock(int clockid, const struct kclock *clk)
1156 {
1157 if ((unsigned)clockid >= MAX_CLOCKS) {
1158 printf("%s: invalid clockid\n", __func__);
1159 return (0);
1160 }
1161 posix_clocks[clockid] = *clk;
1162 return (1);
1163 }
1164
1165 static int
1166 itimer_init(void *mem, int size, int flags)
1167 {
1168 struct itimer *it;
1169
1170 it = (struct itimer *)mem;
1171 mtx_init(&it->it_mtx, "itimer lock", NULL, MTX_DEF);
1172 return (0);
1173 }
1174
1175 static void
1176 itimer_fini(void *mem, int size)
1177 {
1178 struct itimer *it;
1179
1180 it = (struct itimer *)mem;
1181 mtx_destroy(&it->it_mtx);
1182 }
1183
1184 static void
1185 itimer_enter(struct itimer *it)
1186 {
1187
1188 mtx_assert(&it->it_mtx, MA_OWNED);
1189 it->it_usecount++;
1190 }
1191
1192 static void
1193 itimer_leave(struct itimer *it)
1194 {
1195
1196 mtx_assert(&it->it_mtx, MA_OWNED);
1197 KASSERT(it->it_usecount > 0, ("invalid it_usecount"));
1198
1199 if (--it->it_usecount == 0 && (it->it_flags & ITF_WANTED) != 0)
1200 wakeup(it);
1201 }
1202
1203 #ifndef _SYS_SYSPROTO_H_
1204 struct ktimer_create_args {
1205 clockid_t clock_id;
1206 struct sigevent * evp;
1207 int * timerid;
1208 };
1209 #endif
1210 int
1211 sys_ktimer_create(struct thread *td, struct ktimer_create_args *uap)
1212 {
1213 struct sigevent *evp, ev;
1214 int id;
1215 int error;
1216
1217 if (uap->evp == NULL) {
1218 evp = NULL;
1219 } else {
1220 error = copyin(uap->evp, &ev, sizeof(ev));
1221 if (error != 0)
1222 return (error);
1223 evp = &ev;
1224 }
1225 error = kern_ktimer_create(td, uap->clock_id, evp, &id, -1);
1226 if (error == 0) {
1227 error = copyout(&id, uap->timerid, sizeof(int));
1228 if (error != 0)
1229 kern_ktimer_delete(td, id);
1230 }
1231 return (error);
1232 }
1233
1234 int
1235 kern_ktimer_create(struct thread *td, clockid_t clock_id, struct sigevent *evp,
1236 int *timerid, int preset_id)
1237 {
1238 struct proc *p = td->td_proc;
1239 struct itimer *it;
1240 int id;
1241 int error;
1242
1243 if (clock_id < 0 || clock_id >= MAX_CLOCKS)
1244 return (EINVAL);
1245
1246 if (posix_clocks[clock_id].timer_create == NULL)
1247 return (EINVAL);
1248
1249 if (evp != NULL) {
1250 if (evp->sigev_notify != SIGEV_NONE &&
1251 evp->sigev_notify != SIGEV_SIGNAL &&
1252 evp->sigev_notify != SIGEV_THREAD_ID)
1253 return (EINVAL);
1254 if ((evp->sigev_notify == SIGEV_SIGNAL ||
1255 evp->sigev_notify == SIGEV_THREAD_ID) &&
1256 !_SIG_VALID(evp->sigev_signo))
1257 return (EINVAL);
1258 }
1259
1260 if (p->p_itimers == NULL)
1261 itimers_alloc(p);
1262
1263 it = uma_zalloc(itimer_zone, M_WAITOK);
1264 it->it_flags = 0;
1265 it->it_usecount = 0;
1266 timespecclear(&it->it_time.it_value);
1267 timespecclear(&it->it_time.it_interval);
1268 it->it_overrun = 0;
1269 it->it_overrun_last = 0;
1270 it->it_clockid = clock_id;
1271 it->it_proc = p;
1272 ksiginfo_init(&it->it_ksi);
1273 it->it_ksi.ksi_flags |= KSI_INS | KSI_EXT;
1274 error = CLOCK_CALL(clock_id, timer_create, (it));
1275 if (error != 0)
1276 goto out;
1277
1278 PROC_LOCK(p);
1279 if (preset_id != -1) {
1280 KASSERT(preset_id >= 0 && preset_id < 3, ("invalid preset_id"));
1281 id = preset_id;
1282 if (p->p_itimers->its_timers[id] != NULL) {
1283 PROC_UNLOCK(p);
1284 error = 0;
1285 goto out;
1286 }
1287 } else {
1288 /*
1289 * Find a free timer slot, skipping those reserved
1290 * for setitimer().
1291 */
1292 for (id = 3; id < TIMER_MAX; id++)
1293 if (p->p_itimers->its_timers[id] == NULL)
1294 break;
1295 if (id == TIMER_MAX) {
1296 PROC_UNLOCK(p);
1297 error = EAGAIN;
1298 goto out;
1299 }
1300 }
1301 p->p_itimers->its_timers[id] = it;
1302 if (evp != NULL)
1303 it->it_sigev = *evp;
1304 else {
1305 it->it_sigev.sigev_notify = SIGEV_SIGNAL;
1306 switch (clock_id) {
1307 default:
1308 case CLOCK_REALTIME:
1309 it->it_sigev.sigev_signo = SIGALRM;
1310 break;
1311 case CLOCK_VIRTUAL:
1312 it->it_sigev.sigev_signo = SIGVTALRM;
1313 break;
1314 case CLOCK_PROF:
1315 it->it_sigev.sigev_signo = SIGPROF;
1316 break;
1317 }
1318 it->it_sigev.sigev_value.sival_int = id;
1319 }
1320
1321 if (it->it_sigev.sigev_notify == SIGEV_SIGNAL ||
1322 it->it_sigev.sigev_notify == SIGEV_THREAD_ID) {
1323 it->it_ksi.ksi_signo = it->it_sigev.sigev_signo;
1324 it->it_ksi.ksi_code = SI_TIMER;
1325 it->it_ksi.ksi_value = it->it_sigev.sigev_value;
1326 it->it_ksi.ksi_timerid = id;
1327 }
1328 PROC_UNLOCK(p);
1329 *timerid = id;
1330 return (0);
1331
1332 out:
1333 ITIMER_LOCK(it);
1334 CLOCK_CALL(it->it_clockid, timer_delete, (it));
1335 ITIMER_UNLOCK(it);
1336 uma_zfree(itimer_zone, it);
1337 return (error);
1338 }
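/*
 * Userland sketch of timer creation over this path (illustrative,
 * error handling omitted).  Ids 0-2 are reserved for setitimer(), so
 * the first timer_create(2) timer gets id 3; passing evp == NULL
 * defaults to SIGEV_SIGNAL with SIGALRM for CLOCK_REALTIME as coded
 * above:
 *
 *	#include <signal.h>
 *	#include <time.h>
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGUSR1,
 *	};
 *	struct itimerspec its = {
 *		.it_value    = { .tv_sec = 1, .tv_nsec = 0 },
 *		.it_interval = { .tv_sec = 0, .tv_nsec = 500000000 },
 *	};
 *	timer_t tid;
 *
 *	timer_create(CLOCK_MONOTONIC, &sev, &tid);
 *	timer_settime(tid, 0, &its, NULL);
 */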
1339
1340 #ifndef _SYS_SYSPROTO_H_
1341 struct ktimer_delete_args {
1342 int timerid;
1343 };
1344 #endif
1345 int
1346 sys_ktimer_delete(struct thread *td, struct ktimer_delete_args *uap)
1347 {
1348
1349 return (kern_ktimer_delete(td, uap->timerid));
1350 }
1351
1352 static struct itimer *
1353 itimer_find(struct proc *p, int timerid)
1354 {
1355 struct itimer *it;
1356
1357 PROC_LOCK_ASSERT(p, MA_OWNED);
1358 if ((p->p_itimers == NULL) ||
1359 (timerid < 0) || (timerid >= TIMER_MAX) ||
1360 (it = p->p_itimers->its_timers[timerid]) == NULL) {
1361 return (NULL);
1362 }
1363 ITIMER_LOCK(it);
1364 if ((it->it_flags & ITF_DELETING) != 0) {
1365 ITIMER_UNLOCK(it);
1366 it = NULL;
1367 }
1368 return (it);
1369 }
1370
1371 int
1372 kern_ktimer_delete(struct thread *td, int timerid)
1373 {
1374 struct proc *p = td->td_proc;
1375 struct itimer *it;
1376
1377 PROC_LOCK(p);
1378 it = itimer_find(p, timerid);
1379 if (it == NULL) {
1380 PROC_UNLOCK(p);
1381 return (EINVAL);
1382 }
1383 PROC_UNLOCK(p);
1384
1385 it->it_flags |= ITF_DELETING;
1386 while (it->it_usecount > 0) {
1387 it->it_flags |= ITF_WANTED;
1388 msleep(it, &it->it_mtx, PPAUSE, "itimer", 0);
1389 }
1390 it->it_flags &= ~ITF_WANTED;
1391 CLOCK_CALL(it->it_clockid, timer_delete, (it));
1392 ITIMER_UNLOCK(it);
1393
1394 PROC_LOCK(p);
1395 if (KSI_ONQ(&it->it_ksi))
1396 sigqueue_take(&it->it_ksi);
1397 p->p_itimers->its_timers[timerid] = NULL;
1398 PROC_UNLOCK(p);
1399 uma_zfree(itimer_zone, it);
1400 return (0);
1401 }
1402
1403 #ifndef _SYS_SYSPROTO_H_
1404 struct ktimer_settime_args {
1405 int timerid;
1406 int flags;
1407 const struct itimerspec * value;
1408 struct itimerspec * ovalue;
1409 };
1410 #endif
1411 int
1412 sys_ktimer_settime(struct thread *td, struct ktimer_settime_args *uap)
1413 {
1414 struct itimerspec val, oval, *ovalp;
1415 int error;
1416
1417 error = copyin(uap->value, &val, sizeof(val));
1418 if (error != 0)
1419 return (error);
1420 ovalp = uap->ovalue != NULL ? &oval : NULL;
1421 error = kern_ktimer_settime(td, uap->timerid, uap->flags, &val, ovalp);
1422 if (error == 0 && uap->ovalue != NULL)
1423 error = copyout(ovalp, uap->ovalue, sizeof(*ovalp));
1424 return (error);
1425 }
1426
1427 int
1428 kern_ktimer_settime(struct thread *td, int timer_id, int flags,
1429 struct itimerspec *val, struct itimerspec *oval)
1430 {
1431 struct proc *p;
1432 struct itimer *it;
1433 int error;
1434
1435 p = td->td_proc;
1436 PROC_LOCK(p);
1437 if (timer_id < 3 || (it = itimer_find(p, timer_id)) == NULL) {
1438 PROC_UNLOCK(p);
1439 error = EINVAL;
1440 } else {
1441 PROC_UNLOCK(p);
1442 itimer_enter(it);
1443 error = CLOCK_CALL(it->it_clockid, timer_settime, (it,
1444 flags, val, oval));
1445 itimer_leave(it);
1446 ITIMER_UNLOCK(it);
1447 }
1448 return (error);
1449 }
1450
1451 #ifndef _SYS_SYSPROTO_H_
1452 struct ktimer_gettime_args {
1453 int timerid;
1454 struct itimerspec * value;
1455 };
1456 #endif
1457 int
1458 sys_ktimer_gettime(struct thread *td, struct ktimer_gettime_args *uap)
1459 {
1460 struct itimerspec val;
1461 int error;
1462
1463 error = kern_ktimer_gettime(td, uap->timerid, &val);
1464 if (error == 0)
1465 error = copyout(&val, uap->value, sizeof(val));
1466 return (error);
1467 }
1468
1469 int
1470 kern_ktimer_gettime(struct thread *td, int timer_id, struct itimerspec *val)
1471 {
1472 struct proc *p;
1473 struct itimer *it;
1474 int error;
1475
1476 p = td->td_proc;
1477 PROC_LOCK(p);
1478 if (timer_id < 3 || (it = itimer_find(p, timer_id)) == NULL) {
1479 PROC_UNLOCK(p);
1480 error = EINVAL;
1481 } else {
1482 PROC_UNLOCK(p);
1483 itimer_enter(it);
1484 error = CLOCK_CALL(it->it_clockid, timer_gettime, (it, val));
1485 itimer_leave(it);
1486 ITIMER_UNLOCK(it);
1487 }
1488 return (error);
1489 }
1490
1491 #ifndef _SYS_SYSPROTO_H_
1492 struct timer_getoverrun_args {
1493 int timerid;
1494 };
1495 #endif
1496 int
1497 sys_ktimer_getoverrun(struct thread *td, struct ktimer_getoverrun_args *uap)
1498 {
1499
1500 return (kern_ktimer_getoverrun(td, uap->timerid));
1501 }
1502
1503 int
1504 kern_ktimer_getoverrun(struct thread *td, int timer_id)
1505 {
1506 struct proc *p = td->td_proc;
1507 struct itimer *it;
1508 	int error;
1509
1510 PROC_LOCK(p);
1511 if (timer_id < 3 ||
1512 (it = itimer_find(p, timer_id)) == NULL) {
1513 PROC_UNLOCK(p);
1514 error = EINVAL;
1515 } else {
1516 td->td_retval[0] = it->it_overrun_last;
1517 ITIMER_UNLOCK(it);
1518 PROC_UNLOCK(p);
1519 error = 0;
1520 }
1521 return (error);
1522 }
1523
1524 static int
1525 realtimer_create(struct itimer *it)
1526 {
1527 callout_init_mtx(&it->it_callout, &it->it_mtx, 0);
1528 return (0);
1529 }
1530
1531 static int
1532 realtimer_delete(struct itimer *it)
1533 {
1534 mtx_assert(&it->it_mtx, MA_OWNED);
1535
1536 /*
1537 * clear timer's value and interval to tell realtimer_expire
1538 * to not rearm the timer.
1539 */
1540 timespecclear(&it->it_time.it_value);
1541 timespecclear(&it->it_time.it_interval);
1542 ITIMER_UNLOCK(it);
1543 callout_drain(&it->it_callout);
1544 ITIMER_LOCK(it);
1545 return (0);
1546 }
1547
1548 static int
1549 realtimer_gettime(struct itimer *it, struct itimerspec *ovalue)
1550 {
1551 struct timespec cts;
1552
1553 mtx_assert(&it->it_mtx, MA_OWNED);
1554
1555 realtimer_clocktime(it->it_clockid, &cts);
1556 *ovalue = it->it_time;
1557 if (ovalue->it_value.tv_sec != 0 || ovalue->it_value.tv_nsec != 0) {
1558 timespecsub(&ovalue->it_value, &cts, &ovalue->it_value);
1559 if (ovalue->it_value.tv_sec < 0 ||
1560 (ovalue->it_value.tv_sec == 0 &&
1561 ovalue->it_value.tv_nsec == 0)) {
1562 ovalue->it_value.tv_sec = 0;
1563 ovalue->it_value.tv_nsec = 1;
1564 }
1565 }
1566 return (0);
1567 }
1568
1569 static int
1570 realtimer_settime(struct itimer *it, int flags, struct itimerspec *value,
1571 struct itimerspec *ovalue)
1572 {
1573 struct timespec cts, ts;
1574 struct timeval tv;
1575 struct itimerspec val;
1576
1577 mtx_assert(&it->it_mtx, MA_OWNED);
1578
1579 val = *value;
1580 if (itimespecfix(&val.it_value))
1581 return (EINVAL);
1582
1583 if (timespecisset(&val.it_value)) {
1584 if (itimespecfix(&val.it_interval))
1585 return (EINVAL);
1586 } else {
1587 timespecclear(&val.it_interval);
1588 }
1589
1590 if (ovalue != NULL)
1591 realtimer_gettime(it, ovalue);
1592
1593 it->it_time = val;
1594 if (timespecisset(&val.it_value)) {
1595 realtimer_clocktime(it->it_clockid, &cts);
1596 ts = val.it_value;
1597 if ((flags & TIMER_ABSTIME) == 0) {
1598 /* Convert to absolute time. */
1599 timespecadd(&it->it_time.it_value, &cts,
1600 &it->it_time.it_value);
1601 } else {
1602 timespecsub(&ts, &cts, &ts);
1603 /*
1604 			 * We don't care if ts is negative, tvtohz() will
1605 * fix it.
1606 */
1607 }
1608 TIMESPEC_TO_TIMEVAL(&tv, &ts);
1609 callout_reset(&it->it_callout, tvtohz(&tv), realtimer_expire,
1610 it);
1611 } else {
1612 callout_stop(&it->it_callout);
1613 }
1614
1615 return (0);
1616 }
1617
1618 static void
1619 realtimer_clocktime(clockid_t id, struct timespec *ts)
1620 {
1621 if (id == CLOCK_REALTIME)
1622 getnanotime(ts);
1623 else /* CLOCK_MONOTONIC */
1624 getnanouptime(ts);
1625 }
1626
1627 int
1628 itimer_accept(struct proc *p, int timerid, ksiginfo_t *ksi)
1629 {
1630 struct itimer *it;
1631
1632 PROC_LOCK_ASSERT(p, MA_OWNED);
1633 it = itimer_find(p, timerid);
1634 if (it != NULL) {
1635 ksi->ksi_overrun = it->it_overrun;
1636 it->it_overrun_last = it->it_overrun;
1637 it->it_overrun = 0;
1638 ITIMER_UNLOCK(it);
1639 return (0);
1640 }
1641 return (EINVAL);
1642 }
1643
1644 static int
1645 itimespecfix(struct timespec *ts)
1646 {
1647
1648 if (!timespecvalid_interval(ts))
1649 return (EINVAL);
1650 if ((UINT64_MAX - ts->tv_nsec) / NS_PER_SEC < ts->tv_sec)
1651 return (EINVAL);
1652 if (ts->tv_sec == 0 && ts->tv_nsec != 0 && ts->tv_nsec < tick * 1000)
1653 ts->tv_nsec = tick * 1000;
1654 return (0);
1655 }
1656
1657 #define timespectons(tsp) \
1658 ((uint64_t)(tsp)->tv_sec * NS_PER_SEC + (tsp)->tv_nsec)
1659 #define timespecfromns(ns) (struct timespec){ \
1660 .tv_sec = (ns) / NS_PER_SEC, \
1661 .tv_nsec = (ns) % NS_PER_SEC \
1662 }
1663
1664 static void
1665 realtimer_expire_l(struct itimer *it, bool proc_locked)
1666 {
1667 struct timespec cts, ts;
1668 struct timeval tv;
1669 struct proc *p;
1670 uint64_t interval, now, overruns, value;
1671
1672 realtimer_clocktime(it->it_clockid, &cts);
1673 /* Only fire if time is reached. */
1674 if (timespeccmp(&cts, &it->it_time.it_value, >=)) {
1675 if (timespecisset(&it->it_time.it_interval)) {
1676 timespecadd(&it->it_time.it_value,
1677 &it->it_time.it_interval,
1678 &it->it_time.it_value);
1679
1680 interval = timespectons(&it->it_time.it_interval);
1681 value = timespectons(&it->it_time.it_value);
1682 now = timespectons(&cts);
1683
1684 if (now >= value) {
1685 /*
1686 * We missed at least one period.
1687 */
1688 overruns = howmany(now - value + 1, interval);
1689 if (it->it_overrun + overruns >=
1690 it->it_overrun &&
1691 it->it_overrun + overruns <= INT_MAX) {
1692 it->it_overrun += (int)overruns;
1693 } else {
1694 it->it_overrun = INT_MAX;
1695 it->it_ksi.ksi_errno = ERANGE;
1696 }
1697 value =
1698 now + interval - (now - value) % interval;
1699 it->it_time.it_value = timespecfromns(value);
1700 }
1701 } else {
1702 			/* Single-shot timer; do not rearm. */
1703 timespecclear(&it->it_time.it_value);
1704 }
1705
1706 p = it->it_proc;
1707 if (timespecisset(&it->it_time.it_value)) {
1708 if (P_SHOULDSTOP(p) || P_KILLED(p)) {
1709 it->it_flags |= ITF_PSTOPPED;
1710 } else {
1711 timespecsub(&it->it_time.it_value, &cts, &ts);
1712 TIMESPEC_TO_TIMEVAL(&tv, &ts);
1713 callout_reset(&it->it_callout, tvtohz(&tv),
1714 realtimer_expire, it);
1715 }
1716 }
1717
1718 itimer_enter(it);
1719 ITIMER_UNLOCK(it);
1720 if (proc_locked)
1721 PROC_UNLOCK(p);
1722 itimer_fire(it);
1723 if (proc_locked)
1724 PROC_LOCK(p);
1725 ITIMER_LOCK(it);
1726 itimer_leave(it);
1727 } else if (timespecisset(&it->it_time.it_value)) {
1728 p = it->it_proc;
1729 if (P_SHOULDSTOP(p) || P_KILLED(p)) {
1730 it->it_flags |= ITF_PSTOPPED;
1731 } else {
1732 ts = it->it_time.it_value;
1733 timespecsub(&ts, &cts, &ts);
1734 TIMESPEC_TO_TIMEVAL(&tv, &ts);
1735 callout_reset(&it->it_callout, tvtohz(&tv),
1736 realtimer_expire, it);
1737 }
1738 }
1739 }
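/*
 * Worked example of the overrun accounting above (illustrative): with
 * it_interval = 10 ms and the expiry processed 25 ms after it_value,
 * one interval is added first; the remaining 15 ms of lag then yields
 * overruns = howmany(15 ms + 1 ns, 10 ms) = 2, and it_value is
 * rewritten to now + 10 ms - (15 ms % 10 ms) = now + 5 ms, i.e. the
 * next point on the timer's original 10 ms grid.
 */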
1740
1741 /* Timeout callback for realtime timer */
1742 static void
1743 realtimer_expire(void *arg)
1744 {
1745 realtimer_expire_l(arg, false);
1746 }
1747
1748 static void
1749 itimer_fire(struct itimer *it)
1750 {
1751 struct proc *p = it->it_proc;
1752 struct thread *td;
1753
1754 if (it->it_sigev.sigev_notify == SIGEV_SIGNAL ||
1755 it->it_sigev.sigev_notify == SIGEV_THREAD_ID) {
1756 if (sigev_findtd(p, &it->it_sigev, &td) != 0) {
1757 ITIMER_LOCK(it);
1758 timespecclear(&it->it_time.it_value);
1759 timespecclear(&it->it_time.it_interval);
1760 callout_stop(&it->it_callout);
1761 ITIMER_UNLOCK(it);
1762 return;
1763 }
1764 if (!KSI_ONQ(&it->it_ksi)) {
1765 it->it_ksi.ksi_errno = 0;
1766 ksiginfo_set_sigev(&it->it_ksi, &it->it_sigev);
1767 tdsendsignal(p, td, it->it_ksi.ksi_signo, &it->it_ksi);
1768 } else {
1769 if (it->it_overrun < INT_MAX)
1770 it->it_overrun++;
1771 else
1772 it->it_ksi.ksi_errno = ERANGE;
1773 }
1774 PROC_UNLOCK(p);
1775 }
1776 }
1777
1778 static void
1779 itimers_alloc(struct proc *p)
1780 {
1781 struct itimers *its;
1782
1783 its = malloc(sizeof (struct itimers), M_SUBPROC, M_WAITOK | M_ZERO);
1784 PROC_LOCK(p);
1785 if (p->p_itimers == NULL) {
1786 p->p_itimers = its;
1787 PROC_UNLOCK(p);
1788 }
1789 else {
1790 PROC_UNLOCK(p);
1791 free(its, M_SUBPROC);
1792 }
1793 }
1794
1795 /* Clean up timers when some process events are being triggered. */
1796 static void
1797 itimers_event_exit_exec(int start_idx, struct proc *p)
1798 {
1799 struct itimers *its;
1800 struct itimer *it;
1801 int i;
1802
1803 its = p->p_itimers;
1804 if (its == NULL)
1805 return;
1806
1807 for (i = start_idx; i < TIMER_MAX; ++i) {
1808 if ((it = its->its_timers[i]) != NULL)
1809 kern_ktimer_delete(curthread, i);
1810 }
1811 if (its->its_timers[0] == NULL && its->its_timers[1] == NULL &&
1812 its->its_timers[2] == NULL) {
1813 /* Synchronize with itimer_proc_continue(). */
1814 PROC_LOCK(p);
1815 p->p_itimers = NULL;
1816 PROC_UNLOCK(p);
1817 free(its, M_SUBPROC);
1818 }
1819 }
1820
1821 void
1822 itimers_exec(struct proc *p)
1823 {
1824 /*
1825 	 * According to SUSv3, XSI interval timers should be inherited
1826 	 * by the new image, so only the POSIX timers (ids >= 3) are deleted.
1827 */
1828 itimers_event_exit_exec(3, p);
1829 }
1830
1831 void
1832 itimers_exit(struct proc *p)
1833 {
1834 itimers_event_exit_exec(0, p);
1835 }