FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_time.c
1 /* $NetBSD: kern_time.c,v 1.155.4.3 2009/12/10 23:10:38 snj Exp $ */
2
3 /*-
4 * Copyright (c) 2000, 2004, 2005, 2007, 2008, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Christopher G. Demetriou, and by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Copyright (c) 1982, 1986, 1989, 1993
34 * The Regents of the University of California. All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)kern_time.c 8.4 (Berkeley) 5/26/95
61 */
62
63 #include <sys/cdefs.h>
64 __KERNEL_RCSID(0, "$NetBSD: kern_time.c,v 1.155.4.3 2009/12/10 23:10:38 snj Exp $");
65
66 #include <sys/param.h>
67 #include <sys/resourcevar.h>
68 #include <sys/kernel.h>
69 #include <sys/systm.h>
70 #include <sys/proc.h>
71 #include <sys/vnode.h>
72 #include <sys/signalvar.h>
73 #include <sys/syslog.h>
74 #include <sys/timetc.h>
75 #include <sys/timex.h>
76 #include <sys/kauth.h>
77 #include <sys/mount.h>
78 #include <sys/sa.h>
79 #include <sys/savar.h>
80 #include <sys/syscallargs.h>
81 #include <sys/cpu.h>
82
83 #include <uvm/uvm_extern.h>
84
85 #include "opt_sa.h"
86
/* Forward declarations for the interval-timer machinery below. */
static void	timer_intr(void *);
static void	itimerfire(struct ptimer *);
static void	itimerfree(struct ptimers *, int);

/* Protects the timer queue and all per-process ptimer state. */
kmutex_t	timer_lock;

/* Soft interrupt handle and queue of timers serviced by timer_intr(). */
static void	*timer_sih;
static TAILQ_HEAD(, ptimer) timer_queue;

/* Pools backing struct ptimer / struct ptimers allocations. */
POOL_INIT(ptimer_pool, sizeof(struct ptimer), 0, 0, 0, "ptimerpl",
    &pool_allocator_nointr, IPL_NONE);
POOL_INIT(ptimers_pool, sizeof(struct ptimers), 0, 0, 0, "ptimerspl",
    &pool_allocator_nointr, IPL_NONE);
100
/*
 * Initialize timekeeping.  (Placeholder: first-stage init currently
 * has nothing to do; see time_init2() for the real setup.)
 */
void
time_init(void)
{

	/* nothing yet */
}
110
/*
 * Second-stage timekeeping initialization: set up the queue, lock and
 * soft interrupt used by the interval-timer code.  timer_intr() is
 * registered as the (MP-safe) softint handler.
 */
void
time_init2(void)
{

	TAILQ_INIT(&timer_queue);
	/* IPL_SCHED: the lock is taken from clock interrupt context. */
	mutex_init(&timer_lock, MUTEX_DEFAULT, IPL_SCHED);
	timer_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE,
	    timer_intr, NULL);
}
120
121 /* Time of day and interval timer support.
122 *
123 * These routines provide the kernel entry points to get and set
124 * the time-of-day and per-process interval timers. Subroutines
125 * here provide support for adding and subtracting timeval structures
126 * and decrementing interval timers, optionally reloading the interval
127 * timers when they expire.
128 */
129
/* This function is used by clock_settime and settimeofday */
/*
 * settime1:
 *
 *	Set the system time-of-day clock to *ts.  When check_kauth is
 *	true, the caller's credentials are checked against
 *	KAUTH_SYSTEM_TIME before the clock is touched.  boottime is
 *	shifted by the applied delta so uptime-derived values stay
 *	consistent.  Returns 0 on success, EPERM on authorization
 *	failure.
 */
static int
settime1(struct proc *p, struct timespec *ts, bool check_kauth)
{
	struct timeval delta, tv;
	struct timeval now;
	struct timespec ts1;
	int s;

	TIMESPEC_TO_TIMEVAL(&tv, ts);

	/* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
	s = splclock();
	microtime(&now);
	/* delta = requested time - current time. */
	timersub(&tv, &now, &delta);

	/*
	 * NOTE(review): since check_kauth is true on this path, the
	 * KAUTH_ARG below is always false — confirm that is intended.
	 */
	if (check_kauth && kauth_authorize_system(kauth_cred_get(),
	    KAUTH_SYSTEM_TIME, KAUTH_REQ_SYSTEM_TIME_SYSTEM, ts, &delta,
	    KAUTH_ARG(check_kauth ? false : true)) != 0) {
		splx(s);
		return (EPERM);
	}

#ifdef notyet
	if ((delta.tv_sec < 86400) && securelevel > 0) {	/* XXX elad - notyet */
		splx(s);
		return (EPERM);
	}
#endif

	TIMEVAL_TO_TIMESPEC(&tv, &ts1);
	tc_setclock(&ts1);

	/* Keep boottime consistent with the stepped wall clock. */
	timeradd(&boottime, &delta, &boottime);

	/* Push the new time down to the RTC hardware. */
	resettodr();
	splx(s);

	return (0);
}
170
171 int
172 settime(struct proc *p, struct timespec *ts)
173 {
174 return (settime1(p, ts, true));
175 }
176
177 /* ARGSUSED */
178 int
179 sys_clock_gettime(struct lwp *l, const struct sys_clock_gettime_args *uap,
180 register_t *retval)
181 {
182 /* {
183 syscallarg(clockid_t) clock_id;
184 syscallarg(struct timespec *) tp;
185 } */
186 clockid_t clock_id;
187 struct timespec ats;
188
189 clock_id = SCARG(uap, clock_id);
190 switch (clock_id) {
191 case CLOCK_REALTIME:
192 nanotime(&ats);
193 break;
194 case CLOCK_MONOTONIC:
195 nanouptime(&ats);
196 break;
197 default:
198 return (EINVAL);
199 }
200
201 return copyout(&ats, SCARG(uap, tp), sizeof(ats));
202 }
203
204 /* ARGSUSED */
205 int
206 sys_clock_settime(struct lwp *l, const struct sys_clock_settime_args *uap,
207 register_t *retval)
208 {
209 /* {
210 syscallarg(clockid_t) clock_id;
211 syscallarg(const struct timespec *) tp;
212 } */
213
214 return clock_settime1(l->l_proc, SCARG(uap, clock_id), SCARG(uap, tp),
215 true);
216 }
217
218
219 int
220 clock_settime1(struct proc *p, clockid_t clock_id, const struct timespec *tp,
221 bool check_kauth)
222 {
223 struct timespec ats;
224 int error;
225
226 if ((error = copyin(tp, &ats, sizeof(ats))) != 0)
227 return (error);
228
229 switch (clock_id) {
230 case CLOCK_REALTIME:
231 if ((error = settime1(p, &ats, check_kauth)) != 0)
232 return (error);
233 break;
234 case CLOCK_MONOTONIC:
235 return (EINVAL); /* read-only clock */
236 default:
237 return (EINVAL);
238 }
239
240 return 0;
241 }
242
243 int
244 sys_clock_getres(struct lwp *l, const struct sys_clock_getres_args *uap,
245 register_t *retval)
246 {
247 /* {
248 syscallarg(clockid_t) clock_id;
249 syscallarg(struct timespec *) tp;
250 } */
251 clockid_t clock_id;
252 struct timespec ts;
253 int error = 0;
254
255 clock_id = SCARG(uap, clock_id);
256 switch (clock_id) {
257 case CLOCK_REALTIME:
258 case CLOCK_MONOTONIC:
259 ts.tv_sec = 0;
260 if (tc_getfrequency() > 1000000000)
261 ts.tv_nsec = 1;
262 else
263 ts.tv_nsec = 1000000000 / tc_getfrequency();
264 break;
265 default:
266 return (EINVAL);
267 }
268
269 if (SCARG(uap, tp))
270 error = copyout(&ts, SCARG(uap, tp), sizeof(ts));
271
272 return error;
273 }
274
/* ARGSUSED */
int
sys_nanosleep(struct lwp *l, const struct sys_nanosleep_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */
	struct timespec rmt, rqt;
	int error, error1;

	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
	if (error)
		return (error);

	/* Only track the remainder if the caller supplied rmtp. */
	error = nanosleep1(l, &rqt, SCARG(uap, rmtp) ? &rmt : NULL);
	/* Copy the remainder out only on success or interruption. */
	if (SCARG(uap, rmtp) == NULL || (error != 0 && error != EINTR))
		return error;

	/* A copyout failure takes precedence over EINTR. */
	error1 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt));
	return error1 ? error1 : error;
}
298
/*
 * nanosleep1:
 *
 *	Sleep for the interval *rqt.  If rmt is non-NULL the unslept
 *	remainder is stored there.  An early (non-error) wakeup from
 *	kpause() restarts the sleep for the remaining time.
 */
int
nanosleep1(struct lwp *l, struct timespec *rqt, struct timespec *rmt)
{
	struct timespec rmtstart;
	int error, timo;

	if (itimespecfix(rqt))
		return (EINVAL);

	timo = tstohz(rqt);
	/*
	 * Avoid inadvertently sleeping forever
	 */
	if (timo == 0)
		timo = 1;
	getnanouptime(&rmtstart);
again:
	error = kpause("nanoslp", true, timo, NULL);
	if (rmt != NULL || error == 0) {
		struct timespec rmtend;
		struct timespec t0;
		struct timespec *t;

		/* remaining = requested - (now - start), clamped at 0. */
		getnanouptime(&rmtend);
		t = (rmt != NULL) ? rmt : &t0;
		timespecsub(&rmtend, &rmtstart, t);
		timespecsub(rqt, t, t);
		if (t->tv_sec < 0)
			timespecclear(t);
		if (error == 0) {
			/* Woke early with time left: sleep for the rest. */
			timo = tstohz(t);
			if (timo > 0)
				goto again;
		}
	}

	/* Map kpause() status onto nanosleep(2) semantics. */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	return error;
}
342
343 /* ARGSUSED */
344 int
345 sys_gettimeofday(struct lwp *l, const struct sys_gettimeofday_args *uap,
346 register_t *retval)
347 {
348 /* {
349 syscallarg(struct timeval *) tp;
350 syscallarg(void *) tzp; really "struct timezone *";
351 } */
352 struct timeval atv;
353 int error = 0;
354 struct timezone tzfake;
355
356 if (SCARG(uap, tp)) {
357 microtime(&atv);
358 error = copyout(&atv, SCARG(uap, tp), sizeof(atv));
359 if (error)
360 return (error);
361 }
362 if (SCARG(uap, tzp)) {
363 /*
364 * NetBSD has no kernel notion of time zone, so we just
365 * fake up a timezone struct and return it if demanded.
366 */
367 tzfake.tz_minuteswest = 0;
368 tzfake.tz_dsttime = 0;
369 error = copyout(&tzfake, SCARG(uap, tzp), sizeof(tzfake));
370 }
371 return (error);
372 }
373
374 /* ARGSUSED */
375 int
376 sys_settimeofday(struct lwp *l, const struct sys_settimeofday_args *uap,
377 register_t *retval)
378 {
379 /* {
380 syscallarg(const struct timeval *) tv;
381 syscallarg(const void *) tzp; really "const struct timezone *";
382 } */
383
384 return settimeofday1(SCARG(uap, tv), true, SCARG(uap, tzp), l, true);
385 }
386
/*
 * settimeofday1:
 *
 *	Common backend for settimeofday(2) and kernel callers.  When
 *	userspace is true, utv is a user pointer to copy in; otherwise
 *	it is a kernel pointer.  A non-NULL utzp only triggers a
 *	warning, since the kernel keeps no time zone.
 */
int
settimeofday1(const struct timeval *utv, bool userspace,
    const void *utzp, struct lwp *l, bool check_kauth)
{
	struct timeval atv;
	struct timespec ts;
	int error;

	/* Verify all parameters before changing time. */

	/*
	 * NetBSD has no kernel notion of time zone, and only an
	 * obsolete program would try to set it, so we log a warning.
	 */
	if (utzp)
		log(LOG_WARNING, "pid %d attempted to set the "
		    "(obsolete) kernel time zone\n", l->l_proc->p_pid);

	/* Setting only the (ignored) timezone is a successful no-op. */
	if (utv == NULL)
		return 0;

	if (userspace) {
		if ((error = copyin(utv, &atv, sizeof(atv))) != 0)
			return error;
		utv = &atv;
	}

	TIMEVAL_TO_TIMESPEC(utv, &ts);
	return settime1(l->l_proc, &ts, check_kauth);
}
417
418 int time_adjusted; /* set if an adjustment is made */
419
420 /* ARGSUSED */
421 int
422 sys_adjtime(struct lwp *l, const struct sys_adjtime_args *uap,
423 register_t *retval)
424 {
425 /* {
426 syscallarg(const struct timeval *) delta;
427 syscallarg(struct timeval *) olddelta;
428 } */
429 int error;
430
431 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_TIME,
432 KAUTH_REQ_SYSTEM_TIME_ADJTIME, NULL, NULL, NULL)) != 0)
433 return (error);
434
435 return adjtime1(SCARG(uap, delta), SCARG(uap, olddelta), l->l_proc);
436 }
437
/*
 * adjtime1:
 *
 *	Backend for adjtime(2).  Reports the outstanding adjustment in
 *	*olddelta (if non-NULL) and installs a new one from *delta (if
 *	non-NULL).  The adjustment itself lives in time_adjtime
 *	(kern_ntptime.c), protected by timecounter_lock.
 */
int
adjtime1(const struct timeval *delta, struct timeval *olddelta, struct proc *p)
{
	struct timeval atv;
	int error = 0;

	extern int64_t time_adjtime;  /* in kern_ntptime.c */

	if (olddelta) {
		/* Snapshot the outstanding adjustment under the lock. */
		mutex_spin_enter(&timecounter_lock);
		atv.tv_sec = time_adjtime / 1000000;
		atv.tv_usec = time_adjtime % 1000000;
		mutex_spin_exit(&timecounter_lock);
		/* Normalize so tv_usec lands in [0, 1000000). */
		if (atv.tv_usec < 0) {
			atv.tv_usec += 1000000;
			atv.tv_sec--;
		}
		error = copyout(&atv, olddelta, sizeof(struct timeval));
		if (error)
			return (error);
	}

	if (delta) {
		error = copyin(delta, &atv, sizeof(struct timeval));
		if (error)
			return (error);

		mutex_spin_enter(&timecounter_lock);
		time_adjtime = (int64_t)atv.tv_sec * 1000000 +
			atv.tv_usec;
		if (time_adjtime) {
			/* We need to save the system time during shutdown */
			time_adjusted |= 1;
		}
		mutex_spin_exit(&timecounter_lock);
	}

	return error;
}
477
478 /*
479 * Interval timer support. Both the BSD getitimer() family and the POSIX
480 * timer_*() family of routines are supported.
481 *
482 * All timers are kept in an array pointed to by p_timers, which is
483 * allocated on demand - many processes don't use timers at all. The
484 * first three elements in this array are reserved for the BSD timers:
485 * element 0 is ITIMER_REAL, element 1 is ITIMER_VIRTUAL, and element
486 * 2 is ITIMER_PROF. The rest may be allocated by the timer_create()
487 * syscall.
488 *
489 * Realtime timers are kept in the ptimer structure as an absolute
490 * time; virtual time timers are kept as a linked list of deltas.
491 * Virtual time timers are processed in the hardclock() routine of
492 * kern_clock.c. The real time timer is processed by a callout
493 * routine, called from the softclock() routine. Since a callout may
494 * be delayed in real time due to interrupt processing in the system,
495 * it is possible for the real time timeout routine (realtimeexpire,
496 * given below), to be delayed in real time past when it is supposed
497 * to occur. It does not suffice, therefore, to reload the real timer
498 * .it_value from the real time timers .it_interval. Rather, we
499 * compute the next time in absolute time the timer should go off. */
500
501 /* Allocate a POSIX realtime timer. */
502 int
503 sys_timer_create(struct lwp *l, const struct sys_timer_create_args *uap,
504 register_t *retval)
505 {
506 /* {
507 syscallarg(clockid_t) clock_id;
508 syscallarg(struct sigevent *) evp;
509 syscallarg(timer_t *) timerid;
510 } */
511
512 return timer_create1(SCARG(uap, timerid), SCARG(uap, clock_id),
513 SCARG(uap, evp), copyin, l);
514 }
515
/*
 * timer_create1:
 *
 *	Backend for timer_create(2).  Validates the clock id and the
 *	(optional) sigevent, allocates a ptimer, finds a free slot in
 *	the process's timer array, and copies the new timer id out via
 *	*tid.  fetch_event abstracts the sigevent copyin so kernel
 *	callers can supply their own fetch routine.
 */
int
timer_create1(timer_t *tid, clockid_t id, struct sigevent *evp,
    copyin_t fetch_event, struct lwp *l)
{
	int error;
	timer_t timerid;
	struct ptimers *pts;
	struct ptimer *pt;
	struct proc *p;

	p = l->l_proc;

	if (id < CLOCK_REALTIME || id > CLOCK_PROF)
		return (EINVAL);

	if ((pts = p->p_timers) == NULL)
		pts = timers_alloc(p);

	pt = pool_get(&ptimer_pool, PR_WAITOK);
	if (evp != NULL) {
		/*
		 * Reject a fetch failure, an out-of-range notify style,
		 * or a SIGEV_SIGNAL request with a bogus signal number.
		 */
		if (((error =
		    (*fetch_event)(evp, &pt->pt_ev, sizeof(pt->pt_ev))) != 0) ||
		    ((pt->pt_ev.sigev_notify < SIGEV_NONE) ||
			(pt->pt_ev.sigev_notify > SIGEV_SA)) ||
		    (pt->pt_ev.sigev_notify == SIGEV_SIGNAL &&
		     (pt->pt_ev.sigev_signo <= 0 ||
		      pt->pt_ev.sigev_signo >= NSIG))) {
			pool_put(&ptimer_pool, pt);
			return (error ? error : EINVAL);
		}
	}

	/* Find a free timer slot, skipping those reserved for setitimer(). */
	mutex_spin_enter(&timer_lock);
	for (timerid = 3; timerid < TIMER_MAX; timerid++)
		if (pts->pts_timers[timerid] == NULL)
			break;
	if (timerid == TIMER_MAX) {
		mutex_spin_exit(&timer_lock);
		pool_put(&ptimer_pool, pt);
		return EAGAIN;
	}
	if (evp == NULL) {
		/* No sigevent: default to the classic signal per clock. */
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		switch (id) {
		case CLOCK_REALTIME:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case CLOCK_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case CLOCK_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pt->pt_ev.sigev_value.sival_int = timerid;
	}
	/* Pre-build the siginfo delivered when this timer fires. */
	pt->pt_info.ksi_signo = pt->pt_ev.sigev_signo;
	pt->pt_info.ksi_errno = 0;
	pt->pt_info.ksi_code = 0;
	pt->pt_info.ksi_pid = p->p_pid;
	pt->pt_info.ksi_uid = kauth_cred_getuid(l->l_cred);
	pt->pt_info.ksi_value = pt->pt_ev.sigev_value;
	pt->pt_type = id;
	pt->pt_proc = p;
	pt->pt_overruns = 0;
	pt->pt_poverruns = 0;
	pt->pt_entry = timerid;
	pt->pt_queued = false;
	timespecclear(&pt->pt_time.it_value);
	/* Realtime timers use a callout; virtual ones a delta list. */
	if (id == CLOCK_REALTIME)
		callout_init(&pt->pt_ch, 0);
	else
		pt->pt_active = 0;

	pts->pts_timers[timerid] = pt;
	mutex_spin_exit(&timer_lock);

	return copyout(&timerid, tid, sizeof(timerid));
}
596
/* Delete a POSIX realtime timer */
int
sys_timer_delete(struct lwp *l, const struct sys_timer_delete_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
	} */
	struct proc *p = l->l_proc;
	timer_t timerid;
	struct ptimers *pts;
	struct ptimer *pt, *ptn;

	timerid = SCARG(uap, timerid);
	pts = p->p_timers;

	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);

	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	if (pt->pt_type != CLOCK_REALTIME) {
		if (pt->pt_active) {
			/*
			 * Virtual-time timers live on a delta list; fold
			 * this timer's remaining time back into the
			 * entries after it before unlinking.
			 */
			ptn = LIST_NEXT(pt, pt_list);
			LIST_REMOVE(pt, pt_list);
			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
				timespecadd(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
			pt->pt_active = 0;
		}
	}
	/*
	 * NOTE(review): there is no mutex_spin_exit() here, so
	 * itimerfree() is evidently expected to release timer_lock —
	 * confirm against its definition.
	 */
	itimerfree(pts, timerid);

	return (0);
}
636
/*
 * Set up the given timer. The value in pt->pt_time.it_value is taken
 * to be an absolute time for CLOCK_REALTIME timers and a relative
 * time for virtual timers.
 * Must be called at splclock().
 */
void
timer_settime(struct ptimer *pt)
{
	struct ptimer *ptn, *pptn;
	struct ptlist *ptl;

	KASSERT(mutex_owned(&timer_lock));

	if (pt->pt_type == CLOCK_REALTIME) {
		/* Re-arm (or disarm) the callout for the new deadline. */
		callout_stop(&pt->pt_ch);
		if (timespecisset(&pt->pt_time.it_value)) {
			/*
			 * Don't need to check tshzto() return value, here.
			 * callout_reset() does it for us.
			 */
			callout_reset(&pt->pt_ch, tshzto(&pt->pt_time.it_value),
			    realtimerexpire, pt);
		}
	} else {
		if (pt->pt_active) {
			/*
			 * Already queued: unlink, returning our remaining
			 * delta to the entries after us.
			 */
			ptn = LIST_NEXT(pt, pt_list);
			LIST_REMOVE(pt, pt_list);
			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
				timespecadd(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
		}
		if (timespecisset(&pt->pt_time.it_value)) {
			if (pt->pt_type == CLOCK_VIRTUAL)
				ptl = &pt->pt_proc->p_timers->pts_virtual;
			else
				ptl = &pt->pt_proc->p_timers->pts_prof;

			/*
			 * Walk to the insertion point, converting our
			 * absolute value into a delta against the
			 * entries we pass.
			 */
			for (ptn = LIST_FIRST(ptl), pptn = NULL;
			     ptn && timespeccmp(&pt->pt_time.it_value,
				 &ptn->pt_time.it_value, >);
			     pptn = ptn, ptn = LIST_NEXT(ptn, pt_list))
				timespecsub(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &pt->pt_time.it_value);

			if (pptn)
				LIST_INSERT_AFTER(pptn, pt, pt_list);
			else
				LIST_INSERT_HEAD(ptl, pt, pt_list);

			/* Entries after us now count down from our value. */
			for ( ; ptn ; ptn = LIST_NEXT(ptn, pt_list))
				timespecsub(&ptn->pt_time.it_value,
				    &pt->pt_time.it_value,
				    &ptn->pt_time.it_value);

			pt->pt_active = 1;
		} else
			pt->pt_active = 0;
	}
}
699
/*
 * timer_gettime:
 *
 *	Compute the current (relative) value and interval of timer pt
 *	into *aits.  Called with timer_lock held.
 */
void
timer_gettime(struct ptimer *pt, struct itimerspec *aits)
{
	struct timespec now;
	struct ptimer *ptn;

	KASSERT(mutex_owned(&timer_lock));

	*aits = pt->pt_time;
	if (pt->pt_type == CLOCK_REALTIME) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer. If time for real time
		 * timer has passed return 0, else return difference
		 * between current time and time for the timer to go
		 * off.
		 */
		if (timespecisset(&aits->it_value)) {
			getnanotime(&now);
			if (timespeccmp(&aits->it_value, &now, <))
				timespecclear(&aits->it_value);
			else
				timespecsub(&aits->it_value, &now,
				    &aits->it_value);
		}
	} else if (pt->pt_active) {
		if (pt->pt_type == CLOCK_VIRTUAL)
			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_virtual);
		else
			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_prof);
		/* Sum the deltas of every entry ahead of us on the list. */
		for ( ; ptn && ptn != pt; ptn = LIST_NEXT(ptn, pt_list))
			timespecadd(&aits->it_value,
			    &ptn->pt_time.it_value, &aits->it_value);
		KASSERT(ptn != NULL); /* pt should be findable on the list */
	} else
		timespecclear(&aits->it_value);
}
737
738
739
740 /* Set and arm a POSIX realtime timer */
741 int
742 sys_timer_settime(struct lwp *l, const struct sys_timer_settime_args *uap,
743 register_t *retval)
744 {
745 /* {
746 syscallarg(timer_t) timerid;
747 syscallarg(int) flags;
748 syscallarg(const struct itimerspec *) value;
749 syscallarg(struct itimerspec *) ovalue;
750 } */
751 int error;
752 struct itimerspec value, ovalue, *ovp = NULL;
753
754 if ((error = copyin(SCARG(uap, value), &value,
755 sizeof(struct itimerspec))) != 0)
756 return (error);
757
758 if (SCARG(uap, ovalue))
759 ovp = &ovalue;
760
761 if ((error = dotimer_settime(SCARG(uap, timerid), &value, ovp,
762 SCARG(uap, flags), l->l_proc)) != 0)
763 return error;
764
765 if (ovp)
766 return copyout(&ovalue, SCARG(uap, ovalue),
767 sizeof(struct itimerspec));
768 return 0;
769 }
770
/*
 * dotimer_settime:
 *
 *	Install *value as the new setting of POSIX timer `timerid' in
 *	process p, returning the previous setting in *ovalue if it is
 *	non-NULL.  flags may include TIMER_ABSTIME.
 */
int
dotimer_settime(int timerid, struct itimerspec *value,
    struct itimerspec *ovalue, int flags, struct proc *p)
{
	struct timespec now;
	struct itimerspec val, oval;
	struct ptimers *pts;
	struct ptimer *pt;

	pts = p->p_timers;

	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return EINVAL;
	val = *value;
	if (itimespecfix(&val.it_value) || itimespecfix(&val.it_interval))
		return EINVAL;

	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return EINVAL;
	}

	oval = pt->pt_time;
	pt->pt_time = val;

	/*
	 * If we've been passed a relative time for a realtime timer,
	 * convert it to absolute; if an absolute time for a virtual
	 * timer, convert it to relative and make sure we don't set it
	 * to zero, which would cancel the timer, or let it go
	 * negative, which would confuse the comparison tests.
	 */
	if (timespecisset(&pt->pt_time.it_value)) {
		if (pt->pt_type == CLOCK_REALTIME) {
			if ((flags & TIMER_ABSTIME) == 0) {
				getnanotime(&now);
				timespecadd(&pt->pt_time.it_value, &now,
				    &pt->pt_time.it_value);
			}
		} else {
			if ((flags & TIMER_ABSTIME) != 0) {
				getnanotime(&now);
				timespecsub(&pt->pt_time.it_value, &now,
				    &pt->pt_time.it_value);
				if (!timespecisset(&pt->pt_time.it_value) ||
				    pt->pt_time.it_value.tv_sec < 0) {
					/* Minimal non-zero value: 1ns. */
					pt->pt_time.it_value.tv_sec = 0;
					pt->pt_time.it_value.tv_nsec = 1;
				}
			}
		}
	}

	/* Arm (or disarm) the timer under the lock. */
	timer_settime(pt);
	mutex_spin_exit(&timer_lock);

	if (ovalue)
		*ovalue = oval;

	return (0);
}
833
834 /* Return the time remaining until a POSIX timer fires. */
835 int
836 sys_timer_gettime(struct lwp *l, const struct sys_timer_gettime_args *uap,
837 register_t *retval)
838 {
839 /* {
840 syscallarg(timer_t) timerid;
841 syscallarg(struct itimerspec *) value;
842 } */
843 struct itimerspec its;
844 int error;
845
846 if ((error = dotimer_gettime(SCARG(uap, timerid), l->l_proc,
847 &its)) != 0)
848 return error;
849
850 return copyout(&its, SCARG(uap, value), sizeof(its));
851 }
852
853 int
854 dotimer_gettime(int timerid, struct proc *p, struct itimerspec *its)
855 {
856 struct ptimer *pt;
857 struct ptimers *pts;
858
859 pts = p->p_timers;
860 if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
861 return (EINVAL);
862 mutex_spin_enter(&timer_lock);
863 if ((pt = pts->pts_timers[timerid]) == NULL) {
864 mutex_spin_exit(&timer_lock);
865 return (EINVAL);
866 }
867 timer_gettime(pt, its);
868 mutex_spin_exit(&timer_lock);
869
870 return 0;
871 }
872
873 /*
874 * Return the count of the number of times a periodic timer expired
875 * while a notification was already pending. The counter is reset when
876 * a timer expires and a notification can be posted.
877 */
878 int
879 sys_timer_getoverrun(struct lwp *l, const struct sys_timer_getoverrun_args *uap,
880 register_t *retval)
881 {
882 /* {
883 syscallarg(timer_t) timerid;
884 } */
885 struct proc *p = l->l_proc;
886 struct ptimers *pts;
887 int timerid;
888 struct ptimer *pt;
889
890 timerid = SCARG(uap, timerid);
891
892 pts = p->p_timers;
893 if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
894 return (EINVAL);
895 mutex_spin_enter(&timer_lock);
896 if ((pt = pts->pts_timers[timerid]) == NULL) {
897 mutex_spin_exit(&timer_lock);
898 return (EINVAL);
899 }
900 *retval = pt->pt_poverruns;
901 mutex_spin_exit(&timer_lock);
902
903 return (0);
904 }
905
#ifdef KERN_SA
/* Glue function that triggers an upcall; called from userret(). */
void
timerupcall(struct lwp *l)
{
	struct ptimers *pt = l->l_proc->p_timers;
	struct proc *p = l->l_proc;
	unsigned int i, fired, done;

	KDASSERT(l->l_proc->p_sa);
	/* Bail out if we do not own the virtual processor */
	if (l->l_savp->savp_lwp != l)
		return ;

	mutex_enter(p->p_lock);

	/* Deliver an SA upcall for each bit recorded in pts_fired. */
	fired = pt->pts_fired;
	done = 0;
	while ((i = ffs(fired)) != 0) {
		siginfo_t *si;
		int mask = 1 << --i;
		int f;

		/* Set LP_SA_NOBLOCK around the upcall; restored below. */
		f = ~l->l_pflag & LP_SA_NOBLOCK;
		l->l_pflag |= LP_SA_NOBLOCK;
		si = siginfo_alloc(PR_WAITOK);
		si->_info = pt->pts_timers[i]->pt_info.ksi_info;
		if (sa_upcall(l, SA_UPCALL_SIGEV | SA_UPCALL_DEFER, NULL, l,
		    sizeof(*si), si, siginfo_free) != 0) {
			siginfo_free(si);
			/* XXX What do we do here?? */
		} else
			done |= mask;
		fired &= ~mask;
		l->l_pflag ^= f;
	}
	/* Clear only the bits whose upcalls were queued successfully. */
	pt->pts_fired &= ~done;
	if (pt->pts_fired == 0)
		l->l_proc->p_timerpend = 0;

	mutex_exit(p->p_lock);
}
#endif /* KERN_SA */
949
/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realtimerexpire(void *arg)
{
	/*
	 * NOTE(review): despite the "_ms" suffix these variables hold
	 * nanoseconds (values come from timespec2ns()).
	 */
	uint64_t last_val, next_val, interval, now_ms;
	struct timespec now, next;
	struct ptimer *pt;
	int backwards;

	pt = arg;

	mutex_spin_enter(&timer_lock);
	itimerfire(pt);

	/* One-shot timer: disarm and we are done. */
	if (!timespecisset(&pt->pt_time.it_interval)) {
		timespecclear(&pt->pt_time.it_value);
		mutex_spin_exit(&timer_lock);
		return;
	}

	getnanotime(&now);
	/* "backwards": the clock now reads earlier than our deadline. */
	backwards = (timespeccmp(&pt->pt_time.it_value, &now, >));
	timespecadd(&pt->pt_time.it_value, &pt->pt_time.it_interval, &next);
	/* Handle the easy case of non-overflown timers first. */
	if (!backwards && timespeccmp(&next, &now, >)) {
		pt->pt_time.it_value = next;
	} else {
		now_ms = timespec2ns(&now);
		last_val = timespec2ns(&pt->pt_time.it_value);
		interval = timespec2ns(&pt->pt_time.it_interval);

		/* Next expiry on the interval grid at or after now. */
		next_val = now_ms +
		    (now_ms - last_val + interval - 1) % interval;

		if (backwards)
			next_val += interval;
		else
			/* Periods slept through count as overruns. */
			pt->pt_overruns += (now_ms - last_val) / interval;

		pt->pt_time.it_value.tv_sec = next_val / 1000000000;
		pt->pt_time.it_value.tv_nsec = next_val % 1000000000;
	}

	/*
	 * Don't need to check tshzto() return value, here.
	 * callout_reset() does it for us.
	 */
	callout_reset(&pt->pt_ch, tshzto(&pt->pt_time.it_value),
	    realtimerexpire, pt);
	mutex_spin_exit(&timer_lock);
}
1008
1009 /* BSD routine to get the value of an interval timer. */
1010 /* ARGSUSED */
1011 int
1012 sys_getitimer(struct lwp *l, const struct sys_getitimer_args *uap,
1013 register_t *retval)
1014 {
1015 /* {
1016 syscallarg(int) which;
1017 syscallarg(struct itimerval *) itv;
1018 } */
1019 struct proc *p = l->l_proc;
1020 struct itimerval aitv;
1021 int error;
1022
1023 error = dogetitimer(p, SCARG(uap, which), &aitv);
1024 if (error)
1025 return error;
1026 return (copyout(&aitv, SCARG(uap, itv), sizeof(struct itimerval)));
1027 }
1028
1029 int
1030 dogetitimer(struct proc *p, int which, struct itimerval *itvp)
1031 {
1032 struct ptimers *pts;
1033 struct ptimer *pt;
1034 struct itimerspec its;
1035
1036 if ((u_int)which > ITIMER_PROF)
1037 return (EINVAL);
1038
1039 mutex_spin_enter(&timer_lock);
1040 pts = p->p_timers;
1041 if (pts == NULL || (pt = pts->pts_timers[which]) == NULL) {
1042 timerclear(&itvp->it_value);
1043 timerclear(&itvp->it_interval);
1044 } else {
1045 timer_gettime(pt, &its);
1046 TIMESPEC_TO_TIMEVAL(&itvp->it_value, &its.it_value);
1047 TIMESPEC_TO_TIMEVAL(&itvp->it_interval, &its.it_interval);
1048 }
1049 mutex_spin_exit(&timer_lock);
1050
1051 return 0;
1052 }
1053
1054 /* BSD routine to set/arm an interval timer. */
1055 /* ARGSUSED */
1056 int
1057 sys_setitimer(struct lwp *l, const struct sys_setitimer_args *uap,
1058 register_t *retval)
1059 {
1060 /* {
1061 syscallarg(int) which;
1062 syscallarg(const struct itimerval *) itv;
1063 syscallarg(struct itimerval *) oitv;
1064 } */
1065 struct proc *p = l->l_proc;
1066 int which = SCARG(uap, which);
1067 struct sys_getitimer_args getargs;
1068 const struct itimerval *itvp;
1069 struct itimerval aitv;
1070 int error;
1071
1072 if ((u_int)which > ITIMER_PROF)
1073 return (EINVAL);
1074 itvp = SCARG(uap, itv);
1075 if (itvp &&
1076 (error = copyin(itvp, &aitv, sizeof(struct itimerval)) != 0))
1077 return (error);
1078 if (SCARG(uap, oitv) != NULL) {
1079 SCARG(&getargs, which) = which;
1080 SCARG(&getargs, itv) = SCARG(uap, oitv);
1081 if ((error = sys_getitimer(l, &getargs, retval)) != 0)
1082 return (error);
1083 }
1084 if (itvp == 0)
1085 return (0);
1086
1087 return dosetitimer(p, which, &aitv);
1088 }
1089
/*
 * dosetitimer:
 *
 *	Set/arm BSD interval timer `which' for process p from *itvp.
 *	Per-process timer state is allocated on demand; because
 *	pool_get(PR_WAITOK) may sleep and timer_lock is a spin lock,
 *	the ptimer is allocated with the lock dropped and the lookup
 *	retried ("spare" pattern).
 */
int
dosetitimer(struct proc *p, int which, struct itimerval *itvp)
{
	struct timespec now;
	struct ptimers *pts;
	struct ptimer *pt, *spare;

	if (itimerfix(&itvp->it_value) || itimerfix(&itvp->it_interval))
		return (EINVAL);

	/*
	 * Don't bother allocating data structures if the process just
	 * wants to clear the timer.
	 */
	spare = NULL;
	pts = p->p_timers;
 retry:
	if (!timerisset(&itvp->it_value) && (pts == NULL ||
	    pts->pts_timers[which] == NULL))
		return (0);
	if (pts == NULL)
		pts = timers_alloc(p);
	mutex_spin_enter(&timer_lock);
	pt = pts->pts_timers[which];
	if (pt == NULL) {
		if (spare == NULL) {
			/* Can't sleep under a spin lock: drop it,
			 * allocate, and revalidate from the top. */
			mutex_spin_exit(&timer_lock);
			spare = pool_get(&ptimer_pool, PR_WAITOK);
			goto retry;
		}
		pt = spare;
		spare = NULL;
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		pt->pt_ev.sigev_value.sival_int = which;
		pt->pt_overruns = 0;
		pt->pt_proc = p;
		pt->pt_type = which;
		pt->pt_entry = which;
		pt->pt_queued = false;
		if (pt->pt_type == CLOCK_REALTIME)
			callout_init(&pt->pt_ch, CALLOUT_MPSAFE);
		else
			pt->pt_active = 0;

		/* Classic signal for each BSD timer flavour. */
		switch (which) {
		case ITIMER_REAL:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case ITIMER_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case ITIMER_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pts->pts_timers[which] = pt;
	}

	TIMEVAL_TO_TIMESPEC(&itvp->it_value, &pt->pt_time.it_value);
	TIMEVAL_TO_TIMESPEC(&itvp->it_interval, &pt->pt_time.it_interval);

	if ((which == ITIMER_REAL) && timespecisset(&pt->pt_time.it_value)) {
		/* Convert to absolute time */
		/* XXX need to wrap in splclock for timecounters case? */
		getnanotime(&now);
		timespecadd(&pt->pt_time.it_value, &now, &pt->pt_time.it_value);
	}
	timer_settime(pt);
	mutex_spin_exit(&timer_lock);
	/* Lost the allocation race: return the unused spare. */
	if (spare != NULL)
		pool_put(&ptimer_pool, spare);

	return (0);
}
1164
1165 /* Utility routines to manage the array of pointers to timers. */
1166 struct ptimers *
1167 timers_alloc(struct proc *p)
1168 {
1169 struct ptimers *pts;
1170 int i;
1171
1172 pts = pool_get(&ptimers_pool, PR_WAITOK);
1173 LIST_INIT(&pts->pts_virtual);
1174 LIST_INIT(&pts->pts_prof);
1175 for (i = 0; i < TIMER_MAX; i++)
1176 pts->pts_timers[i] = NULL;
1177 pts->pts_fired = 0;
1178 mutex_spin_enter(&timer_lock);
1179 if (p->p_timers == NULL) {
1180 p->p_timers = pts;
1181 mutex_spin_exit(&timer_lock);
1182 return pts;
1183 }
1184 mutex_spin_exit(&timer_lock);
1185 pool_put(&ptimers_pool, pts);
1186 return p->p_timers;
1187 }
1188
1189 /*
1190 * Clean up the per-process timers. If "which" is set to TIMERS_ALL,
1191 * then clean up all timers and free all the data structures. If
1192 * "which" is set to TIMERS_POSIX, only clean up the timers allocated
1193 * by timer_create(), not the BSD setitimer() timers, and only free the
1194 * structure if none of those remain.
1195 */
1196 void
1197 timers_free(struct proc *p, int which)
1198 {
1199 struct ptimers *pts;
1200 struct ptimer *ptn;
1201 struct timespec ts;
1202 int i;
1203
1204 if (p->p_timers == NULL)
1205 return;
1206
1207 pts = p->p_timers;
1208 mutex_spin_enter(&timer_lock);
1209 if (which == TIMERS_ALL) {
1210 p->p_timers = NULL;
1211 i = 0;
1212 } else {
1213 timespecclear(&ts);
1214 for (ptn = LIST_FIRST(&pts->pts_virtual);
1215 ptn && ptn != pts->pts_timers[ITIMER_VIRTUAL];
1216 ptn = LIST_NEXT(ptn, pt_list)) {
1217 KASSERT(ptn->pt_type != CLOCK_REALTIME);
1218 timespecadd(&ts, &ptn->pt_time.it_value, &ts);
1219 }
1220 LIST_FIRST(&pts->pts_virtual) = NULL;
1221 if (ptn) {
1222 KASSERT(ptn->pt_type != CLOCK_REALTIME);
1223 timespecadd(&ts, &ptn->pt_time.it_value,
1224 &ptn->pt_time.it_value);
1225 LIST_INSERT_HEAD(&pts->pts_virtual, ptn, pt_list);
1226 }
1227 timespecclear(&ts);
1228 for (ptn = LIST_FIRST(&pts->pts_prof);
1229 ptn && ptn != pts->pts_timers[ITIMER_PROF];
1230 ptn = LIST_NEXT(ptn, pt_list)) {
1231 KASSERT(ptn->pt_type != CLOCK_REALTIME);
1232 timespecadd(&ts, &ptn->pt_time.it_value, &ts);
1233 }
1234 LIST_FIRST(&pts->pts_prof) = NULL;
1235 if (ptn) {
1236 KASSERT(ptn->pt_type != CLOCK_REALTIME);
1237 timespecadd(&ts, &ptn->pt_time.it_value,
1238 &ptn->pt_time.it_value);
1239 LIST_INSERT_HEAD(&pts->pts_prof, ptn, pt_list);
1240 }
1241 i = 3;
1242 }
1243 for ( ; i < TIMER_MAX; i++) {
1244 if (pts->pts_timers[i] != NULL) {
1245 itimerfree(pts, i);
1246 mutex_spin_enter(&timer_lock);
1247 }
1248 }
1249 if (pts->pts_timers[0] == NULL && pts->pts_timers[1] == NULL &&
1250 pts->pts_timers[2] == NULL) {
1251 p->p_timers = NULL;
1252 mutex_spin_exit(&timer_lock);
1253 pool_put(&ptimers_pool, pts);
1254 } else
1255 mutex_spin_exit(&timer_lock);
1256 }
1257
1258 static void
1259 itimerfree(struct ptimers *pts, int index)
1260 {
1261 struct ptimer *pt;
1262
1263 KASSERT(mutex_owned(&timer_lock));
1264
1265 pt = pts->pts_timers[index];
1266 pts->pts_timers[index] = NULL;
1267 if (pt->pt_type == CLOCK_REALTIME)
1268 callout_halt(&pt->pt_ch, &timer_lock);
1269 else if (pt->pt_queued)
1270 TAILQ_REMOVE(&timer_queue, pt, pt_chain);
1271 mutex_spin_exit(&timer_lock);
1272 if (pt->pt_type == CLOCK_REALTIME)
1273 callout_destroy(&pt->pt_ch);
1274 pool_put(&ptimer_pool, pt);
1275 }
1276
1277 /*
1278 * Decrement an interval timer by a specified number
1279 * of nanoseconds, which must be less than a second,
1280 * i.e. < 1000000000. If the timer expires, then reload
1281 * it. In this case, carry over (nsec - old value) to
1282 * reduce the value reloaded into the timer so that
1283 * the timer does not drift. This routine assumes
1284 * that it is called in a context where the timers
1285 * on which it is operating cannot change in value.
1286 */
1287 static int
1288 itimerdecr(struct ptimer *pt, int nsec)
1289 {
1290 struct itimerspec *itp;
1291
1292 KASSERT(mutex_owned(&timer_lock));
1293
1294 itp = &pt->pt_time;
1295 if (itp->it_value.tv_nsec < nsec) {
1296 if (itp->it_value.tv_sec == 0) {
1297 /* expired, and already in next interval */
1298 nsec -= itp->it_value.tv_nsec;
1299 goto expire;
1300 }
1301 itp->it_value.tv_nsec += 1000000000;
1302 itp->it_value.tv_sec--;
1303 }
1304 itp->it_value.tv_nsec -= nsec;
1305 nsec = 0;
1306 if (timespecisset(&itp->it_value))
1307 return (1);
1308 /* expired, exactly at end of interval */
1309 expire:
1310 if (timespecisset(&itp->it_interval)) {
1311 itp->it_value = itp->it_interval;
1312 itp->it_value.tv_nsec -= nsec;
1313 if (itp->it_value.tv_nsec < 0) {
1314 itp->it_value.tv_nsec += 1000000000;
1315 itp->it_value.tv_sec--;
1316 }
1317 timer_settime(pt);
1318 } else
1319 itp->it_value.tv_nsec = 0; /* sec is already 0 */
1320 return (0);
1321 }
1322
1323 static void
1324 itimerfire(struct ptimer *pt)
1325 {
1326
1327 KASSERT(mutex_owned(&timer_lock));
1328
1329 /*
1330 * XXX Can overrun, but we don't do signal queueing yet, anyway.
1331 * XXX Relying on the clock interrupt is stupid.
1332 */
1333 if ((pt->pt_ev.sigev_notify == SIGEV_SA && pt->pt_proc->p_sa == NULL) ||
1334 (pt->pt_ev.sigev_notify != SIGEV_SIGNAL &&
1335 pt->pt_ev.sigev_notify != SIGEV_SA) || pt->pt_queued)
1336 return;
1337 TAILQ_INSERT_TAIL(&timer_queue, pt, pt_chain);
1338 pt->pt_queued = true;
1339 softint_schedule(timer_sih);
1340 }
1341
1342 void
1343 timer_tick(lwp_t *l, bool user)
1344 {
1345 struct ptimers *pts;
1346 struct ptimer *pt;
1347 proc_t *p;
1348
1349 p = l->l_proc;
1350 if (p->p_timers == NULL)
1351 return;
1352
1353 mutex_spin_enter(&timer_lock);
1354 if ((pts = l->l_proc->p_timers) != NULL) {
1355 /*
1356 * Run current process's virtual and profile time, as needed.
1357 */
1358 if (user && (pt = LIST_FIRST(&pts->pts_virtual)) != NULL)
1359 if (itimerdecr(pt, tick * 1000) == 0)
1360 itimerfire(pt);
1361 if ((pt = LIST_FIRST(&pts->pts_prof)) != NULL)
1362 if (itimerdecr(pt, tick * 1000) == 0)
1363 itimerfire(pt);
1364 }
1365 mutex_spin_exit(&timer_lock);
1366 }
1367
#ifdef KERN_SA
/*
 * timer_sa_intr:
 *
 *	SIGEV_SA handling for timer_intr(). We are called (and return)
 *	with the timer lock held. We know that the process had SA enabled
 *	when this timer was enqueued. As timer_intr() is a soft interrupt
 *	handler, SA should still be enabled by the time we get here.
 */
static void
timer_sa_intr(struct ptimer *pt, proc_t *p)
{
	unsigned int i;
	struct sadata *sa;
	struct sadata_vp *vp;

	/* Cause the process to generate an upcall when it returns. */
	if (!p->p_timerpend) {
		/*
		 * XXX stop signals can be processed inside tsleep,
		 * which can be inside sa_yield's inner loop, which
		 * makes testing for sa_idle alone insufficient to
		 * determine if we really should call setrunnable.
		 */
		/* First pending timer: record which entry fired. */
		pt->pt_poverruns = pt->pt_overruns;
		pt->pt_overruns = 0;
		i = 1 << pt->pt_entry;
		p->p_timers->pts_fired = i;
		p->p_timerpend = 1;

		/*
		 * Flag every virtual processor for a userret, and wake
		 * the first idle one so the upcall is delivered promptly.
		 */
		sa = p->p_sa;
		mutex_enter(&sa->sa_mutex);
		SLIST_FOREACH(vp, &sa->sa_vps, savp_next) {
			struct lwp *vp_lwp = vp->savp_lwp;
			lwp_lock(vp_lwp);
			lwp_need_userret(vp_lwp);
			if (vp_lwp->l_flag & LW_SA_IDLE) {
				vp_lwp->l_flag &= ~LW_SA_IDLE;
				/* lwp_unsleep() releases the LWP lock. */
				lwp_unsleep(vp_lwp, true);
				break;
			}
			lwp_unlock(vp_lwp);
		}
		mutex_exit(&sa->sa_mutex);
	} else {
		/*
		 * Upcall already pending: either add this timer to the
		 * fired set, or count an overrun if it fired before.
		 */
		i = 1 << pt->pt_entry;
		if ((p->p_timers->pts_fired & i) == 0) {
			pt->pt_poverruns = pt->pt_overruns;
			pt->pt_overruns = 0;
			p->p_timers->pts_fired |= i;
		} else
			pt->pt_overruns++;
	}
}
#endif /* KERN_SA */
1423
/*
 * timer_intr:
 *
 *	Soft interrupt handler: drain the queue of expired timers
 *	and deliver their notifications (signal or SA upcall).
 *	Runs with proc_lock held so kpsignal() targets stay valid.
 */
static void
timer_intr(void *cookie)
{
	ksiginfo_t ksi;
	struct ptimer *pt;
	proc_t *p;

	mutex_enter(proc_lock);
	mutex_spin_enter(&timer_lock);
	while ((pt = TAILQ_FIRST(&timer_queue)) != NULL) {
		TAILQ_REMOVE(&timer_queue, pt, pt_chain);
		KASSERT(pt->pt_queued);
		pt->pt_queued = false;

		if (pt->pt_proc->p_timers == NULL) {
			/* Process is dying. */
			continue;
		}
		p = pt->pt_proc;
#ifdef KERN_SA
		if (pt->pt_ev.sigev_notify == SIGEV_SA) {
			timer_sa_intr(pt, p);
			continue;
		}
#endif /* KERN_SA */
		if (pt->pt_ev.sigev_notify != SIGEV_SIGNAL)
			continue;
		/* Signal still pending from last time: count an overrun. */
		if (sigismember(&p->p_sigpend.sp_set, pt->pt_ev.sigev_signo)) {
			pt->pt_overruns++;
			continue;
		}

		KSI_INIT(&ksi);
		ksi.ksi_signo = pt->pt_ev.sigev_signo;
		ksi.ksi_code = SI_TIMER;
		ksi.ksi_value = pt->pt_ev.sigev_value;
		pt->pt_poverruns = pt->pt_overruns;
		pt->pt_overruns = 0;
		/*
		 * kpsignal() cannot be called under a spin lock; drop
		 * timer_lock around it (proc_lock stays held).  The
		 * queue is re-read from the top after re-acquiring.
		 */
		mutex_spin_exit(&timer_lock);
		kpsignal(p, &ksi, NULL);
		mutex_spin_enter(&timer_lock);
	}
	mutex_spin_exit(&timer_lock);
	mutex_exit(proc_lock);
}
Cache object: 6a1d9004cbca0120f068aa96353b2f86
|