/*-
 * Copyright (c) 2010-2012 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.1/sys/kern/kern_clocksource.c 234167 2012-04-12 06:29:02Z mav $");

/*
 * Common routines to manage event timer hardware.
 */

#include "opt_device_polling.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/timeet.h>
#include <sys/timetc.h>

#include <machine/atomic.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/smp.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
cyclic_clock_func_t cyclic_clock_func = NULL;
#endif

int cpu_can_deep_sleep = 0;	/* C3 state is available. */
int cpu_disable_deep_sleep = 0;	/* Timer dies in C3. */

static void setuptimer(void);
static void loadtimer(struct bintime *now, int first);
static int doconfigtimer(void);
static void configtimer(int start);
static int round_freq(struct eventtimer *et, int freq);

static void getnextcpuevent(struct bintime *event, int idle);
static void getnextevent(struct bintime *event);
static int handleevents(struct bintime *now, int fake);
#ifdef SMP
static void cpu_new_callout(int cpu, int ticks);
#endif

static struct mtx et_hw_mtx;

#define	ET_HW_LOCK(state) \
	{ \
		if (timer->et_flags & ET_FLAGS_PERCPU) \
			mtx_lock_spin(&(state)->et_hw_mtx); \
		else \
			mtx_lock_spin(&et_hw_mtx); \
	}

#define	ET_HW_UNLOCK(state) \
	{ \
		if (timer->et_flags & ET_FLAGS_PERCPU) \
			mtx_unlock_spin(&(state)->et_hw_mtx); \
		else \
			mtx_unlock_spin(&et_hw_mtx); \
	}
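
/*
 * Locking scheme (as implemented by the macros above): a per-CPU event
 * timer is protected by the spin mutex embedded in that CPU's timer state,
 * while a shared global timer is protected by the single et_hw_mtx spin
 * mutex; ET_HW_LOCK()/ET_HW_UNLOCK() pick the right one based on
 * ET_FLAGS_PERCPU.
 */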

static struct eventtimer *timer = NULL;
static struct bintime timerperiod;	/* Timer period for periodic mode. */
static struct bintime hardperiod;	/* hardclock() events period. */
static struct bintime statperiod;	/* statclock() events period. */
static struct bintime profperiod;	/* profclock() events period. */
static struct bintime nexttick;		/* Next global timer tick time. */
static struct bintime nexthard;		/* Next global hardclock() event. */
static u_int busy = 0;			/* Reconfiguration is in progress. */
static int profiling = 0;		/* Profiling events enabled. */

static char timername[32];	/* Wanted timer. */
TUNABLE_STR("kern.eventtimer.timer", timername, sizeof(timername));

static int singlemul = 0;	/* Multiplier for periodic mode. */
TUNABLE_INT("kern.eventtimer.singlemul", &singlemul);
SYSCTL_INT(_kern_eventtimer, OID_AUTO, singlemul, CTLFLAG_RW, &singlemul,
    0, "Multiplier for periodic mode");

static u_int idletick = 0;	/* Run periodic events when idle. */
TUNABLE_INT("kern.eventtimer.idletick", &idletick);
SYSCTL_UINT(_kern_eventtimer, OID_AUTO, idletick, CTLFLAG_RW, &idletick,
    0, "Run periodic events when idle");

static u_int activetick = 1;	/* Run all periodic events when active. */
TUNABLE_INT("kern.eventtimer.activetick", &activetick);
SYSCTL_UINT(_kern_eventtimer, OID_AUTO, activetick, CTLFLAG_RW, &activetick,
    0, "Run all periodic events when active");

static int periodic = 0;	/* Periodic or one-shot mode. */
static int want_periodic = 0;	/* What mode to prefer. */
TUNABLE_INT("kern.eventtimer.periodic", &want_periodic);
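
/*
 * The kern.eventtimer.* knobs above are loader tunables; those with a
 * SYSCTL definition (or a handler further below) can also be changed at
 * run time.  Illustrative examples:
 *
 *	kern.eventtimer.periodic="1"		(in loader.conf)
 *	sysctl kern.eventtimer.singlemul=2	(at run time)
 */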

struct pcpu_state {
	struct mtx et_hw_mtx;		/* Per-CPU timer mutex. */
	u_int action;			/* Reconfiguration requests. */
	u_int handle;			/* Immediate handle requests. */
	struct bintime now;		/* Last tick time. */
	struct bintime nextevent;	/* Next scheduled event on this CPU. */
	struct bintime nexttick;	/* Next timer tick time. */
	struct bintime nexthard;	/* Next hardclock() event. */
	struct bintime nextstat;	/* Next statclock() event. */
	struct bintime nextprof;	/* Next profclock() event. */
#ifdef KDTRACE_HOOKS
	struct bintime nextcyc;		/* Next OpenSolaris cyclics event. */
#endif
	int ipi;			/* This CPU needs IPI. */
	int idle;			/* This CPU is in idle mode. */
};

static DPCPU_DEFINE(struct pcpu_state, timerstate);

#define	FREQ2BT(freq, bt) \
	{ \
		(bt)->sec = 0; \
		(bt)->frac = ((uint64_t)0x8000000000000000 / (freq)) << 1; \
	}
#define	BT2FREQ(bt) \
	(((uint64_t)0x8000000000000000 + ((bt)->frac >> 2)) / \
	    ((bt)->frac >> 1))
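
/*
 * FREQ2BT() converts an event rate in Hz into a struct bintime period:
 * bt->frac becomes roughly 2^64 / freq, i.e. 1/freq seconds expressed as a
 * 64-bit binary fraction.  BT2FREQ() is the rounded inverse.  For example,
 * FREQ2BT(1000, &bt) produces a period of about 1 ms
 * (bt->frac ~= 2^64 / 1000), and BT2FREQ(&bt) on that value returns 1000.
 */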

/*
 * Timer broadcast IPI handler.
 */
int
hardclockintr(void)
{
	struct bintime now;
	struct pcpu_state *state;
	int done;

	if (doconfigtimer() || busy)
		return (FILTER_HANDLED);
	state = DPCPU_PTR(timerstate);
	now = state->now;
	CTR4(KTR_SPARE2, "ipi at %d: now %d.%08x%08x",
	    curcpu, now.sec, (unsigned int)(now.frac >> 32),
	    (unsigned int)(now.frac & 0xffffffff));
	done = handleevents(&now, 0);
	return (done ? FILTER_HANDLED : FILTER_STRAY);
}

/*
 * Handle all events for the specified time on this CPU.
 */
static int
handleevents(struct bintime *now, int fake)
{
	struct bintime t;
	struct trapframe *frame;
	struct pcpu_state *state;
	uintfptr_t pc;
	int usermode;
	int done, runs;

	CTR4(KTR_SPARE2, "handle at %d: now %d.%08x%08x",
	    curcpu, now->sec, (unsigned int)(now->frac >> 32),
	    (unsigned int)(now->frac & 0xffffffff));
	done = 0;
	if (fake) {
		frame = NULL;
		usermode = 0;
		pc = 0;
	} else {
		frame = curthread->td_intr_frame;
		usermode = TRAPF_USERMODE(frame);
		pc = TRAPF_PC(frame);
	}

	state = DPCPU_PTR(timerstate);

	runs = 0;
	while (bintime_cmp(now, &state->nexthard, >=)) {
		bintime_add(&state->nexthard, &hardperiod);
		runs++;
	}
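	/*
	 * Each iteration of the loop above accounts for one hardclock()
	 * period that has elapsed since this CPU last ran its events; the
	 * accumulated count is passed to hardclock_cnt() below so any
	 * missed ticks are processed in one batch.
	 */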
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 &&
	    bintime_cmp(&state->nexthard, &nexthard, >))
		nexthard = state->nexthard;
	if (runs && fake < 2) {
		hardclock_cnt(runs, usermode);
		done = 1;
	}
	runs = 0;
	while (bintime_cmp(now, &state->nextstat, >=)) {
		bintime_add(&state->nextstat, &statperiod);
		runs++;
	}
	if (runs && fake < 2) {
		statclock_cnt(runs, usermode);
		done = 1;
	}
	if (profiling) {
		runs = 0;
		while (bintime_cmp(now, &state->nextprof, >=)) {
			bintime_add(&state->nextprof, &profperiod);
			runs++;
		}
		if (runs && !fake) {
			profclock_cnt(runs, usermode, pc);
			done = 1;
		}
	} else
		state->nextprof = state->nextstat;

#ifdef KDTRACE_HOOKS
	if (fake == 0 && cyclic_clock_func != NULL &&
	    state->nextcyc.sec != -1 &&
	    bintime_cmp(now, &state->nextcyc, >=)) {
		state->nextcyc.sec = -1;
		(*cyclic_clock_func)(frame);
	}
#endif

	getnextcpuevent(&t, 0);
	if (fake == 2) {
		state->nextevent = t;
		return (done);
	}
	ET_HW_LOCK(state);
	if (!busy) {
		state->idle = 0;
		state->nextevent = t;
		loadtimer(now, 0);
	}
	ET_HW_UNLOCK(state);
	return (done);
}

/*
 * Compute the time (binuptime) of the next event on the current CPU.
 */
static void
getnextcpuevent(struct bintime *event, int idle)
{
	struct bintime tmp;
	struct pcpu_state *state;
	int skip;

	state = DPCPU_PTR(timerstate);
	/* Handle hardclock() events. */
	*event = state->nexthard;
	if (idle || (!activetick && !profiling &&
	    (timer->et_flags & ET_FLAGS_PERCPU) == 0)) {
		skip = idle ? 4 : (stathz / 2);
		if (curcpu == CPU_FIRST() && tc_min_ticktock_freq > skip)
			skip = tc_min_ticktock_freq;
		skip = callout_tickstofirst(hz / skip) - 1;
		CTR2(KTR_SPARE2, "skip at %d: %d", curcpu, skip);
		tmp = hardperiod;
		bintime_mul(&tmp, skip);
		bintime_add(event, &tmp);
	}
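	/*
	 * The branch above may defer the next hardclock() event by several
	 * periods: while the CPU is idle (or, for a global timer with
	 * kern.eventtimer.activetick disabled, even while busy), empty ticks
	 * are skipped up to the first pending callout as reported by
	 * callout_tickstofirst(), while still honoring the timecounter's
	 * minimum tick rate on the first CPU.
	 */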
	if (!idle) { /* If the CPU is active, handle other types of events. */
		if (bintime_cmp(event, &state->nextstat, >))
			*event = state->nextstat;
		if (profiling && bintime_cmp(event, &state->nextprof, >))
			*event = state->nextprof;
	}
#ifdef KDTRACE_HOOKS
	if (state->nextcyc.sec != -1 && bintime_cmp(event, &state->nextcyc, >))
		*event = state->nextcyc;
#endif
}

/*
 * Compute the earliest time (binuptime) of the next event among all CPUs.
 */
static void
getnextevent(struct bintime *event)
{
	struct pcpu_state *state;
#ifdef SMP
	int cpu;
#endif
	int c, nonidle;

	state = DPCPU_PTR(timerstate);
	*event = state->nextevent;
	c = curcpu;
	nonidle = !state->idle;
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0) {
#ifdef SMP
		CPU_FOREACH(cpu) {
			if (curcpu == cpu)
				continue;
			state = DPCPU_ID_PTR(cpu, timerstate);
			nonidle += !state->idle;
			if (bintime_cmp(event, &state->nextevent, >)) {
				*event = state->nextevent;
				c = cpu;
			}
		}
#endif
		if (nonidle != 0 && bintime_cmp(event, &nexthard, >))
			*event = nexthard;
	}
	CTR5(KTR_SPARE2, "next at %d: next %d.%08x%08x by %d",
	    curcpu, event->sec, (unsigned int)(event->frac >> 32),
	    (unsigned int)(event->frac & 0xffffffff), c);
}

/* Hardware timer callback function. */
static void
timercb(struct eventtimer *et, void *arg)
{
	struct bintime now;
	struct bintime *next;
	struct pcpu_state *state;
#ifdef SMP
	int cpu, bcast;
#endif

	/* Do not touch anything if somebody is reconfiguring timers. */
	if (busy)
		return;
	/* Update present and next tick times. */
	state = DPCPU_PTR(timerstate);
	if (et->et_flags & ET_FLAGS_PERCPU) {
		next = &state->nexttick;
	} else
		next = &nexttick;
	if (periodic) {
		now = *next;	/* Ex-next tick time becomes present time. */
		bintime_add(next, &timerperiod); /* Next tick in 1 period. */
	} else {
		binuptime(&now);	/* Get present time from hardware. */
		next->sec = -1;		/* Next tick is not scheduled yet. */
	}
	state->now = now;
	CTR4(KTR_SPARE2, "intr at %d: now %d.%08x%08x",
	    curcpu, now.sec, (unsigned int)(now.frac >> 32),
	    (unsigned int)(now.frac & 0xffffffff));

#ifdef SMP
	/* Prepare broadcasting to other CPUs for non-per-CPU timers. */
	bcast = 0;
	if ((et->et_flags & ET_FLAGS_PERCPU) == 0 && smp_started) {
		CPU_FOREACH(cpu) {
			state = DPCPU_ID_PTR(cpu, timerstate);
			ET_HW_LOCK(state);
			state->now = now;
			if (bintime_cmp(&now, &state->nextevent, >=)) {
				state->nextevent.sec++;
				if (curcpu != cpu) {
					state->ipi = 1;
					bcast = 1;
				}
			}
			ET_HW_UNLOCK(state);
		}
	}
#endif

	/* Handle events for this time on this CPU. */
	handleevents(&now, 0);

#ifdef SMP
	/* Broadcast interrupt to other CPUs for non-per-CPU timers. */
	if (bcast) {
		CPU_FOREACH(cpu) {
			if (curcpu == cpu)
				continue;
			state = DPCPU_ID_PTR(cpu, timerstate);
			if (state->ipi) {
				state->ipi = 0;
				ipi_cpu(cpu, IPI_HARDCLOCK);
			}
		}
	}
#endif
}

/*
 * Load new value into hardware timer.
 */
static void
loadtimer(struct bintime *now, int start)
{
	struct pcpu_state *state;
	struct bintime new;
	struct bintime *next;
	uint64_t tmp;
	int eq;

	if (timer->et_flags & ET_FLAGS_PERCPU) {
		state = DPCPU_PTR(timerstate);
		next = &state->nexttick;
	} else
		next = &nexttick;
	if (periodic) {
		if (start) {
			/*
			 * Try to start all periodic timers aligned
			 * to the period to make events synchronous.
			 */
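			/*
			 * The math below works modulo the timer period in
			 * units of 2^-36 s: "tmp" becomes the time already
			 * spent in the current period and "new" the time
			 * remaining in it, i.e. the delay until the next
			 * period boundary.  For example, with a 1 ms period
			 * and 0.4 ms already elapsed, the first event is
			 * programmed 0.6 ms from now, so all CPUs end up
			 * ticking on the same global 1 ms grid.
			 */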
			tmp = ((uint64_t)now->sec << 36) + (now->frac >> 28);
			tmp = (tmp % (timerperiod.frac >> 28)) << 28;
			new.sec = 0;
			new.frac = timerperiod.frac - tmp;
			if (new.frac < tmp)	/* Left less than passed. */
				bintime_add(&new, &timerperiod);
			CTR5(KTR_SPARE2, "load p at %d: now %d.%08x first in %d.%08x",
			    curcpu, now->sec, (unsigned int)(now->frac >> 32),
			    new.sec, (unsigned int)(new.frac >> 32));
			*next = new;
			bintime_add(next, now);
			et_start(timer, &new, &timerperiod);
		}
	} else {
		getnextevent(&new);
		eq = bintime_cmp(&new, next, ==);
		CTR5(KTR_SPARE2, "load at %d: next %d.%08x%08x eq %d",
		    curcpu, new.sec, (unsigned int)(new.frac >> 32),
		    (unsigned int)(new.frac & 0xffffffff),
		    eq);
		if (!eq) {
			*next = new;
			bintime_sub(&new, now);
			et_start(timer, &new, NULL);
		}
	}
}

/*
 * Prepare event timer parameters after configuration changes.
 */
static void
setuptimer(void)
{
	int freq;

	if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
		periodic = 0;
	else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
		periodic = 1;
	singlemul = MIN(MAX(singlemul, 1), 20);
	freq = hz * singlemul;
	while (freq < (profiling ? profhz : stathz))
		freq += hz;
	freq = round_freq(timer, freq);
	FREQ2BT(freq, &timerperiod);
}

/*
 * Reconfigure the specified per-CPU timer on another CPU.  Called from
 * the IPI handler.
 */
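/*
 * The per-CPU "action" field encodes the request: 1 asks this CPU to load
 * (start) its timer for the current time, 2 asks it to stop the timer; the
 * separate "handle" flag requests an immediate handleevents() pass.
 */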
static int
doconfigtimer(void)
{
	struct bintime now;
	struct pcpu_state *state;

	state = DPCPU_PTR(timerstate);
	switch (atomic_load_acq_int(&state->action)) {
	case 1:
		binuptime(&now);
		ET_HW_LOCK(state);
		loadtimer(&now, 1);
		ET_HW_UNLOCK(state);
		state->handle = 0;
		atomic_store_rel_int(&state->action, 0);
		return (1);
	case 2:
		ET_HW_LOCK(state);
		et_stop(timer);
		ET_HW_UNLOCK(state);
		state->handle = 0;
		atomic_store_rel_int(&state->action, 0);
		return (1);
	}
	if (atomic_readandclear_int(&state->handle) && !busy) {
		binuptime(&now);
		handleevents(&now, 0);
		return (1);
	}
	return (0);
}

/*
 * Reconfigure the specified timer.
 * For per-CPU timers, use an IPI to make the other CPUs reconfigure.
 */
static void
configtimer(int start)
{
	struct bintime now, next;
	struct pcpu_state *state;
	int cpu;

	if (start) {
		setuptimer();
		binuptime(&now);
	}
	critical_enter();
	ET_HW_LOCK(DPCPU_PTR(timerstate));
	if (start) {
		/* Initialize time machine parameters. */
		next = now;
		bintime_add(&next, &timerperiod);
		if (periodic)
			nexttick = next;
		else
			nexttick.sec = -1;
		CPU_FOREACH(cpu) {
			state = DPCPU_ID_PTR(cpu, timerstate);
			state->now = now;
			state->nextevent = next;
			if (periodic)
				state->nexttick = next;
			else
				state->nexttick.sec = -1;
			state->nexthard = next;
			state->nextstat = next;
			state->nextprof = next;
			hardclock_sync(cpu);
		}
		busy = 0;
		/* Start global timer or per-CPU timer of this CPU. */
		loadtimer(&now, 1);
	} else {
		busy = 1;
		/* Stop global timer or per-CPU timer of this CPU. */
		et_stop(timer);
	}
	ET_HW_UNLOCK(DPCPU_PTR(timerstate));
#ifdef SMP
	/* If the timer is global or there are no other CPUs yet, we are done. */
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || !smp_started) {
		critical_exit();
		return;
	}
	/* Set reconfigure flags for other CPUs. */
	CPU_FOREACH(cpu) {
		state = DPCPU_ID_PTR(cpu, timerstate);
		atomic_store_rel_int(&state->action,
		    (cpu == curcpu) ? 0 : (start ? 1 : 2));
	}
	/* Broadcast reconfigure IPI. */
	ipi_all_but_self(IPI_HARDCLOCK);
	/* Wait for reconfiguration to complete. */
restart:
	cpu_spinwait();
	CPU_FOREACH(cpu) {
		if (cpu == curcpu)
			continue;
		state = DPCPU_ID_PTR(cpu, timerstate);
		if (atomic_load_acq_int(&state->action))
			goto restart;
	}
#endif
	critical_exit();
}

/*
 * Calculate the nearest frequency supported by the hardware timer.
 */
static int
round_freq(struct eventtimer *et, int freq)
{
	uint64_t div;

	if (et->et_frequency != 0) {
		div = lmax((et->et_frequency + freq / 2) / freq, 1);
		if (et->et_flags & ET_FLAGS_POW2DIV)
			div = 1 << (flsl(div + div / 2) - 1);
		freq = (et->et_frequency + div / 2) / div;
	}
	if (et->et_min_period.sec > 0)
		freq = 0;
	else if (et->et_min_period.frac != 0)
		freq = min(freq, BT2FREQ(&et->et_min_period));
	if (et->et_max_period.sec == 0 && et->et_max_period.frac != 0)
		freq = max(freq, BT2FREQ(&et->et_max_period));
	return (freq);
}
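
/*
 * Worked example (hypothetical hardware): for a timer driven by a
 * 14318182 Hz input clock and a requested freq of 1000 Hz, the divisor is
 * rounded to 14318, giving back roughly 1000 Hz.  If the timer supports
 * only power-of-two divisors (ET_FLAGS_POW2DIV), the divisor becomes 16384
 * and the achievable frequency drops to about 874 Hz; et_min_period and
 * et_max_period then clamp the result to what the hardware can program.
 */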

/*
 * Configure and start event timers (BSP part).
 */
void
cpu_initclocks_bsp(void)
{
	struct pcpu_state *state;
	int base, div, cpu;

	mtx_init(&et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
	CPU_FOREACH(cpu) {
		state = DPCPU_ID_PTR(cpu, timerstate);
		mtx_init(&state->et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
#ifdef KDTRACE_HOOKS
		state->nextcyc.sec = -1;
#endif
	}
#ifdef SMP
	callout_new_inserted = cpu_new_callout;
#endif
	periodic = want_periodic;
	/* Grab the requested timer or the best one present. */
	if (timername[0])
		timer = et_find(timername, 0, 0);
	if (timer == NULL && periodic) {
		timer = et_find(NULL,
		    ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
	}
	if (timer == NULL) {
		timer = et_find(NULL,
		    ET_FLAGS_ONESHOT, ET_FLAGS_ONESHOT);
	}
	if (timer == NULL && !periodic) {
		timer = et_find(NULL,
		    ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
	}
	if (timer == NULL)
		panic("No usable event timer found!");
	et_init(timer, timercb, NULL, NULL);

	/* Adapt to timer capabilities. */
	if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
		periodic = 0;
	else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
		periodic = 1;
	if (timer->et_flags & ET_FLAGS_C3STOP)
		cpu_disable_deep_sleep++;

	/*
	 * We honor the requested 'hz' value.
	 * We want to run stathz in the neighborhood of 128hz.
	 * We would like profhz to run as often as possible.
	 */
	if (singlemul <= 0 || singlemul > 20) {
		if (hz >= 1500 || (hz % 128) == 0)
			singlemul = 1;
		else if (hz >= 750)
			singlemul = 2;
		else
			singlemul = 4;
	}
	if (periodic) {
		base = round_freq(timer, hz * singlemul);
		singlemul = max((base + hz / 2) / hz, 1);
		hz = (base + singlemul / 2) / singlemul;
		if (base <= 128)
			stathz = base;
		else {
			div = base / 128;
			if (div >= singlemul && (div % singlemul) == 0)
				div++;
			stathz = base / div;
		}
		profhz = stathz;
		while ((profhz + stathz) <= 128 * 64)
			profhz += stathz;
		profhz = round_freq(timer, profhz);
	} else {
		hz = round_freq(timer, hz);
		stathz = round_freq(timer, 127);
		profhz = round_freq(timer, stathz * 64);
	}
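	/*
	 * Example of the periodic branch above (illustrative numbers,
	 * assuming round_freq() returns the requested rates unchanged):
	 * with hz = 1000 and singlemul = 2 the timer runs at base = 2000 Hz,
	 * hardclock() uses every 2nd tick, stathz becomes 2000 / 15 = 133 Hz
	 * (near the desired ~128 Hz), and profhz is raised in steps of
	 * stathz to just under 8192 Hz before being rounded to a rate the
	 * hardware supports.
	 */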
	tick = 1000000 / hz;
	FREQ2BT(hz, &hardperiod);
	FREQ2BT(stathz, &statperiod);
	FREQ2BT(profhz, &profperiod);
	ET_LOCK();
	configtimer(1);
	ET_UNLOCK();
}

/*
 * Start per-CPU event timers on APs.
 */
void
cpu_initclocks_ap(void)
{
	struct bintime now;
	struct pcpu_state *state;

	state = DPCPU_PTR(timerstate);
	binuptime(&now);
	ET_HW_LOCK(state);
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 && periodic) {
		state->now = nexttick;
		bintime_sub(&state->now, &timerperiod);
	} else
		state->now = now;
	hardclock_sync(curcpu);
	handleevents(&state->now, 2);
	if (timer->et_flags & ET_FLAGS_PERCPU)
		loadtimer(&now, 1);
	ET_HW_UNLOCK(state);
}

/*
 * Switch to profiling clock rates.
 */
void
cpu_startprofclock(void)
{

	ET_LOCK();
	if (periodic) {
		configtimer(0);
		profiling = 1;
		configtimer(1);
	} else
		profiling = 1;
	ET_UNLOCK();
}

/*
 * Switch to regular clock rates.
 */
void
cpu_stopprofclock(void)
{

	ET_LOCK();
	if (periodic) {
		configtimer(0);
		profiling = 0;
		configtimer(1);
	} else
		profiling = 0;
	ET_UNLOCK();
}

/*
 * Switch to idle mode (all ticks handled).
 */
void
cpu_idleclock(void)
{
	struct bintime now, t;
	struct pcpu_state *state;

	if (idletick || busy ||
	    (periodic && (timer->et_flags & ET_FLAGS_PERCPU))
#ifdef DEVICE_POLLING
	    || curcpu == CPU_FIRST()
#endif
	    )
		return;
	state = DPCPU_PTR(timerstate);
	if (periodic)
		now = state->now;
	else
		binuptime(&now);
	CTR4(KTR_SPARE2, "idle at %d: now %d.%08x%08x",
	    curcpu, now.sec, (unsigned int)(now.frac >> 32),
	    (unsigned int)(now.frac & 0xffffffff));
	getnextcpuevent(&t, 1);
	ET_HW_LOCK(state);
	state->idle = 1;
	state->nextevent = t;
	if (!periodic)
		loadtimer(&now, 0);
	ET_HW_UNLOCK(state);
}

/*
 * Switch to active mode (skip empty ticks).
 */
void
cpu_activeclock(void)
{
	struct bintime now;
	struct pcpu_state *state;
	struct thread *td;

	state = DPCPU_PTR(timerstate);
	if (state->idle == 0 || busy)
		return;
	if (periodic)
		now = state->now;
	else
		binuptime(&now);
	CTR4(KTR_SPARE2, "active at %d: now %d.%08x%08x",
	    curcpu, now.sec, (unsigned int)(now.frac >> 32),
	    (unsigned int)(now.frac & 0xffffffff));
	spinlock_enter();
	td = curthread;
	td->td_intr_nesting_level++;
	handleevents(&now, 1);
	td->td_intr_nesting_level--;
	spinlock_exit();
}

#ifdef KDTRACE_HOOKS
void
clocksource_cyc_set(const struct bintime *t)
{
	struct bintime now;
	struct pcpu_state *state;

	state = DPCPU_PTR(timerstate);
	if (periodic)
		now = state->now;
	else
		binuptime(&now);

	CTR4(KTR_SPARE2, "set_cyc at %d: now %d.%08x%08x",
	    curcpu, now.sec, (unsigned int)(now.frac >> 32),
	    (unsigned int)(now.frac & 0xffffffff));
	CTR4(KTR_SPARE2, "set_cyc at %d: t %d.%08x%08x",
	    curcpu, t->sec, (unsigned int)(t->frac >> 32),
	    (unsigned int)(t->frac & 0xffffffff));

	ET_HW_LOCK(state);
	if (bintime_cmp(t, &state->nextcyc, ==)) {
		ET_HW_UNLOCK(state);
		return;
	}
	state->nextcyc = *t;
	if (bintime_cmp(&state->nextcyc, &state->nextevent, >=)) {
		ET_HW_UNLOCK(state);
		return;
	}
	state->nextevent = state->nextcyc;
	if (!periodic)
		loadtimer(&now, 0);
	ET_HW_UNLOCK(state);
}
#endif

#ifdef SMP
static void
cpu_new_callout(int cpu, int ticks)
{
	struct bintime tmp;
	struct pcpu_state *state;

	CTR3(KTR_SPARE2, "new co at %d: on %d in %d",
	    curcpu, cpu, ticks);
	state = DPCPU_ID_PTR(cpu, timerstate);
	ET_HW_LOCK(state);
	if (state->idle == 0 || busy) {
		ET_HW_UNLOCK(state);
		return;
	}
	/*
	 * If the timer is periodic, just update the next event time for the
	 * target CPU.  If the timer is global, there is a chance it is
	 * already programmed.
	 */
	if (periodic || (timer->et_flags & ET_FLAGS_PERCPU) == 0) {
		tmp = hardperiod;
		bintime_mul(&tmp, ticks - 1);
		bintime_add(&tmp, &state->nexthard);
		if (bintime_cmp(&tmp, &state->nextevent, <))
			state->nextevent = tmp;
		if (periodic ||
		    bintime_cmp(&state->nextevent, &nexttick, >=)) {
			ET_HW_UNLOCK(state);
			return;
		}
	}
	/*
	 * Otherwise we have to wake that CPU up, as we can't get the present
	 * bintime to reprogram the global timer from here.  If the timer is
	 * per-CPU, we by definition can't do it from here.
	 */
	ET_HW_UNLOCK(state);
	if (timer->et_flags & ET_FLAGS_PERCPU) {
		state->handle = 1;
		ipi_cpu(cpu, IPI_HARDCLOCK);
	} else {
		if (!cpu_idle_wakeup(cpu))
			ipi_cpu(cpu, IPI_AST);
	}
}
#endif

/*
 * Report or change the active event timer hardware.
 */
static int
sysctl_kern_eventtimer_timer(SYSCTL_HANDLER_ARGS)
{
	char buf[32];
	struct eventtimer *et;
	int error;

	ET_LOCK();
	et = timer;
	snprintf(buf, sizeof(buf), "%s", et->et_name);
	ET_UNLOCK();
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	ET_LOCK();
	et = timer;
	if (error != 0 || req->newptr == NULL ||
	    strcasecmp(buf, et->et_name) == 0) {
		ET_UNLOCK();
		return (error);
	}
	et = et_find(buf, 0, 0);
	if (et == NULL) {
		ET_UNLOCK();
		return (ENOENT);
	}
	configtimer(0);
	et_free(timer);
	if (et->et_flags & ET_FLAGS_C3STOP)
		cpu_disable_deep_sleep++;
	if (timer->et_flags & ET_FLAGS_C3STOP)
		cpu_disable_deep_sleep--;
	periodic = want_periodic;
	timer = et;
	et_init(timer, timercb, NULL, NULL);
	configtimer(1);
	ET_UNLOCK();
	return (error);
}
SYSCTL_PROC(_kern_eventtimer, OID_AUTO, timer,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_eventtimer_timer, "A", "Chosen event timer");
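
/*
 * Example usage (the timer name is illustrative and depends on the
 * hardware present): `sysctl kern.eventtimer.timer` reports the active
 * timer, and `sysctl kern.eventtimer.timer=HPET` would switch to a timer
 * registered under that name, if one exists.
 */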

/*
 * Report or change the active event timer periodicity.
 */
static int
sysctl_kern_eventtimer_periodic(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = periodic;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	ET_LOCK();
	configtimer(0);
	periodic = want_periodic = val;
	configtimer(1);
	ET_UNLOCK();
	return (error);
}
SYSCTL_PROC(_kern_eventtimer, OID_AUTO, periodic,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_eventtimer_periodic, "I", "Enable event timer periodic mode");