/* FreeBSD/Linux Kernel Cross Reference: sys/mips/mips/tick.c */
1 /*-
2 * Copyright (c) 2006-2007 Bruce M. Simpson.
3 * Copyright (c) 2003-2004 Juli Mallett.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 /*
29 * Simple driver for the 32-bit interval counter built in to all
30 * MIPS32 CPUs.
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD: releng/11.2/sys/mips/mips/tick.c 331722 2018-03-29 02:50:57Z eadler $");
35
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/sysctl.h>
39 #include <sys/bus.h>
40 #include <sys/kernel.h>
41 #include <sys/module.h>
42 #include <sys/rman.h>
43 #include <sys/power.h>
44 #include <sys/smp.h>
45 #include <sys/time.h>
46 #include <sys/timeet.h>
47 #include <sys/timetc.h>
48
49 #include <machine/hwfunc.h>
50 #include <machine/clock.h>
51 #include <machine/locore.h>
52 #include <machine/md_var.h>
53
54 #ifdef INTRNG
55 #include <machine/intr.h>
56 #endif
57
/* Frequency (Hz) at which the COUNT register advances; set early at boot. */
uint64_t counter_freq;

/* Optional extra timecounter supplied by platform code (may stay NULL). */
struct timecounter *platform_timecounter;

/* Per-CPU event-timer period in COUNT cycles; 0 means one-shot/stopped. */
static DPCPU_DEFINE(uint32_t, cycles_per_tick);
/* COUNT cycles per microsecond; consumed by DELAY(). */
static uint32_t cycles_per_usec;

/* Per-CPU state extending the 32-bit COUNT register to a 64-bit tick count. */
static DPCPU_DEFINE(volatile uint32_t, counter_upper);
static DPCPU_DEFINE(volatile uint32_t, counter_lower_last);
/* Per-CPU COMPARE value programmed for the next expected tick. */
static DPCPU_DEFINE(uint32_t, compare_ticks);
/* Per-CPU cycles of interrupt latency not yet turned into event callbacks. */
static DPCPU_DEFINE(uint32_t, lost_ticks);

/*
 * Per-device software state: interrupt plumbing plus the timecounter and
 * event timer this driver registers with the kernel frameworks.
 */
struct clock_softc {
	int intr_rid;
	struct resource *intr_res;
	void *intr_handler;
	struct timecounter tc;
	struct eventtimer et;
};
/* Singleton instance; only unit 0 may attach (see clock_attach()). */
static struct clock_softc *softc;

/*
 * Device methods
 */
static int clock_probe(device_t);
static void clock_identify(driver_t *, device_t);
static int clock_attach(device_t);
static unsigned counter_get_timecount(struct timecounter *tc);
86
87 void
88 mips_timer_early_init(uint64_t clock_hz)
89 {
90 /* Initialize clock early so that we can use DELAY sooner */
91 counter_freq = clock_hz;
92 cycles_per_usec = (clock_hz / (1000 * 1000));
93 }
94
95 void
96 platform_initclocks(void)
97 {
98
99 if (platform_timecounter != NULL)
100 tc_init(platform_timecounter);
101 }
102
/*
 * cpu_ticks backend: return a monotonically increasing 64-bit tick value
 * built from the 32-bit COUNT register plus the per-CPU wrap counter that
 * clock_intr() maintains.
 */
static uint64_t
tick_ticker(void)
{
	uint64_t ret;
	uint32_t ticktock;
	uint32_t t_lower_last, t_upper;

	/*
	 * Disable preemption because we are working with cpu specific data.
	 */
	critical_enter();

	/*
	 * Note that even though preemption is disabled, interrupts are
	 * still enabled. In particular there is a race with clock_intr()
	 * reading the values of 'counter_upper' and 'counter_lower_last'.
	 *
	 * XXX this depends on clock_intr() being executed periodically
	 * so that 'counter_upper' and 'counter_lower_last' are not stale.
	 */
	do {
		/* Retry until 'counter_upper' is stable across both reads. */
		t_upper = DPCPU_GET(counter_upper);
		t_lower_last = DPCPU_GET(counter_lower_last);
	} while (t_upper != DPCPU_GET(counter_upper));

	ticktock = mips_rd_count();

	critical_exit();

	/* COUNT register wrapped around */
	if (ticktock < t_lower_last)
		t_upper++;

	ret = ((uint64_t)t_upper << 32) | ticktock;
	return (ret);
}
139
140 void
141 mips_timer_init_params(uint64_t platform_counter_freq, int double_count)
142 {
143
144 /*
145 * XXX: Do not use printf here: uart code 8250 may use DELAY so this
146 * function should be called before cninit.
147 */
148 counter_freq = platform_counter_freq;
149 /*
150 * XXX: Some MIPS32 cores update the Count register only every two
151 * pipeline cycles.
152 * We know this because of status registers in CP0, make it automatic.
153 */
154 if (double_count != 0)
155 counter_freq /= 2;
156
157 cycles_per_usec = counter_freq / (1 * 1000 * 1000);
158 set_cputicker(tick_ticker, counter_freq, 1);
159 }
160
161 static int
162 sysctl_machdep_counter_freq(SYSCTL_HANDLER_ARGS)
163 {
164 int error;
165 uint64_t freq;
166
167 if (softc == NULL)
168 return (EOPNOTSUPP);
169 freq = counter_freq;
170 error = sysctl_handle_64(oidp, &freq, sizeof(freq), req);
171 if (error == 0 && req->newptr != NULL) {
172 counter_freq = freq;
173 softc->et.et_frequency = counter_freq;
174 softc->tc.tc_frequency = counter_freq;
175 }
176 return (error);
177 }
178
/* machdep.counter_freq: read/write the assumed COUNT frequency in Hz. */
SYSCTL_PROC(_machdep, OID_AUTO, counter_freq, CTLTYPE_U64 | CTLFLAG_RW,
    NULL, 0, sysctl_machdep_counter_freq, "QU",
    "Timecounter frequency in Hz");
182
/*
 * Timecounter read method: return the raw 32-bit COUNT register value.
 */
static unsigned
counter_get_timecount(struct timecounter *tc)
{

	return (mips_rd_count());
}
189
190 /*
191 * Wait for about n microseconds (at least!).
192 */
193 void
194 DELAY(int n)
195 {
196 uint32_t cur, last, delta, usecs;
197
198 /*
199 * This works by polling the timer and counting the number of
200 * microseconds that go by.
201 */
202 last = mips_rd_count();
203 delta = usecs = 0;
204
205 while (n > usecs) {
206 cur = mips_rd_count();
207
208 /* Check to see if the timer has wrapped around. */
209 if (cur < last)
210 delta += cur + (0xffffffff - last) + 1;
211 else
212 delta += cur - last;
213
214 last = cur;
215
216 if (delta >= cycles_per_usec) {
217 usecs += delta / cycles_per_usec;
218 delta %= cycles_per_usec;
219 }
220 }
221 }
222
/*
 * Event timer start method: record the per-CPU period (0 selects one-shot
 * mode) and program COMPARE for the first event.
 */
static int
clock_start(struct eventtimer *et, sbintime_t first, sbintime_t period)
{
	uint32_t fdiv, div, next;

	/* Convert sbintime_t (32.32 fixed point) intervals to COUNT cycles. */
	if (period != 0) {
		div = (et->et_frequency * period) >> 32;
	} else
		div = 0;
	if (first != 0)
		fdiv = (et->et_frequency * first) >> 32;
	else
		fdiv = div;
	DPCPU_SET(cycles_per_tick, div);
	/* Schedule the first event relative to the current COUNT value. */
	next = mips_rd_count() + fdiv;
	DPCPU_SET(compare_ticks, next);
	mips_wr_compare(next);
	return (0);
}
242
/*
 * Event timer stop method: clear the per-CPU period and push COMPARE as
 * far into the future as possible so no further interrupts fire soon.
 */
static int
clock_stop(struct eventtimer *et)
{

	DPCPU_SET(cycles_per_tick, 0);
	mips_wr_compare(0xffffffff);
	return (0);
}
251
/*
 * Device section of file below
 */

/*
 * COUNT/COMPARE interrupt filter: reprogram COMPARE for the next tick,
 * maintain the 64-bit wrap state used by tick_ticker(), account for ticks
 * lost to interrupt latency, and invoke the event timer callback.
 */
static int
clock_intr(void *arg)
{
	struct clock_softc *sc = (struct clock_softc *)arg;
	uint32_t cycles_per_tick;
	uint32_t count, compare_last, compare_next, lost_ticks;

	cycles_per_tick = DPCPU_GET(cycles_per_tick);
	/*
	 * Set next clock edge.
	 */
	count = mips_rd_count();
	compare_last = DPCPU_GET(compare_ticks);
	if (cycles_per_tick > 0) {
		compare_next = count + cycles_per_tick;
		DPCPU_SET(compare_ticks, compare_next);
		mips_wr_compare(compare_next);
	} else /* In one-shot mode timer should be stopped after the event. */
		mips_wr_compare(0xffffffff);

	/* COUNT register wrapped around */
	if (count < DPCPU_GET(counter_lower_last)) {
		DPCPU_SET(counter_upper, DPCPU_GET(counter_upper) + 1);
	}
	DPCPU_SET(counter_lower_last, count);

	/* Periodic mode: make up for ticks missed due to latency. */
	if (cycles_per_tick > 0) {

		/*
		 * Account for the "lost time" between when the timer interrupt
		 * fired and when 'clock_intr' actually started executing.
		 */
		lost_ticks = DPCPU_GET(lost_ticks);
		lost_ticks += count - compare_last;

		/*
		 * If the COUNT and COMPARE registers are no longer in sync
		 * then make up some reasonable value for the 'lost_ticks'.
		 *
		 * This could happen, for e.g., after we resume normal
		 * operations after exiting the debugger.
		 */
		if (lost_ticks > 2 * cycles_per_tick)
			lost_ticks = cycles_per_tick;

		/* Deliver one extra callback per whole tick that was lost. */
		while (lost_ticks >= cycles_per_tick) {
			if (sc->et.et_active)
				sc->et.et_event_cb(&sc->et, sc->et.et_arg);
			lost_ticks -= cycles_per_tick;
		}
		DPCPU_SET(lost_ticks, lost_ticks);
	}
	/* Deliver the callback for the tick that triggered this interrupt. */
	if (sc->et.et_active)
		sc->et.et_event_cb(&sc->et, sc->et.et_arg);
	return (FILTER_HANDLED);
}
311
/*
 * Probe method: always matches, but only for children explicitly added
 * by clock_identify() (BUS_PROBE_NOWILDCARD).
 */
static int
clock_probe(device_t dev)
{

	device_set_desc(dev, "Generic MIPS32 ticker");
	return (BUS_PROBE_NOWILDCARD);
}
319
/*
 * Identify method: add the single "clock" child to the parent bus so
 * clock_probe()/clock_attach() run.
 */
static void
clock_identify(driver_t * drv, device_t parent)
{

	BUS_ADD_CHILD(parent, 0, "clock", 0);
}
326
/*
 * Attach method: hook up the COUNT/COMPARE interrupt (hard IRQ 5), then
 * register the timecounter and per-CPU event timer with the kernel.
 * Only a single unit is supported.
 */
static int
clock_attach(device_t dev)
{
	struct clock_softc *sc;
#ifndef INTRNG
	int error;
#endif

	if (device_get_unit(dev) != 0)
		panic("can't attach more clocks");

	softc = sc = device_get_softc(dev);
#ifdef INTRNG
	/* INTRNG kernels register the filter directly with the framework. */
	cpu_establish_hardintr("clock", clock_intr, NULL, sc, 5, INTR_TYPE_CLK,
	    NULL);
#else
	/* Legacy path: allocate IRQ 5 and install clock_intr as a filter. */
	sc->intr_rid = 0;
	sc->intr_res = bus_alloc_resource(dev,
	    SYS_RES_IRQ, &sc->intr_rid, 5, 5, 1, RF_ACTIVE);
	if (sc->intr_res == NULL) {
		device_printf(dev, "failed to allocate irq\n");
		return (ENXIO);
	}
	error = bus_setup_intr(dev, sc->intr_res, INTR_TYPE_CLK,
	    clock_intr, NULL, sc, &sc->intr_handler);
	if (error != 0) {
		device_printf(dev, "bus_setup_intr returned %d\n", error);
		return (error);
	}
#endif

	/* Register the COUNT register as a timecounter. */
	sc->tc.tc_get_timecount = counter_get_timecount;
	sc->tc.tc_counter_mask = 0xffffffff;
	sc->tc.tc_frequency = counter_freq;
	sc->tc.tc_name = "MIPS32";
	sc->tc.tc_quality = 800;
	sc->tc.tc_priv = sc;
	tc_init(&sc->tc);
	/* Register the COMPARE interrupt as a per-CPU event timer. */
	sc->et.et_name = "MIPS32";
	sc->et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT |
	    ET_FLAGS_PERCPU;
	sc->et.et_quality = 800;
	sc->et.et_frequency = counter_freq;
	sc->et.et_min_period = 0x00004000LLU; /* To be safe. */
	sc->et.et_max_period = (0xfffffffeLLU << 32) / sc->et.et_frequency;
	sc->et.et_start = clock_start;
	sc->et.et_stop = clock_stop;
	sc->et.et_priv = sc;
	et_register(&sc->et);
	return (0);
}
378
/* newbus method table for the clock device. */
static device_method_t clock_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, clock_probe),
	DEVMETHOD(device_identify, clock_identify),
	DEVMETHOD(device_attach, clock_attach),
	DEVMETHOD(device_detach, bus_generic_detach),
	DEVMETHOD(device_shutdown, bus_generic_shutdown),

	{0, 0}
};
389
/* Driver definition; softc sized for struct clock_softc. */
static driver_t clock_driver = {
	"clock",
	clock_methods,
	sizeof(struct clock_softc),
};

static devclass_t clock_devclass;

/* Attach the clock driver under the nexus bus. */
DRIVER_MODULE(clock, nexus, clock_driver, clock_devclass, 0, 0);
/* End of sys/mips/mips/tick.c */