1 /*-
2 * Copyright (c) 2005 Poul-Henning Kamp
3 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30
31 #include "opt_acpi.h"
32
33 #if defined(__amd64__)
34 #define DEV_APIC
35 #else
36 #include "opt_apic.h"
37 #endif
38 #include <sys/param.h>
39 #include <sys/conf.h>
40 #include <sys/bus.h>
41 #include <sys/kernel.h>
42 #include <sys/module.h>
43 #include <sys/proc.h>
44 #include <sys/rman.h>
45 #include <sys/mman.h>
46 #include <sys/time.h>
47 #include <sys/smp.h>
48 #include <sys/sysctl.h>
49 #include <sys/timeet.h>
50 #include <sys/timetc.h>
51 #include <sys/vdso.h>
52
53 #include <contrib/dev/acpica/include/acpi.h>
54 #include <contrib/dev/acpica/include/accommon.h>
55
56 #include <dev/acpica/acpivar.h>
57 #include <dev/acpica/acpi_hpet.h>
58
59 #ifdef DEV_APIC
60 #include "pcib_if.h"
61 #endif
62
/*
 * PCI vendor IDs reported in the HPET general capabilities register.
 * Used below to apply chipset-specific interrupt routing quirks.
 */
#define	HPET_VENDID_AMD		0x4353
#define	HPET_VENDID_AMD2	0x1022
#define	HPET_VENDID_HYGON	0x1d94
#define	HPET_VENDID_INTEL	0x8086
#define	HPET_VENDID_NVIDIA	0x10de
#define	HPET_VENDID_SW		0x1166

/* Serialization context for ACPI accesses made by this driver. */
ACPI_SERIAL_DECL(hpet, "ACPI HPET support");

static devclass_t hpet_devclass;

/* ACPI CA debugging */
#define	_COMPONENT	ACPI_TIMER
ACPI_MODULE_NAME("HPET")
77
/* Per-device software state for one HPET function. */
struct hpet_softc {
	device_t		dev;
	int			mem_rid;	/* rid of register memory BAR */
	int			intr_rid;	/* rid of shared legacy IRQ */
	int			irq;		/* shared IRQ number, -1 if none */
	int			useirq;		/* bitmask of timers on shared IRQ */
	int			legacy_route;	/* legacy replacement routing in use */
	int			per_cpu;	/* requested per-CPU event timers */
	uint32_t		allowed_irqs;	/* IRQ bitmask allowed for routing */
	struct resource		*mem_res;	/* register window */
	struct resource		*intr_res;	/* shared IRQ resource */
	void			*intr_handle;	/* shared IRQ cookie */
	ACPI_HANDLE		handle;		/* ACPI handle of the device */
	uint32_t		acpi_uid;	/* _UID from ACPI namespace */
	uint64_t		freq;		/* main counter frequency, Hz */
	uint32_t		caps;		/* general capabilities register */
	struct timecounter	tc;		/* registered for unit 0 only */
	/* Per-comparator state. */
	struct hpet_timer {
		struct eventtimer	et;	/* exported event timer */
		struct hpet_softc	*sc;	/* back pointer to softc */
		int			num;	/* hardware comparator index */
		int			mode;	/* current operating mode */
#define	TIMER_STOPPED	0
#define	TIMER_PERIODIC	1
#define	TIMER_ONESHOT	2
		int			intr_rid;	/* rid of dedicated IRQ */
		int			irq;	/* dedicated IRQ/MSI, <0 if none */
		int			pcpu_cpu;	/* bound CPU, -1 if not per-CPU */
		int			pcpu_misrouted;	/* misrouted interrupt count */
		int			pcpu_master;	/* master timer idx, -1 if none */
		int			pcpu_slaves[MAXCPU];	/* slave idx per CPU */
		struct resource		*intr_res;	/* dedicated IRQ resource */
		void			*intr_handle;	/* dedicated IRQ cookie */
		uint32_t		caps;	/* shadow of cap/config register */
		uint32_t		vectors;	/* supported IRQ routing mask */
		uint32_t		div;	/* periodic interval in ticks */
		uint32_t		next;	/* next comparator value */
		char			name[8];	/* storage for et_name */
	} t[32];
	int			num_timers;	/* number of usable comparators */
	struct cdev		*pdev;		/* /dev/hpetN node */
	int			mmap_allow;	/* allow userland mmap of registers */
	int			mmap_allow_write;	/* allow writable mmap */
};
122
static d_open_t hpet_open;
static d_mmap_t hpet_mmap;

/* Character device switch for /dev/hpetN (userland register mapping). */
static struct cdevsw hpet_cdevsw = {
	.d_version =	D_VERSION,
	.d_name =	"hpet",
	.d_open =	hpet_open,
	.d_mmap =	hpet_mmap,
};

static u_int hpet_get_timecount(struct timecounter *tc);
static void hpet_test(struct hpet_softc *sc);

/* ACPI PNP IDs identifying an HPET device node. */
static char *hpet_ids[] = { "PNP0103", NULL };

/* Knob to disable acpi_hpet device */
bool acpi_hpet_disabled = false;
140
141 static u_int
142 hpet_get_timecount(struct timecounter *tc)
143 {
144 struct hpet_softc *sc;
145
146 sc = tc->tc_priv;
147 return (bus_read_4(sc->mem_res, HPET_MAIN_COUNTER));
148 }
149
/*
 * Fill the vdso timehands structure so that userland can read the HPET
 * main counter directly via a mapped /dev/hpetN.  Returns non-zero only
 * when userland mapping of the registers is currently allowed.
 */
uint32_t
hpet_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc)
{
	struct hpet_softc *sc;

	sc = tc->tc_priv;
	vdso_th->th_algo = VDSO_TH_ALGO_X86_HPET;
	vdso_th->th_x86_shift = 0;
	/* The device unit selects which hpetN mapping userland must use. */
	vdso_th->th_x86_hpet_idx = device_get_unit(sc->dev);
	vdso_th->th_x86_pvc_last_systime = 0;
	vdso_th->th_x86_pvc_stable_mask = 0;
	bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
	return (sc->mmap_allow != 0);
}
164
#ifdef COMPAT_FREEBSD32
/*
 * 32-bit compat variant of hpet_vdso_timehands(); fills the 32-bit
 * vdso timehands layout with the same data.
 */
uint32_t
hpet_vdso_timehands32(struct vdso_timehands32 *vdso_th32,
    struct timecounter *tc)
{
	struct hpet_softc *sc;

	sc = tc->tc_priv;
	vdso_th32->th_algo = VDSO_TH_ALGO_X86_HPET;
	vdso_th32->th_x86_shift = 0;
	vdso_th32->th_x86_hpet_idx = device_get_unit(sc->dev);
	vdso_th32->th_x86_pvc_last_systime = 0;
	vdso_th32->th_x86_pvc_stable_mask = 0;
	bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res));
	return (sc->mmap_allow != 0);
}
#endif
182
183 static void
184 hpet_enable(struct hpet_softc *sc)
185 {
186 uint32_t val;
187
188 val = bus_read_4(sc->mem_res, HPET_CONFIG);
189 if (sc->legacy_route)
190 val |= HPET_CNF_LEG_RT;
191 else
192 val &= ~HPET_CNF_LEG_RT;
193 val |= HPET_CNF_ENABLE;
194 bus_write_4(sc->mem_res, HPET_CONFIG, val);
195 }
196
197 static void
198 hpet_disable(struct hpet_softc *sc)
199 {
200 uint32_t val;
201
202 val = bus_read_4(sc->mem_res, HPET_CONFIG);
203 val &= ~HPET_CNF_ENABLE;
204 bus_write_4(sc->mem_res, HPET_CONFIG, val);
205 }
206
/*
 * Event timer start method: program the comparator for one-shot or
 * periodic operation.  For per-CPU groups the master's et_priv is
 * redirected to the slave timer owned by the current CPU.
 */
static int
hpet_start(struct eventtimer *et, sbintime_t first, sbintime_t period)
{
	struct hpet_timer *mt = (struct hpet_timer *)et->et_priv;
	struct hpet_timer *t;
	struct hpet_softc *sc = mt->sc;
	uint32_t fdiv, now;

	t = (mt->pcpu_master < 0) ? mt : &sc->t[mt->pcpu_slaves[curcpu]];
	if (period != 0) {
		t->mode = TIMER_PERIODIC;
		/* Convert 32.32 fixed-point seconds to main counter ticks. */
		t->div = (sc->freq * period) >> 32;
	} else {
		t->mode = TIMER_ONESHOT;
		t->div = 0;
	}
	if (first != 0)
		fdiv = (sc->freq * first) >> 32;
	else
		fdiv = t->div;
	/* Timers on the shared IRQ: clear a possibly stale status bit. */
	if (t->irq < 0)
		bus_write_4(sc->mem_res, HPET_ISR, 1 << t->num);
	t->caps |= HPET_TCNF_INT_ENB;
	now = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
restart:
	t->next = now + fdiv;
	if (t->mode == TIMER_PERIODIC && (t->caps & HPET_TCAP_PER_INT)) {
		t->caps |= HPET_TCNF_TYPE;
		bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num),
		    t->caps | HPET_TCNF_VAL_SET);
		/*
		 * With VAL_SET armed the first comparator write sets the
		 * comparator itself and the second sets the period
		 * accumulator; the order of these two writes matters.
		 */
		bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
		    t->next);
		bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
		    t->div);
	} else {
		t->caps &= ~HPET_TCNF_TYPE;
		bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num),
		    t->caps);
		bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
		    t->next);
	}
	/*
	 * If the main counter has already passed (or nearly reached) the
	 * programmed value, the interrupt would be lost; retry with a
	 * doubled delta until the comparator lands safely in the future.
	 */
	now = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	if ((int32_t)(now - t->next + HPET_MIN_CYCLES) >= 0) {
		fdiv *= 2;
		goto restart;
	}
	return (0);
}
255
256 static int
257 hpet_stop(struct eventtimer *et)
258 {
259 struct hpet_timer *mt = (struct hpet_timer *)et->et_priv;
260 struct hpet_timer *t;
261 struct hpet_softc *sc = mt->sc;
262
263 t = (mt->pcpu_master < 0) ? mt : &sc->t[mt->pcpu_slaves[curcpu]];
264 t->mode = TIMER_STOPPED;
265 t->caps &= ~(HPET_TCNF_INT_ENB | HPET_TCNF_TYPE);
266 bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num), t->caps);
267 return (0);
268 }
269
/*
 * Interrupt filter for a single comparator.  Handles misrouted per-CPU
 * interrupts, reprograms software-emulated periodic mode, and invokes
 * the event timer callback of the (master) timer.
 */
static int
hpet_intr_single(void *arg)
{
	struct hpet_timer *t = (struct hpet_timer *)arg;
	struct hpet_timer *mt;
	struct hpet_softc *sc = t->sc;
	uint32_t now;

	if (t->mode == TIMER_STOPPED)
		return (FILTER_STRAY);
	/* Check that per-CPU timer interrupt reached right CPU. */
	if (t->pcpu_cpu >= 0 && t->pcpu_cpu != curcpu) {
		/* Rate-limit the complaint to every 32nd occurrence. */
		if ((++t->pcpu_misrouted) % 32 == 0) {
			printf("HPET interrupt routed to the wrong CPU"
			    " (timer %d CPU %d -> %d)!\n",
			    t->num, t->pcpu_cpu, curcpu);
		}

		/*
		 * Reload timer, hoping that next time may be more lucky
		 * (system will manage proper interrupt binding).
		 */
		if ((t->mode == TIMER_PERIODIC &&
		    (t->caps & HPET_TCAP_PER_INT) == 0) ||
		    t->mode == TIMER_ONESHOT) {
			/* Retry roughly 1/8 second from now. */
			t->next = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER) +
			    sc->freq / 8;
			bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
			    t->next);
		}
		return (FILTER_HANDLED);
	}
	if (t->mode == TIMER_PERIODIC &&
	    (t->caps & HPET_TCAP_PER_INT) == 0) {
		/*
		 * Emulated periodic mode: advance the comparator by one
		 * period, but if we have fallen more than half a period
		 * behind, resynchronize to avoid an interrupt storm.
		 */
		t->next += t->div;
		now = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
		if ((int32_t)((now + t->div / 2) - t->next) > 0)
			t->next = now + t->div / 2;
		bus_write_4(sc->mem_res,
		    HPET_TIMER_COMPARATOR(t->num), t->next);
	} else if (t->mode == TIMER_ONESHOT)
		t->mode = TIMER_STOPPED;
	/* Deliver the event via the group master for per-CPU timers. */
	mt = (t->pcpu_master < 0) ? t : &sc->t[t->pcpu_master];
	if (mt->et.et_active)
		mt->et.et_event_cb(&mt->et, mt->et.et_arg);
	return (FILTER_HANDLED);
}
317
318 static int
319 hpet_intr(void *arg)
320 {
321 struct hpet_softc *sc = (struct hpet_softc *)arg;
322 int i;
323 uint32_t val;
324
325 val = bus_read_4(sc->mem_res, HPET_ISR);
326 if (val) {
327 bus_write_4(sc->mem_res, HPET_ISR, val);
328 val &= sc->useirq;
329 for (i = 0; i < sc->num_timers; i++) {
330 if ((val & (1 << i)) == 0)
331 continue;
332 hpet_intr_single(&sc->t[i]);
333 }
334 return (FILTER_HANDLED);
335 }
336 return (FILTER_STRAY);
337 }
338
339 uint32_t
340 hpet_get_uid(device_t dev)
341 {
342 struct hpet_softc *sc;
343
344 sc = device_get_softc(dev);
345 return (sc->acpi_uid);
346 }
347
/*
 * AcpiWalkNamespace() callback: locate an HPET device node whose _UID
 * matches the id passed in 'context'.  A node without a readable _UID
 * is accepted as well.  The matching device_t is returned via 'status'.
 */
static ACPI_STATUS
hpet_find(ACPI_HANDLE handle, UINT32 level, void *context,
    void **status)
{
	char **ids;
	uint32_t id = (uint32_t)(uintptr_t)context;
	uint32_t uid = 0;

	/* Skip nodes that do not carry a known HPET hardware ID. */
	for (ids = hpet_ids; *ids != NULL; ids++) {
		if (acpi_MatchHid(handle, *ids))
			break;
	}
	if (*ids == NULL)
		return (AE_OK);
	if (ACPI_FAILURE(acpi_GetInteger(handle, "_UID", &uid)) ||
	    id == uid)
		*status = acpi_get_device(handle);
	return (AE_OK);
}
367
368 /*
369 * Find an existing IRQ resource that matches the requested IRQ range
370 * and return its RID. If one is not found, use a new RID.
371 */
372 static int
373 hpet_find_irq_rid(device_t dev, u_long start, u_long end)
374 {
375 rman_res_t irq;
376 int error, rid;
377
378 for (rid = 0;; rid++) {
379 error = bus_get_resource(dev, SYS_RES_IRQ, rid, &irq, NULL);
380 if (error != 0 || (start <= irq && irq <= end))
381 return (rid);
382 }
383 }
384
385 static int
386 hpet_open(struct cdev *cdev, int oflags, int devtype, struct thread *td)
387 {
388 struct hpet_softc *sc;
389
390 sc = cdev->si_drv1;
391 if (!sc->mmap_allow)
392 return (EPERM);
393 else
394 return (0);
395 }
396
397 static int
398 hpet_mmap(struct cdev *cdev, vm_ooffset_t offset, vm_paddr_t *paddr,
399 int nprot, vm_memattr_t *memattr)
400 {
401 struct hpet_softc *sc;
402
403 sc = cdev->si_drv1;
404 if (offset >= rman_get_size(sc->mem_res))
405 return (EINVAL);
406 if (!sc->mmap_allow_write && (nprot & PROT_WRITE))
407 return (EPERM);
408 *paddr = rman_get_start(sc->mem_res) + offset;
409 *memattr = VM_MEMATTR_UNCACHEABLE;
410
411 return (0);
412 }
413
/* Discover the HPET via the ACPI table of the same name. */
static void
hpet_identify(driver_t *driver, device_t parent)
{
	ACPI_TABLE_HPET *hpet;
	ACPI_STATUS status;
	device_t child;
	int i;

	/* Only one HPET device can be added. */
	if (devclass_get_device(hpet_devclass, 0))
		return;
	/* ACPI table instances are 1-based; iterate until none is left. */
	for (i = 1; ; i++) {
		/* Search for HPET table. */
		status = AcpiGetTable(ACPI_SIG_HPET, i, (ACPI_TABLE_HEADER **)&hpet);
		if (ACPI_FAILURE(status))
			return;
		/* Search for HPET device with same ID. */
		child = NULL;
		AcpiWalkNamespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
		    100, hpet_find, NULL, (void *)(uintptr_t)hpet->Sequence,
		    (void *)&child);
		/* If found - let it be probed in normal way. */
		if (child) {
			/* Supply the register window if ACPI did not. */
			if (bus_get_resource(child, SYS_RES_MEMORY, 0,
			    NULL, NULL) != 0)
				bus_set_resource(child, SYS_RES_MEMORY, 0,
				    hpet->Address.Address, HPET_MEM_WIDTH);
			continue;
		}
		/* If not - create it from table info. */
		child = BUS_ADD_CHILD(parent, 2, "hpet", 0);
		if (child == NULL) {
			printf("%s: can't add child\n", __func__);
			continue;
		}
		bus_set_resource(child, SYS_RES_MEMORY, 0, hpet->Address.Address,
		    HPET_MEM_WIDTH);
	}
}
454
/*
 * Probe method: accept ACPI HPET nodes matching our PNP IDs, and also
 * table-created children (those have no ACPI handle).
 */
static int
hpet_probe(device_t dev)
{
	ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);

	if (acpi_disabled("hpet") || acpi_hpet_disabled)
		return (ENXIO);
	/* Children added by hpet_identify() carry no ACPI handle. */
	if (acpi_get_handle(dev) != NULL &&
	    ACPI_ID_PROBE(device_get_parent(dev), dev, hpet_ids) == NULL)
		return (ENXIO);

	device_set_desc(dev, "High Precision Event Timer");
	return (0);
}
469
/*
 * Attach method: map the register window, read capabilities, register
 * the timecounter (unit 0 only), allocate per-timer interrupts (legacy
 * route, MSI, or shared IRQ), group timers for per-CPU operation,
 * register event timers, and create /dev/hpetN for userland mapping.
 */
static int
hpet_attach(device_t dev)
{
	struct hpet_softc *sc;
	struct hpet_timer *t;
	struct make_dev_args mda;
	int i, j, num_msi, num_timers, num_percpu_et, num_percpu_t, cur_cpu;
	int pcpu_master, error;
	static int maxhpetet = 0;	/* global count of registered event timers */
	uint32_t val, val2, cvectors, dvectors;
	uint16_t vendor, rev;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->handle = acpi_get_handle(dev);

	sc->mem_rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL)
		return (ENOMEM);

	/* Validate that we can access the whole region. */
	if (rman_get_size(sc->mem_res) < HPET_MEM_WIDTH) {
		device_printf(dev, "memory region width %jd too small\n",
		    rman_get_size(sc->mem_res));
		bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
		return (ENXIO);
	}

	/* Be sure timer is enabled. */
	hpet_enable(sc);

	/* Read basic statistics about the timer. */
	val = bus_read_4(sc->mem_res, HPET_PERIOD);
	if (val == 0) {
		device_printf(dev, "invalid period\n");
		hpet_disable(sc);
		bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
		return (ENXIO);
	}

	/* HPET_PERIOD is femtoseconds per tick; convert to Hz, rounded. */
	sc->freq = (1000000000000000LL + val / 2) / val;
	sc->caps = bus_read_4(sc->mem_res, HPET_CAPABILITIES);
	vendor = (sc->caps & HPET_CAP_VENDOR_ID) >> 16;
	rev = sc->caps & HPET_CAP_REV_ID;
	num_timers = 1 + ((sc->caps & HPET_CAP_NUM_TIM) >> 8);
	/*
	 * ATI/AMD violates IA-PC HPET (High Precision Event Timers)
	 * Specification and provides an off by one number
	 * of timers/comparators.
	 * Additionally, they use unregistered value in VENDOR_ID field.
	 */
	if (vendor == HPET_VENDID_AMD && rev < 0x10 && num_timers > 0)
		num_timers--;
	sc->num_timers = num_timers;
	if (bootverbose) {
		device_printf(dev,
		    "vendor 0x%x, rev 0x%x, %jdHz%s, %d timers,%s\n",
		    vendor, rev, sc->freq,
		    (sc->caps & HPET_CAP_COUNT_SIZE) ? " 64bit" : "",
		    num_timers,
		    (sc->caps & HPET_CAP_LEG_RT) ? " legacy route" : "");
	}
	/* Initialize software state and read hardware caps per timer. */
	for (i = 0; i < num_timers; i++) {
		t = &sc->t[i];
		t->sc = sc;
		t->num = i;
		t->mode = TIMER_STOPPED;
		t->intr_rid = -1;
		t->irq = -1;
		t->pcpu_cpu = -1;
		t->pcpu_misrouted = 0;
		t->pcpu_master = -1;
		t->caps = bus_read_4(sc->mem_res, HPET_TIMER_CAP_CNF(i));
		t->vectors = bus_read_4(sc->mem_res, HPET_TIMER_CAP_CNF(i) + 4);
		if (bootverbose) {
			device_printf(dev,
			    " t%d: irqs 0x%08x (%d)%s%s%s\n", i,
			    t->vectors, (t->caps & HPET_TCNF_INT_ROUTE) >> 9,
			    (t->caps & HPET_TCAP_FSB_INT_DEL) ? ", MSI" : "",
			    (t->caps & HPET_TCAP_SIZE) ? ", 64bit" : "",
			    (t->caps & HPET_TCAP_PER_INT) ? ", periodic" : "");
		}
	}
	if (testenv("debug.acpi.hpet_test"))
		hpet_test(sc);
	/*
	 * Don't attach if the timer never increments.  Since the spec
	 * requires it to be at least 10 MHz, it has to change in 1 us.
	 */
	val = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	DELAY(1);
	val2 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	if (val == val2) {
		device_printf(dev, "HPET never increments, disabling\n");
		hpet_disable(sc);
		bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
		return (ENXIO);
	}
	/* Announce first HPET as timecounter. */
	if (device_get_unit(dev) == 0) {
		/*
		 * NOTE(review): the trailing commas below are the comma
		 * operator, not typos; the statements behave exactly as
		 * if separated by semicolons.
		 */
		sc->tc.tc_get_timecount = hpet_get_timecount,
		sc->tc.tc_counter_mask = ~0u,
		sc->tc.tc_name = "HPET",
		sc->tc.tc_quality = 950,
		sc->tc.tc_frequency = sc->freq;
		sc->tc.tc_priv = sc;
		sc->tc.tc_fill_vdso_timehands = hpet_vdso_timehands;
#ifdef COMPAT_FREEBSD32
		sc->tc.tc_fill_vdso_timehands32 = hpet_vdso_timehands32;
#endif
		tc_init(&sc->tc);
	}
	/* If not disabled - setup and announce event timers. */
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "clock", &i) == 0 && i == 0)
		return (0);

	/* Check whether we can and want legacy routing. */
	sc->legacy_route = 0;
	resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "legacy_route", &sc->legacy_route);
	if ((sc->caps & HPET_CAP_LEG_RT) == 0)
		sc->legacy_route = 0;
	if (sc->legacy_route) {
		/* Timers 0/1 are hard-wired in legacy mode; ignore masks. */
		sc->t[0].vectors = 0;
		sc->t[1].vectors = 0;
	}

	/* Check what IRQs we want use. */
	/* By default allow any PCI IRQs. */
	sc->allowed_irqs = 0xffff0000;
	/*
	 * HPETs in AMD chipsets before SB800 have problems with IRQs >= 16
	 * Lower are also not always working for different reasons.
	 * SB800 fixed it, but seems do not implements level triggering
	 * properly, that makes it very unreliable - it freezes after any
	 * interrupt loss.  Avoid legacy IRQs for AMD.
	 */
	if (vendor == HPET_VENDID_AMD || vendor == HPET_VENDID_AMD2 ||
	    vendor == HPET_VENDID_HYGON)
		sc->allowed_irqs = 0x00000000;
	/*
	 * NVidia MCP5x chipsets have number of unexplained interrupt
	 * problems.  For some reason, using HPET interrupts breaks HDA sound.
	 */
	if (vendor == HPET_VENDID_NVIDIA && rev <= 0x01)
		sc->allowed_irqs = 0x00000000;
	/*
	 * ServerWorks HT1000 reported to have problems with IRQs >= 16.
	 * Lower IRQs are working, but allowed mask is not set correctly.
	 * Legacy_route mode works fine.
	 */
	if (vendor == HPET_VENDID_SW && rev <= 0x01)
		sc->allowed_irqs = 0x00000000;
	/*
	 * Neither QEMU nor VirtualBox report supported IRQs correctly.
	 * The only way to use HPET there is to specify IRQs manually
	 * and/or use legacy_route.  Legacy_route mode works on both.
	 */
	if (vm_guest)
		sc->allowed_irqs = 0x00000000;
	/* Let user override. */
	resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "allowed_irqs", &sc->allowed_irqs);

	/* Get how much per-CPU timers we should try to provide. */
	sc->per_cpu = 1;
	resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "per_cpu", &sc->per_cpu);

	num_msi = 0;
	sc->useirq = 0;
	/* Find IRQ vectors for all timers. */
	cvectors = sc->allowed_irqs & 0xffff0000;	/* shared candidates */
	dvectors = sc->allowed_irqs & 0x0000ffff;	/* dedicated candidates */
	if (sc->legacy_route)
		dvectors &= 0x0000fefe;		/* IRQ 0/8 taken by legacy route */
	for (i = 0; i < num_timers; i++) {
		t = &sc->t[i];
		/* Legacy route pins timer 0 to IRQ0 and timer 1 to IRQ8. */
		if (sc->legacy_route && i < 2)
			t->irq = (i == 0) ? 0 : 8;
#ifdef DEV_APIC
		else if (t->caps & HPET_TCAP_FSB_INT_DEL) {
			/* Prefer an MSI vector when FSB delivery works. */
			if ((j = PCIB_ALLOC_MSIX(
			    device_get_parent(device_get_parent(dev)), dev,
			    &t->irq))) {
				device_printf(dev,
				    "Can't allocate interrupt for t%d: %d\n",
				    i, j);
			}
		}
#endif
		else if (dvectors & t->vectors) {
			/* Claim the lowest supported dedicated IRQ. */
			t->irq = ffs(dvectors & t->vectors) - 1;
			dvectors &= ~(1 << t->irq);
		}
		if (t->irq >= 0) {
			t->intr_rid = hpet_find_irq_rid(dev, t->irq, t->irq);
			t->intr_res = bus_alloc_resource(dev, SYS_RES_IRQ,
			    &t->intr_rid, t->irq, t->irq, 1, RF_ACTIVE);
			if (t->intr_res == NULL) {
				t->irq = -1;
				device_printf(dev,
				    "Can't map interrupt for t%d.\n", i);
			} else if (bus_setup_intr(dev, t->intr_res,
			    INTR_TYPE_CLK, hpet_intr_single, NULL, t,
			    &t->intr_handle) != 0) {
				t->irq = -1;
				device_printf(dev,
				    "Can't setup interrupt for t%d.\n", i);
			} else {
				bus_describe_intr(dev, t->intr_res,
				    t->intr_handle, "t%d", i);
				num_msi++;
			}
		}
		/* Fall back to the shared IRQ when possible. */
		if (t->irq < 0 && (cvectors & t->vectors) != 0) {
			cvectors &= t->vectors;
			sc->useirq |= (1 << i);
		}
	}
	/* Legacy route is useless if neither pinned IRQ was obtained. */
	if (sc->legacy_route && sc->t[0].irq < 0 && sc->t[1].irq < 0)
		sc->legacy_route = 0;
	if (sc->legacy_route)
		hpet_enable(sc);	/* reprogram LEG_RT bit */
	/* Group timers for per-CPU operation. */
	num_percpu_et = min(num_msi / mp_ncpus, sc->per_cpu);
	num_percpu_t = num_percpu_et * mp_ncpus;
	pcpu_master = 0;
	cur_cpu = CPU_FIRST();
	for (i = 0; i < num_timers; i++) {
		t = &sc->t[i];
		if (t->irq >= 0 && num_percpu_t > 0) {
			/* First timer of each round-robin pass is master. */
			if (cur_cpu == CPU_FIRST())
				pcpu_master = i;
			t->pcpu_cpu = cur_cpu;
			t->pcpu_master = pcpu_master;
			sc->t[pcpu_master].
			    pcpu_slaves[cur_cpu] = i;
			bus_bind_intr(dev, t->intr_res, cur_cpu);
			cur_cpu = CPU_NEXT(cur_cpu);
			num_percpu_t--;
		} else if (t->irq >= 0)
			bus_bind_intr(dev, t->intr_res, CPU_FIRST());
	}
	/* Clear any stale interrupt status bits. */
	bus_write_4(sc->mem_res, HPET_ISR, 0xffffffff);
	sc->irq = -1;
	/* If at least one timer needs legacy IRQ - set it up. */
	if (sc->useirq) {
		/* Request the lowest contiguous run of allowed shared IRQs. */
		j = i = fls(cvectors) - 1;
		while (j > 0 && (cvectors & (1 << (j - 1))) != 0)
			j--;
		sc->intr_rid = hpet_find_irq_rid(dev, j, i);
		sc->intr_res = bus_alloc_resource(dev, SYS_RES_IRQ,
		    &sc->intr_rid, j, i, 1, RF_SHAREABLE | RF_ACTIVE);
		if (sc->intr_res == NULL)
			device_printf(dev, "Can't map interrupt.\n");
		else if (bus_setup_intr(dev, sc->intr_res, INTR_TYPE_CLK,
		    hpet_intr, NULL, sc, &sc->intr_handle) != 0) {
			device_printf(dev, "Can't setup interrupt.\n");
		} else {
			sc->irq = rman_get_start(sc->intr_res);
			/* Bind IRQ to BSP to avoid live migration. */
			bus_bind_intr(dev, sc->intr_res, CPU_FIRST());
		}
	}
	/* Program and announce event timers. */
	for (i = 0; i < num_timers; i++) {
		t = &sc->t[i];
		t->caps &= ~(HPET_TCNF_FSB_EN | HPET_TCNF_INT_ROUTE);
		t->caps &= ~(HPET_TCNF_VAL_SET | HPET_TCNF_INT_ENB);
		t->caps &= ~(HPET_TCNF_INT_TYPE);
		t->caps |= HPET_TCNF_32MODE;
		if (t->irq >= 0 && sc->legacy_route && i < 2) {
			/* Legacy route doesn't need more configuration. */
		} else
#ifdef DEV_APIC
		if ((t->caps & HPET_TCAP_FSB_INT_DEL) && t->irq >= 0) {
			uint64_t addr;
			uint32_t data;

			/* Program the FSB (MSI) address/data registers. */
			if (PCIB_MAP_MSI(
			    device_get_parent(device_get_parent(dev)), dev,
			    t->irq, &addr, &data) == 0) {
				bus_write_4(sc->mem_res,
				    HPET_TIMER_FSB_ADDR(i), addr);
				bus_write_4(sc->mem_res,
				    HPET_TIMER_FSB_VAL(i), data);
				t->caps |= HPET_TCNF_FSB_EN;
			} else
				t->irq = -2;	/* MSI mapping failed */
		} else
#endif
		if (t->irq >= 0)
			t->caps |= (t->irq << 9);
		else if (sc->irq >= 0 && (t->vectors & (1 << sc->irq)))
			/* Shared IRQ uses level triggering. */
			t->caps |= (sc->irq << 9) | HPET_TCNF_INT_TYPE;
		bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(i), t->caps);
		/* Skip event timers without set up IRQ. */
		if (t->irq < 0 &&
		    (sc->irq < 0 || (t->vectors & (1 << sc->irq)) == 0))
			continue;
		/* Announce the reset. */
		if (maxhpetet == 0)
			t->et.et_name = "HPET";
		else {
			sprintf(t->name, "HPET%d", maxhpetet);
			t->et.et_name = t->name;
		}
		t->et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT;
		t->et.et_quality = 450;
		if (t->pcpu_master >= 0) {
			t->et.et_flags |= ET_FLAGS_PERCPU;
			t->et.et_quality += 100;
		} else if (mp_ncpus >= 8)
			t->et.et_quality -= 100;
		if ((t->caps & HPET_TCAP_PER_INT) == 0)
			t->et.et_quality -= 10;
		t->et.et_frequency = sc->freq;
		t->et.et_min_period =
		    ((uint64_t)(HPET_MIN_CYCLES * 2) << 32) / sc->freq;
		t->et.et_max_period = (0xfffffffeLLU << 32) / sc->freq;
		t->et.et_start = hpet_start;
		t->et.et_stop = hpet_stop;
		t->et.et_priv = &sc->t[i];
		/* Only masters (or stand-alone timers) are registered. */
		if (t->pcpu_master < 0 || t->pcpu_master == i) {
			et_register(&t->et);
			maxhpetet++;
		}
	}
	acpi_GetInteger(sc->handle, "_UID", &sc->acpi_uid);

	/* Create /dev/hpetN so userland can mmap the registers. */
	make_dev_args_init(&mda);
	mda.mda_devsw = &hpet_cdevsw;
	mda.mda_uid = UID_ROOT;
	mda.mda_gid = GID_WHEEL;
	mda.mda_mode = 0644;
	mda.mda_si_drv1 = sc;
	error = make_dev_s(&mda, &sc->pdev, "hpet%d", device_get_unit(dev));
	if (error == 0) {
		sc->mmap_allow = 1;
		TUNABLE_INT_FETCH("hw.acpi.hpet.mmap_allow",
		    &sc->mmap_allow);
		sc->mmap_allow_write = 0;
		TUNABLE_INT_FETCH("hw.acpi.hpet.mmap_allow_write",
		    &sc->mmap_allow_write);
		SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
		    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		    OID_AUTO, "mmap_allow",
		    CTLFLAG_RW, &sc->mmap_allow, 0,
		    "Allow userland to memory map HPET");
		SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
		    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		    OID_AUTO, "mmap_allow_write",
		    CTLFLAG_RW, &sc->mmap_allow_write, 0,
		    "Allow userland write to the HPET register space");
	} else {
		/* Non-fatal: the timers still work without the device node. */
		device_printf(dev, "could not create /dev/hpet%d, error %d\n",
		    device_get_unit(dev), error);
	}

	return (0);
}
837
/* Detach method: always refused while the timecounter is registered. */
static int
hpet_detach(device_t dev)
{
	ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);

	/* XXX Without a tc_remove() function, we can't detach. */
	return (EBUSY);
}
846
/*
 * Suspend method.  The disable call is intentionally left commented
 * out: stopping the counter here is what the spec asks for, but it is
 * currently not done (keeping the clock running across S1/S2).
 */
static int
hpet_suspend(device_t dev)
{
//	struct hpet_softc *sc;

	/*
	 * Disable the timer during suspend.  The timer will not lose
	 * its state in S1 or S2, but we are required to disable
	 * it.
	 */
//	sc = device_get_softc(dev);
//	hpet_disable(sc);

	return (0);
}
862
/*
 * Resume method: re-enable the main counter, re-program MSI routing
 * (lost across suspend), and restart any event timers that were
 * running when the machine went down.
 */
static int
hpet_resume(device_t dev)
{
	struct hpet_softc *sc;
	struct hpet_timer *t;
	int i;

	/* Re-enable the timer after a resume to keep the clock advancing. */
	sc = device_get_softc(dev);
	hpet_enable(sc);
	/* Restart event timers that were running on suspend. */
	for (i = 0; i < sc->num_timers; i++) {
		t = &sc->t[i];
#ifdef DEV_APIC
		/* Restore FSB (MSI) address/data for non-legacy timers. */
		if (t->irq >= 0 && (sc->legacy_route == 0 || i >= 2)) {
			uint64_t addr;
			uint32_t data;

			if (PCIB_MAP_MSI(
			    device_get_parent(device_get_parent(dev)), dev,
			    t->irq, &addr, &data) == 0) {
				bus_write_4(sc->mem_res,
				    HPET_TIMER_FSB_ADDR(i), addr);
				bus_write_4(sc->mem_res,
				    HPET_TIMER_FSB_VAL(i), data);
			}
		}
#endif
		if (t->mode == TIMER_STOPPED)
			continue;
		t->next = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
		if (t->mode == TIMER_PERIODIC &&
		    (t->caps & HPET_TCAP_PER_INT) != 0) {
			t->caps |= HPET_TCNF_TYPE;
			t->next += t->div;
			/*
			 * With VAL_SET armed the first comparator write sets
			 * the comparator and the second sets the period
			 * accumulator; the intervening read is a posting
			 * flush before the second write.
			 */
			bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num),
			    t->caps | HPET_TCNF_VAL_SET);
			bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
			    t->next);
			bus_read_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num));
			bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
			    t->div);
		} else {
			/* One-shot: fire again roughly 1/1024 s from now. */
			t->next += sc->freq / 1024;
			bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
			    t->next);
		}
		/* Clear stale status, then re-enable the interrupt. */
		bus_write_4(sc->mem_res, HPET_ISR, 1 << t->num);
		bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num), t->caps);
	}
	return (0);
}
915
/* Print some basic latency/rate information to assist in debugging. */
static void
hpet_test(struct hpet_softc *sc)
{
	int i;
	uint32_t u1, u2;
	struct bintime b0, b1, b2;
	struct timespec ts;

	/* First binuptime() warms caches; b1..b2 brackets the read loop. */
	binuptime(&b0);
	binuptime(&b0);
	binuptime(&b1);
	u1 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	for (i = 1; i < 1000; i++)
		u2 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	binuptime(&b2);
	u2 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);

	/* Subtract the measured overhead of a single binuptime() call. */
	bintime_sub(&b2, &b1);
	bintime_sub(&b1, &b0);
	bintime_sub(&b2, &b1);
	bintime2timespec(&b2, &ts);

	device_printf(sc->dev, "%ld.%09ld: %u ... %u = %u\n",
	    (long)ts.tv_sec, ts.tv_nsec, u1, u2, u2 - u1);

	/* 1000 reads were timed, so nsec/1000 is the per-read cost. */
	device_printf(sc->dev, "time per call: %ld ns\n", ts.tv_nsec / 1000);
}
944
#ifdef DEV_APIC
/*
 * bus_remap_intr method: re-program the FSB (MSI) registers of the
 * timer using 'irq' after the interrupt has been moved by the system.
 * Returns ENOENT when no timer owns that IRQ.
 */
static int
hpet_remap_intr(device_t dev, device_t child, u_int irq)
{
	struct hpet_softc *sc = device_get_softc(dev);
	struct hpet_timer *t;
	uint64_t addr;
	uint32_t data;
	int error, i;

	for (i = 0; i < sc->num_timers; i++) {
		t = &sc->t[i];
		if (t->irq != irq)
			continue;
		error = PCIB_MAP_MSI(
		    device_get_parent(device_get_parent(dev)), dev,
		    irq, &addr, &data);
		if (error)
			return (error);
		hpet_disable(sc); /* Stop timer to avoid interrupt loss. */
		bus_write_4(sc->mem_res, HPET_TIMER_FSB_ADDR(i), addr);
		bus_write_4(sc->mem_res, HPET_TIMER_FSB_VAL(i), data);
		hpet_enable(sc);
		return (0);
	}
	return (ENOENT);
}
#endif
972 #endif
973
/* Newbus method table for the hpet driver. */
static device_method_t hpet_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	hpet_identify),
	DEVMETHOD(device_probe,		hpet_probe),
	DEVMETHOD(device_attach,	hpet_attach),
	DEVMETHOD(device_detach,	hpet_detach),
	DEVMETHOD(device_suspend,	hpet_suspend),
	DEVMETHOD(device_resume,	hpet_resume),

#ifdef DEV_APIC
	DEVMETHOD(bus_remap_intr,	hpet_remap_intr),
#endif

	DEVMETHOD_END
};
989
static driver_t hpet_driver = {
	"hpet",
	hpet_methods,
	sizeof(struct hpet_softc),
};

/* Attach to the acpi bus; requires the acpi module. */
DRIVER_MODULE(hpet, acpi, hpet_driver, hpet_devclass, 0, 0);
MODULE_DEPEND(hpet, acpi, 1, 1, 1);
Cache object: 968a3909a2a4eb0aa1ec2d8c622c3517
|