/*-
 * Copyright (c) 2001 Mitsuru IWASAKI
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <contrib/dev/acpica/acpi.h>
#include <contrib/dev/acpica/actables.h>

#include <dev/acpica/acpivar.h>
#include <dev/acpica/acpiio.h>

/*
 * APM driver emulation
 */

#include <machine/apm_bios.h>
#include <machine/pc/bios.h>

#include <i386/bios/apm.h>

SYSCTL_DECL(_debug_acpi);

uint32_t acpi_resume_beep;
TUNABLE_INT("debug.acpi.resume_beep", &acpi_resume_beep);
SYSCTL_UINT(_debug_acpi, OID_AUTO, resume_beep, CTLFLAG_RW, &acpi_resume_beep,
    0, "Beep the PC speaker when resuming");
uint32_t acpi_reset_video;
TUNABLE_INT("hw.acpi.reset_video", &acpi_reset_video);

static int intr_model = ACPI_INTR_PIC;
static int apm_active;
static struct clonedevs *apm_clones;

MALLOC_DEFINE(M_APMDEV, "apmdev", "APM device emulation");

static d_open_t apmopen;
static d_close_t apmclose;
static d_write_t apmwrite;
static d_ioctl_t apmioctl;
static d_poll_t apmpoll;
static d_kqfilter_t apmkqfilter;
static void apmreadfiltdetach(struct knote *kn);
static int apmreadfilt(struct knote *kn, long hint);
static struct filterops apm_readfiltops =
	{ 1, NULL, apmreadfiltdetach, apmreadfilt };

static struct cdevsw apm_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = D_TRACKCLOSE,
	.d_open = apmopen,
	.d_close = apmclose,
	.d_write = apmwrite,
	.d_ioctl = apmioctl,
	.d_poll = apmpoll,
	.d_name = "apm",
	.d_kqfilter = apmkqfilter
};

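/*
 * Convert the ACPI battery status bits into the APM battery state code
 * (0 high, 1 low, 2 critical, 3 charging), falling back to the remaining
 * capacity when the state bits alone are inconclusive.
 */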
static int
acpi_capm_convert_battstate(struct acpi_battinfo *battp)
{
	int state;

	state = APM_UNKNOWN;

	if (battp->state & ACPI_BATT_STAT_DISCHARG) {
		if (battp->cap >= 50)
			state = 0;	/* high */
		else
			state = 1;	/* low */
	}
	if (battp->state & ACPI_BATT_STAT_CRITICAL)
		state = 2;		/* critical */
	if (battp->state & ACPI_BATT_STAT_CHARGING)
		state = 3;		/* charging */

	/* If still unknown, determine it based on the battery capacity. */
	if (state == APM_UNKNOWN) {
		if (battp->cap >= 50)
			state = 0;	/* high */
		else
			state = 1;	/* low */
	}

	return (state);
}

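/*
 * Convert the ACPI battery status into the APM battery flag bits
 * (high, low, critical, charging, or not present).
 */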
static int
acpi_capm_convert_battflags(struct acpi_battinfo *battp)
{
	int flags;

	flags = 0;

	if (battp->cap >= 50)
		flags |= APM_BATT_HIGH;
	else {
		if (battp->state & ACPI_BATT_STAT_CRITICAL)
			flags |= APM_BATT_CRITICAL;
		else
			flags |= APM_BATT_LOW;
	}
	if (battp->state & ACPI_BATT_STAT_CHARGING)
		flags |= APM_BATT_CHARGING;
	if (battp->state == ACPI_BATT_STAT_NOT_PRESENT)
		flags = APM_BATT_NOT_PRESENT;

	return (flags);
}

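/*
 * Fill in an apm_info structure with the current AC-line and battery
 * status obtained from the ACPI AC adapter and battery drivers.
 */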
static int
acpi_capm_get_info(apm_info_t aip)
{
	int acline;
	struct acpi_battinfo batt;

	aip->ai_infoversion = 1;
	aip->ai_major = 1;
	aip->ai_minor = 2;
	aip->ai_status = apm_active;
	aip->ai_capabilities = 0xff00;	/* unknown */

	if (acpi_acad_get_acline(&acline))
		aip->ai_acline = APM_UNKNOWN;	/* unknown */
	else
		aip->ai_acline = acline;	/* on/off */

	if (acpi_battery_get_battinfo(NULL, &batt) != 0) {
		aip->ai_batt_stat = APM_UNKNOWN;
		aip->ai_batt_life = APM_UNKNOWN;
		aip->ai_batt_time = -1;		/* unknown */
		aip->ai_batteries = ~0U;	/* unknown */
	} else {
		aip->ai_batt_stat = acpi_capm_convert_battstate(&batt);
		aip->ai_batt_life = batt.cap;
		aip->ai_batt_time = (batt.min == -1) ? -1 : batt.min * 60;
		aip->ai_batteries = acpi_battery_get_units();
	}

	return (0);
}

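/*
 * Report the power status of a single battery (or the aggregate status
 * for PMDV_ALLDEV) in APM terms.  Returns 0 on success and 1 on failure.
 */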
static int
acpi_capm_get_pwstatus(apm_pwstatus_t app)
{
	device_t dev;
	int acline, unit, error;
	struct acpi_battinfo batt;

	if (app->ap_device != PMDV_ALLDEV &&
	    (app->ap_device < PMDV_BATT0 || app->ap_device > PMDV_BATT_ALL))
		return (1);

	if (app->ap_device == PMDV_ALLDEV)
		error = acpi_battery_get_battinfo(NULL, &batt);
	else {
		unit = app->ap_device - PMDV_BATT0;
		dev = devclass_get_device(devclass_find("battery"), unit);
		if (dev != NULL)
			error = acpi_battery_get_battinfo(dev, &batt);
		else
			error = ENXIO;
	}
	if (error)
		return (1);

	app->ap_batt_stat = acpi_capm_convert_battstate(&batt);
	app->ap_batt_flag = acpi_capm_convert_battflags(&batt);
	app->ap_batt_life = batt.cap;
	app->ap_batt_time = (batt.min == -1) ? -1 : batt.min * 60;

	if (acpi_acad_get_acline(&acline))
		app->ap_acline = APM_UNKNOWN;
	else
		app->ap_acline = acline;	/* on/off */

	return (0);
}

/* Create single-use devices for /dev/apm and /dev/apmctl. */
static void
apm_clone(void *arg, struct ucred *cred, char *name, int namelen,
    struct cdev **dev)
{
	int ctl_dev, unit;

	if (*dev != NULL)
		return;
	if (strcmp(name, "apmctl") == 0)
		ctl_dev = TRUE;
	else if (strcmp(name, "apm") == 0)
		ctl_dev = FALSE;
	else
		return;

	/* Always create a new device and unit number. */
	unit = -1;
	if (clone_create(&apm_clones, &apm_cdevsw, &unit, dev, 0)) {
		if (ctl_dev) {
			*dev = make_dev(&apm_cdevsw, unit2minor(unit),
			    UID_ROOT, GID_OPERATOR, 0660, "apmctl%d", unit);
		} else {
			*dev = make_dev(&apm_cdevsw, unit2minor(unit),
			    UID_ROOT, GID_OPERATOR, 0664, "apm%d", unit);
		}
		if (*dev != NULL) {
			dev_ref(*dev);
			(*dev)->si_flags |= SI_CHEAPCLONE;
		}
	}
}

/* Create a struct for tracking per-device suspend notification. */
static struct apm_clone_data *
apm_create_clone(struct cdev *dev, struct acpi_softc *acpi_sc)
{
	struct apm_clone_data *clone;

	clone = malloc(sizeof(*clone), M_APMDEV, M_WAITOK);
	clone->cdev = dev;
	clone->acpi_sc = acpi_sc;
	clone->notify_status = APM_EV_NONE;
	bzero(&clone->sel_read, sizeof(clone->sel_read));
	knlist_init_mtx(&clone->sel_read.si_note, &acpi_mutex);

	/*
	 * The acpi device is always managed by devd(8) and is considered
	 * writable (i.e., ack is required to allow suspend to proceed.)
	 */
	if (strcmp("acpi", devtoname(dev)) == 0)
		clone->flags = ACPI_EVF_DEVD | ACPI_EVF_WRITE;
	else
		clone->flags = ACPI_EVF_NONE;

	ACPI_LOCK(acpi);
	STAILQ_INSERT_TAIL(&acpi_sc->apm_cdevs, clone, entries);
	ACPI_UNLOCK(acpi);
	return (clone);
}

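/*
 * Open handler: allocate per-open clone state and record whether the
 * device was opened for writing.
 */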
static int
apmopen(struct cdev *dev, int flag, int fmt, d_thread_t *td)
{
	struct acpi_softc *acpi_sc;
	struct apm_clone_data *clone;

	acpi_sc = devclass_get_softc(devclass_find("acpi"), 0);
	clone = apm_create_clone(dev, acpi_sc);
	dev->si_drv1 = clone;

	/* If the device is opened for write, record that. */
	if ((flag & FWRITE) != 0)
		clone->flags |= ACPI_EVF_WRITE;

	return (0);
}

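/*
 * Close handler: ack any pending suspend request on behalf of this clone,
 * then unlink and free its state and schedule the device for destruction.
 */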
static int
apmclose(struct cdev *dev, int flag, int fmt, d_thread_t *td)
{
	struct apm_clone_data *clone;
	struct acpi_softc *acpi_sc;

	clone = dev->si_drv1;
	acpi_sc = clone->acpi_sc;

	/* We are about to lose a reference so check if suspend should occur. */
	if (acpi_sc->acpi_next_sstate != 0 &&
	    clone->notify_status != APM_EV_ACKED)
		acpi_AckSleepState(clone, 0);

	/* Remove this clone's data from the list and free it. */
	ACPI_LOCK(acpi);
	STAILQ_REMOVE(&acpi_sc->apm_cdevs, clone, apm_clone_data, entries);
	knlist_destroy(&clone->sel_read.si_note);
	ACPI_UNLOCK(acpi);
	free(clone, M_APMDEV);
	destroy_dev_sched(dev);
	return (0);
}

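/*
 * Translate APM ioctl requests (suspend, standby, status queries, etc.)
 * into the corresponding ACPI operations.
 */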
static int
apmioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, d_thread_t *td)
{
	int error;
	struct apm_clone_data *clone;
	struct acpi_softc *acpi_sc;
	struct apm_info info;
	struct apm_event_info *ev_info;
	apm_info_old_t aiop;

	error = 0;
	clone = dev->si_drv1;
	acpi_sc = clone->acpi_sc;

	switch (cmd) {
	case APMIO_SUSPEND:
		if ((flag & FWRITE) == 0)
			return (EPERM);
		if (acpi_sc->acpi_next_sstate == 0) {
			if (acpi_sc->acpi_suspend_sx != ACPI_STATE_S5) {
				error = acpi_ReqSleepState(acpi_sc,
				    acpi_sc->acpi_suspend_sx);
			} else {
				printf(
				    "power off via apm suspend not supported\n");
				error = ENXIO;
			}
		} else
			error = acpi_AckSleepState(clone, 0);
		break;
	case APMIO_STANDBY:
		if ((flag & FWRITE) == 0)
			return (EPERM);
		if (acpi_sc->acpi_next_sstate == 0) {
			if (acpi_sc->acpi_standby_sx != ACPI_STATE_S5) {
				error = acpi_ReqSleepState(acpi_sc,
				    acpi_sc->acpi_standby_sx);
			} else {
				printf(
				    "power off via apm standby not supported\n");
				error = ENXIO;
			}
		} else
			error = acpi_AckSleepState(clone, 0);
		break;
	case APMIO_NEXTEVENT:
		printf("apm nextevent start\n");
		ACPI_LOCK(acpi);
		if (acpi_sc->acpi_next_sstate != 0 && clone->notify_status ==
		    APM_EV_NONE) {
			ev_info = (struct apm_event_info *)addr;
			if (acpi_sc->acpi_next_sstate <= ACPI_STATE_S3)
				ev_info->type = PMEV_STANDBYREQ;
			else
				ev_info->type = PMEV_SUSPENDREQ;
			ev_info->index = 0;
			clone->notify_status = APM_EV_NOTIFIED;
			printf("apm event returning %d\n", ev_info->type);
		} else
			error = EAGAIN;
		ACPI_UNLOCK(acpi);
		break;
	case APMIO_GETINFO_OLD:
		if (acpi_capm_get_info(&info))
			error = ENXIO;
		aiop = (apm_info_old_t)addr;
		aiop->ai_major = info.ai_major;
		aiop->ai_minor = info.ai_minor;
		aiop->ai_acline = info.ai_acline;
		aiop->ai_batt_stat = info.ai_batt_stat;
		aiop->ai_batt_life = info.ai_batt_life;
		aiop->ai_status = info.ai_status;
		break;
	case APMIO_GETINFO:
		if (acpi_capm_get_info((apm_info_t)addr))
			error = ENXIO;
		break;
	case APMIO_GETPWSTATUS:
		if (acpi_capm_get_pwstatus((apm_pwstatus_t)addr))
			error = ENXIO;
		break;
	case APMIO_ENABLE:
		if ((flag & FWRITE) == 0)
			return (EPERM);
		apm_active = 1;
		break;
	case APMIO_DISABLE:
		if ((flag & FWRITE) == 0)
			return (EPERM);
		apm_active = 0;
		break;
	case APMIO_HALTCPU:
		break;
	case APMIO_NOTHALTCPU:
		break;
	case APMIO_DISPLAY:
		if ((flag & FWRITE) == 0)
			return (EPERM);
		break;
	case APMIO_BIOS:
		if ((flag & FWRITE) == 0)
			return (EPERM);
		bzero(addr, sizeof(struct apm_bios_arg));
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

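/* Writes carry no data for the APM emulation and are not interpreted. */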
static int
apmwrite(struct cdev *dev, struct uio *uio, int ioflag)
{
	return (uio->uio_resid);
}

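/*
 * Poll handler: readable when a sleep state request is pending; otherwise
 * record the selector so the thread can be woken up later.
 */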
static int
apmpoll(struct cdev *dev, int events, d_thread_t *td)
{
	struct apm_clone_data *clone;
	int revents;

	revents = 0;
	ACPI_LOCK(acpi);
	clone = dev->si_drv1;
	if (clone->acpi_sc->acpi_next_sstate)
		revents |= events & (POLLIN | POLLRDNORM);
	else
		selrecord(td, &clone->sel_read);
	ACPI_UNLOCK(acpi);
	return (revents);
}

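/* Attach a kqueue read filter to this clone's note list. */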
static int
apmkqfilter(struct cdev *dev, struct knote *kn)
{
	struct apm_clone_data *clone;

	ACPI_LOCK(acpi);
	clone = dev->si_drv1;
	kn->kn_hook = clone;
	kn->kn_fop = &apm_readfiltops;
	knlist_add(&clone->sel_read.si_note, kn, 0);
	ACPI_UNLOCK(acpi);
	return (0);
}

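/* Detach a previously attached kqueue read filter. */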
static void
apmreadfiltdetach(struct knote *kn)
{
	struct apm_clone_data *clone;

	ACPI_LOCK(acpi);
	clone = kn->kn_hook;
	knlist_remove(&clone->sel_read.si_note, kn, 0);
	ACPI_UNLOCK(acpi);
}

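/* Filter event: report readable when a sleep state request is pending. */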
static int
apmreadfilt(struct knote *kn, long hint)
{
	struct apm_clone_data *clone;
	int sleeping;

	ACPI_LOCK(acpi);
	clone = kn->kn_hook;
	sleeping = clone->acpi_sc->acpi_next_sstate ? 1 : 0;
	ACPI_UNLOCK(acpi);
	return (sleeping);
}

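/*
 * Machine-dependent ACPI attach: set up the APM emulation devices, install
 * the wakeup handler, configure the SCI interrupt, and register the
 * reset_video sysctl.
 */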
int
acpi_machdep_init(device_t dev)
{
	struct acpi_softc *acpi_sc;

	acpi_sc = devclass_get_softc(devclass_find("acpi"), 0);

	/* Create a clone for /dev/acpi also. */
	STAILQ_INIT(&acpi_sc->apm_cdevs);
	acpi_sc->acpi_clone = apm_create_clone(acpi_sc->acpi_dev_t, acpi_sc);
	clone_setup(&apm_clones);
	EVENTHANDLER_REGISTER(dev_clone, apm_clone, 0, 1000);
	acpi_install_wakeup_handler(acpi_sc);

	if (intr_model == ACPI_INTR_PIC)
		BUS_CONFIG_INTR(dev, AcpiGbl_FADT.SciInterrupt,
		    INTR_TRIGGER_LEVEL, INTR_POLARITY_LOW);
	else
		acpi_SetIntrModel(intr_model);

	SYSCTL_ADD_UINT(&acpi_sc->acpi_sysctl_ctx,
	    SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree), OID_AUTO,
	    "reset_video", CTLFLAG_RW, &acpi_reset_video, 0,
	    "Call the VESA reset BIOS vector on the resume path");

	return (0);
}

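/* Record the interrupt routing model to be applied in acpi_machdep_init(). */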
void
acpi_SetDefaultIntrModel(int model)
{

	intr_model = model;
}

/* Check BIOS date.  If 1998 or older, disable ACPI. */
int
acpi_machdep_quirks(int *quirks)
{
	char *va;
	int year;

	/* BIOS address 0xffff5 contains the date in the format mm/dd/yy. */
	va = pmap_mapbios(0xffff0, 16);
	sscanf(va + 11, "%2d", &year);
	pmap_unmapbios((vm_offset_t)va, 16);

	/*
	 * Date must be >= 1/1/1999 or we don't trust ACPI.  Note that this
	 * check must be changed by my 114th birthday.
	 */
	if (year > 90 && year < 99)
		*quirks = ACPI_Q_BROKEN;

	return (0);
}

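/* Enter the C1 idle state: enable interrupts and halt until the next one. */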
void
acpi_cpu_c1()
{
	__asm __volatile("sti; hlt");
}

/*
 * Support for mapping ACPI tables during early boot.  This abuses the
 * crashdump map because the kernel cannot allocate KVA in
 * pmap_mapbios() when this is used.  This makes the following
 * assumptions about how we use this KVA: pages 0 and 1 are used to
 * map in the header of each table found via the RSDT or XSDT and
 * pages 2 to n are used to map in the RSDT or XSDT.  This has to use
 * 2 pages for the table headers in case a header spans a page
 * boundary.
 *
 * XXX: We don't ensure the table fits in the available address space
 * in the crashdump map.
 */

/*
 * Map some memory using the crashdump map.  'offset' is an offset in
 * pages into the crashdump map to use for the start of the mapping.
 */
static void *
table_map(vm_paddr_t pa, int offset, vm_offset_t length)
{
	vm_offset_t va, off;
	void *data;

	off = pa & PAGE_MASK;
	length = roundup(length + off, PAGE_SIZE);
	pa = pa & PG_FRAME;
	va = (vm_offset_t)pmap_kenter_temporary(pa, offset) +
	    (offset * PAGE_SIZE);
	data = (void *)(va + off);
	length -= PAGE_SIZE;
	while (length > 0) {
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
		length -= PAGE_SIZE;
		pmap_kenter(va, pa);
		invlpg(va);
	}
	return (data);
}

/* Unmap memory previously mapped with table_map(). */
static void
table_unmap(void *data, vm_offset_t length)
{
	vm_offset_t va, off;

	va = (vm_offset_t)data;
	off = va & PAGE_MASK;
	length = roundup(length + off, PAGE_SIZE);
	va &= ~PAGE_MASK;
	while (length > 0) {
		pmap_kremove(va);
		invlpg(va);
		va += PAGE_SIZE;
		length -= PAGE_SIZE;
	}
}

/*
 * Map a table at a given offset into the crashdump map.  It first
 * maps the header to determine the table length and then maps the
 * entire table.
 */
static void *
map_table(vm_paddr_t pa, int offset, const char *sig)
{
	ACPI_TABLE_HEADER *header;
	vm_offset_t length;
	void *table;

	header = table_map(pa, offset, sizeof(ACPI_TABLE_HEADER));
	if (strncmp(header->Signature, sig, ACPI_NAME_SIZE) != 0) {
		table_unmap(header, sizeof(ACPI_TABLE_HEADER));
		return (NULL);
	}
	length = header->Length;
	table_unmap(header, sizeof(ACPI_TABLE_HEADER));
	table = table_map(pa, offset, length);
	if (ACPI_FAILURE(AcpiTbChecksum(table, length))) {
		if (bootverbose)
			printf("ACPI: Failed checksum for table %s\n", sig);
#if (ACPI_CHECKSUM_ABORT)
		table_unmap(table, length);
		return (NULL);
#endif
	}
	return (table);
}

/*
 * See if a given ACPI table is the requested table.  Returns 1 if the
 * signature matches and 0 on failure.
 */
static int
probe_table(vm_paddr_t address, const char *sig)
{
	ACPI_TABLE_HEADER *table;

	table = table_map(address, 0, sizeof(ACPI_TABLE_HEADER));
	if (table == NULL) {
		if (bootverbose)
			printf("ACPI: Failed to map table at 0x%jx\n",
			    (uintmax_t)address);
		return (0);
	}
	if (bootverbose)
		printf("Table '%.4s' at 0x%jx\n", table->Signature,
		    (uintmax_t)address);

	if (strncmp(table->Signature, sig, ACPI_NAME_SIZE) != 0) {
		table_unmap(table, sizeof(ACPI_TABLE_HEADER));
		return (0);
	}
	table_unmap(table, sizeof(ACPI_TABLE_HEADER));
	return (1);
}

/*
 * Try to map a table at a given physical address previously returned
 * by acpi_find_table().
 */
void *
acpi_map_table(vm_paddr_t pa, const char *sig)
{

	return (map_table(pa, 0, sig));
}

/* Unmap a table previously mapped via acpi_map_table(). */
void
acpi_unmap_table(void *table)
{
	ACPI_TABLE_HEADER *header;

	header = (ACPI_TABLE_HEADER *)table;
	table_unmap(table, header->Length);
}

/*
 * Return the physical address of the requested table or zero if one
 * is not found.
 */
vm_paddr_t
acpi_find_table(const char *sig)
{
	ACPI_PHYSICAL_ADDRESS rsdp_ptr;
	ACPI_TABLE_RSDP *rsdp;
	ACPI_TABLE_RSDT *rsdt;
	ACPI_TABLE_XSDT *xsdt;
	ACPI_TABLE_HEADER *table;
	vm_paddr_t addr;
	int i, count;

	if (resource_disabled("acpi", 0))
		return (0);

	/*
	 * Map in the RSDP.  Since ACPI uses AcpiOsMapMemory() which in turn
	 * calls pmap_mapbios() to find the RSDP, we assume that we can use
	 * pmap_mapbios() to map the RSDP.
	 */
	if ((rsdp_ptr = AcpiOsGetRootPointer()) == 0)
		return (0);
	rsdp = pmap_mapbios(rsdp_ptr, sizeof(ACPI_TABLE_RSDP));
	if (rsdp == NULL) {
		if (bootverbose)
			printf("ACPI: Failed to map RSDP\n");
		return (0);
	}

	/*
	 * For ACPI >= 2.0, use the XSDT if it is available.
	 * Otherwise, use the RSDT.  We map the XSDT or RSDT at page 2
	 * in the crashdump area.  Pages 0 and 1 are used to map in the
	 * headers of candidate ACPI tables.
	 */
	addr = 0;
	if (rsdp->Revision >= 2 && rsdp->XsdtPhysicalAddress != 0) {
		/*
		 * AcpiOsGetRootPointer only verifies the checksum for
		 * the version 1.0 portion of the RSDP.  Version 2.0 has
		 * an additional checksum that we verify first.
		 */
		if (AcpiTbChecksum((UINT8 *)rsdp, ACPI_RSDP_XCHECKSUM_LENGTH)) {
			if (bootverbose)
				printf("ACPI: RSDP failed extended checksum\n");
			return (0);
		}
		xsdt = map_table(rsdp->XsdtPhysicalAddress, 2, ACPI_SIG_XSDT);
		if (xsdt == NULL) {
			if (bootverbose)
				printf("ACPI: Failed to map XSDT\n");
			return (0);
		}
		count = (xsdt->Header.Length - sizeof(ACPI_TABLE_HEADER)) /
		    sizeof(UINT64);
		for (i = 0; i < count; i++)
			if (probe_table(xsdt->TableOffsetEntry[i], sig)) {
				addr = xsdt->TableOffsetEntry[i];
				break;
			}
		acpi_unmap_table(xsdt);
	} else {
		rsdt = map_table(rsdp->RsdtPhysicalAddress, 2, ACPI_SIG_RSDT);
		if (rsdt == NULL) {
			if (bootverbose)
				printf("ACPI: Failed to map RSDT\n");
			return (0);
		}
		count = (rsdt->Header.Length - sizeof(ACPI_TABLE_HEADER)) /
		    sizeof(UINT32);
		for (i = 0; i < count; i++)
			if (probe_table(rsdt->TableOffsetEntry[i], sig)) {
				addr = rsdt->TableOffsetEntry[i];
				break;
			}
		acpi_unmap_table(rsdt);
	}
	pmap_unmapbios((vm_offset_t)rsdp, sizeof(ACPI_TABLE_RSDP));
	if (addr == 0) {
		if (bootverbose)
			printf("ACPI: No %s table found\n", sig);
		return (0);
	}
	if (bootverbose)
		printf("%s: Found table at 0x%jx\n", sig, (uintmax_t)addr);

	/*
	 * Verify that we can map the full table and that its checksum is
	 * correct, etc.
	 */
	table = map_table(addr, 0, sig);
	if (table == NULL)
		return (0);
	acpi_unmap_table(table);

	return (addr);
}