FreeBSD/Linux Kernel Cross Reference
sys/dev/acpica/acpi.c

    1 /*-
    2  * Copyright (c) 2000 Takanori Watanabe <takawata@jp.freebsd.org>
    3  * Copyright (c) 2000 Mitsuru IWASAKI <iwasaki@jp.freebsd.org>
    4  * Copyright (c) 2000, 2001 Michael Smith
    5  * Copyright (c) 2000 BSDi
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  */
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD$");
   32 
   33 #include "opt_acpi.h"
   34 #include <sys/param.h>
   35 #include <sys/kernel.h>
   36 #include <sys/proc.h>
   37 #include <sys/fcntl.h>
   38 #include <sys/malloc.h>
   39 #include <sys/module.h>
   40 #include <sys/bus.h>
   41 #include <sys/conf.h>
   42 #include <sys/ioccom.h>
   43 #include <sys/reboot.h>
   44 #include <sys/sysctl.h>
   45 #include <sys/ctype.h>
   46 #include <sys/linker.h>
   47 #include <sys/power.h>
   48 #include <sys/sbuf.h>
   49 #include <sys/sched.h>
   50 #include <sys/smp.h>
   51 #include <sys/timetc.h>
   52 
   53 #if defined(__i386__) || defined(__amd64__)
   54 #include <machine/pci_cfgreg.h>
   55 #endif
   56 #include <machine/resource.h>
   57 #include <machine/bus.h>
   58 #include <sys/rman.h>
   59 #include <isa/isavar.h>
   60 #include <isa/pnpvar.h>
   61 
   62 #include <contrib/dev/acpica/include/acpi.h>
   63 #include <contrib/dev/acpica/include/accommon.h>
   64 #include <contrib/dev/acpica/include/acnamesp.h>
   65 
   66 #include <dev/acpica/acpivar.h>
   67 #include <dev/acpica/acpiio.h>
   68 
   69 #include "pci_if.h"
   70 #include <dev/pci/pcivar.h>
   71 #include <dev/pci/pci_private.h>
   72 
   73 #include <vm/vm_param.h>
   74 
   75 MALLOC_DEFINE(M_ACPIDEV, "acpidev", "ACPI devices");
   76 
   77 /* Hooks for the ACPI CA debugging infrastructure */
   78 #define _COMPONENT      ACPI_BUS
   79 ACPI_MODULE_NAME("ACPI")
   80 
   81 static d_open_t         acpiopen;
   82 static d_close_t        acpiclose;
   83 static d_ioctl_t        acpiioctl;
   84 
   85 static struct cdevsw acpi_cdevsw = {
   86         .d_version =    D_VERSION,
   87         .d_open =       acpiopen,
   88         .d_close =      acpiclose,
   89         .d_ioctl =      acpiioctl,
   90         .d_name =       "acpi",
   91 };
   92 
   93 struct acpi_interface {
   94         ACPI_STRING     *data;
   95         int             num;
   96 };
   97 
   98 /* Global mutex for locking access to the ACPI subsystem. */
   99 struct mtx      acpi_mutex;
  100 
  101 /* Bitmap of device quirks. */
  102 int             acpi_quirks;
  103 
  104 /* Supported sleep states. */
  105 static BOOLEAN  acpi_sleep_states[ACPI_S_STATE_COUNT];
  106 
  107 static int      acpi_modevent(struct module *mod, int event, void *junk);
  108 static int      acpi_probe(device_t dev);
  109 static int      acpi_attach(device_t dev);
  110 static int      acpi_suspend(device_t dev);
  111 static int      acpi_resume(device_t dev);
  112 static int      acpi_shutdown(device_t dev);
  113 static device_t acpi_add_child(device_t bus, u_int order, const char *name,
  114                         int unit);
  115 static int      acpi_print_child(device_t bus, device_t child);
  116 static void     acpi_probe_nomatch(device_t bus, device_t child);
  117 static void     acpi_driver_added(device_t dev, driver_t *driver);
  118 static int      acpi_read_ivar(device_t dev, device_t child, int index,
  119                         uintptr_t *result);
  120 static int      acpi_write_ivar(device_t dev, device_t child, int index,
  121                         uintptr_t value);
  122 static struct resource_list *acpi_get_rlist(device_t dev, device_t child);
  123 static int      acpi_sysres_alloc(device_t dev);
  124 static struct resource *acpi_alloc_resource(device_t bus, device_t child,
  125                         int type, int *rid, u_long start, u_long end,
  126                         u_long count, u_int flags);
  127 static int      acpi_adjust_resource(device_t bus, device_t child, int type,
  128                         struct resource *r, u_long start, u_long end);
  129 static int      acpi_release_resource(device_t bus, device_t child, int type,
  130                         int rid, struct resource *r);
  131 static void     acpi_delete_resource(device_t bus, device_t child, int type,
  132                     int rid);
  133 static uint32_t acpi_isa_get_logicalid(device_t dev);
  134 static int      acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count);
  135 static char     *acpi_device_id_probe(device_t bus, device_t dev, char **ids);
  136 static ACPI_STATUS acpi_device_eval_obj(device_t bus, device_t dev,
  137                     ACPI_STRING pathname, ACPI_OBJECT_LIST *parameters,
  138                     ACPI_BUFFER *ret);
  139 static int      acpi_device_pwr_for_sleep(device_t bus, device_t dev,
  140                     int *dstate);
  141 static ACPI_STATUS acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level,
  142                     void *context, void **retval);
  143 static ACPI_STATUS acpi_device_scan_children(device_t bus, device_t dev,
  144                     int max_depth, acpi_scan_cb_t user_fn, void *arg);
  145 static int      acpi_set_powerstate_method(device_t bus, device_t child,
  146                     int state);
  147 static int      acpi_isa_pnp_probe(device_t bus, device_t child,
  148                     struct isa_pnp_id *ids);
  149 static void     acpi_probe_children(device_t bus);
  150 static void     acpi_probe_order(ACPI_HANDLE handle, int *order);
  151 static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level,
  152                     void *context, void **status);
  153 static void     acpi_sleep_enable(void *arg);
  154 static ACPI_STATUS acpi_sleep_disable(struct acpi_softc *sc);
  155 static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state);
  156 static void     acpi_shutdown_final(void *arg, int howto);
  157 static void     acpi_enable_fixed_events(struct acpi_softc *sc);
  158 static int      acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate);
  159 static int      acpi_wake_run_prep(ACPI_HANDLE handle, int sstate);
  160 static int      acpi_wake_prep_walk(int sstate);
  161 static int      acpi_wake_sysctl_walk(device_t dev);
  162 static int      acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS);
  163 static void     acpi_system_eventhandler_sleep(void *arg, int state);
  164 static void     acpi_system_eventhandler_wakeup(void *arg, int state);
  165 static int      acpi_sname2sstate(const char *sname);
  166 static const char *acpi_sstate2sname(int sstate);
  167 static int      acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS);
  168 static int      acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS);
  169 static int      acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS);
  170 static int      acpi_pm_func(u_long cmd, void *arg, ...);
  171 static int      acpi_child_location_str_method(device_t acdev, device_t child,
  172                                                char *buf, size_t buflen);
  173 static int      acpi_child_pnpinfo_str_method(device_t acdev, device_t child,
  174                                               char *buf, size_t buflen);
  175 #if defined(__i386__) || defined(__amd64__)
  176 static void     acpi_enable_pcie(void);
  177 #endif
  178 static void     acpi_hint_device_unit(device_t acdev, device_t child,
  179                     const char *name, int *unitp);
  180 static void     acpi_reset_interfaces(device_t dev);
  181 
  182 static device_method_t acpi_methods[] = {
  183     /* Device interface */
  184     DEVMETHOD(device_probe,             acpi_probe),
  185     DEVMETHOD(device_attach,            acpi_attach),
  186     DEVMETHOD(device_shutdown,          acpi_shutdown),
  187     DEVMETHOD(device_detach,            bus_generic_detach),
  188     DEVMETHOD(device_suspend,           acpi_suspend),
  189     DEVMETHOD(device_resume,            acpi_resume),
  190 
  191     /* Bus interface */
  192     DEVMETHOD(bus_add_child,            acpi_add_child),
  193     DEVMETHOD(bus_print_child,          acpi_print_child),
  194     DEVMETHOD(bus_probe_nomatch,        acpi_probe_nomatch),
  195     DEVMETHOD(bus_driver_added,         acpi_driver_added),
  196     DEVMETHOD(bus_read_ivar,            acpi_read_ivar),
  197     DEVMETHOD(bus_write_ivar,           acpi_write_ivar),
  198     DEVMETHOD(bus_get_resource_list,    acpi_get_rlist),
  199     DEVMETHOD(bus_set_resource,         bus_generic_rl_set_resource),
  200     DEVMETHOD(bus_get_resource,         bus_generic_rl_get_resource),
  201     DEVMETHOD(bus_alloc_resource,       acpi_alloc_resource),
  202     DEVMETHOD(bus_adjust_resource,      acpi_adjust_resource),
  203     DEVMETHOD(bus_release_resource,     acpi_release_resource),
  204     DEVMETHOD(bus_delete_resource,      acpi_delete_resource),
  205     DEVMETHOD(bus_child_pnpinfo_str,    acpi_child_pnpinfo_str_method),
  206     DEVMETHOD(bus_child_location_str,   acpi_child_location_str_method),
  207     DEVMETHOD(bus_activate_resource,    bus_generic_activate_resource),
  208     DEVMETHOD(bus_deactivate_resource,  bus_generic_deactivate_resource),
  209     DEVMETHOD(bus_setup_intr,           bus_generic_setup_intr),
  210     DEVMETHOD(bus_teardown_intr,        bus_generic_teardown_intr),
  211     DEVMETHOD(bus_hint_device_unit,     acpi_hint_device_unit),
  212 
  213     /* ACPI bus */
  214     DEVMETHOD(acpi_id_probe,            acpi_device_id_probe),
  215     DEVMETHOD(acpi_evaluate_object,     acpi_device_eval_obj),
  216     DEVMETHOD(acpi_pwr_for_sleep,       acpi_device_pwr_for_sleep),
  217     DEVMETHOD(acpi_scan_children,       acpi_device_scan_children),
  218 
  219     /* PCI emulation */
  220     DEVMETHOD(pci_set_powerstate,       acpi_set_powerstate_method),
  221 
  222     /* ISA emulation */
  223     DEVMETHOD(isa_pnp_probe,            acpi_isa_pnp_probe),
  224 
  225     {0, 0}
  226 };
  227 
  228 static driver_t acpi_driver = {
  229     "acpi",
  230     acpi_methods,
  231     sizeof(struct acpi_softc),
  232 };
  233 
  234 static devclass_t acpi_devclass;
  235 DRIVER_MODULE(acpi, nexus, acpi_driver, acpi_devclass, acpi_modevent, 0);
  236 MODULE_VERSION(acpi, 1);
  237 
  238 ACPI_SERIAL_DECL(acpi, "ACPI root bus");
  239 
  240 /* Local pools for managing system resources for ACPI child devices. */
  241 static struct rman acpi_rman_io, acpi_rman_mem;
  242 
  243 #define ACPI_MINIMUM_AWAKETIME  5
  244 
  245 /* Holds the description of the acpi0 device. */
  246 static char acpi_desc[ACPI_OEM_ID_SIZE + ACPI_OEM_TABLE_ID_SIZE + 2];
  247 
  248 SYSCTL_NODE(_debug, OID_AUTO, acpi, CTLFLAG_RD, NULL, "ACPI debugging");
  249 static char acpi_ca_version[12];
  250 SYSCTL_STRING(_debug_acpi, OID_AUTO, acpi_ca_version, CTLFLAG_RD,
  251               acpi_ca_version, 0, "Version of Intel ACPI-CA");
  252 
  253 /*
  254  * Allow overriding _OSI methods.
  255  */
  256 static char acpi_install_interface[256];
  257 TUNABLE_STR("hw.acpi.install_interface", acpi_install_interface,
  258     sizeof(acpi_install_interface));
  259 static char acpi_remove_interface[256];
  260 TUNABLE_STR("hw.acpi.remove_interface", acpi_remove_interface,
  261     sizeof(acpi_remove_interface));
  262 
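       /*
        * Illustrative usage (assumed, not taken from this file): both
        * tunables take a comma-separated list of _OSI interface strings
        * and are normally set from loader.conf(5), for example:
        *
        *   hw.acpi.install_interface="Linux"
        *   hw.acpi.remove_interface="Windows 2006,Windows 2009"
        *
        * The example strings are common _OSI names; the list itself is
        * interpreted by ACPI-CA, not by this driver.
        */
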
  263 /*
  264  * Allow override of whether methods execute in parallel or not.
  265  * Enable this for serial behavior, which fixes "AE_ALREADY_EXISTS"
  266  * errors for AML that really can't handle parallel method execution.
  267  * It is off by default since this breaks recursive methods and
  268  * some IBMs use such code.
  269  */
  270 static int acpi_serialize_methods;
  271 TUNABLE_INT("hw.acpi.serialize_methods", &acpi_serialize_methods);
  272 
  273 /* Allow users to dump Debug objects without ACPI debugger. */
  274 static int acpi_debug_objects;
  275 TUNABLE_INT("debug.acpi.enable_debug_objects", &acpi_debug_objects);
  276 SYSCTL_PROC(_debug_acpi, OID_AUTO, enable_debug_objects,
  277     CTLFLAG_RW | CTLTYPE_INT, NULL, 0, acpi_debug_objects_sysctl, "I",
  278     "Enable Debug objects");
  279 
  280 /* Allow the interpreter to ignore common mistakes in BIOS. */
  281 static int acpi_interpreter_slack = 1;
  282 TUNABLE_INT("debug.acpi.interpreter_slack", &acpi_interpreter_slack);
  283 SYSCTL_INT(_debug_acpi, OID_AUTO, interpreter_slack, CTLFLAG_RDTUN,
  284     &acpi_interpreter_slack, 1, "Turn on interpreter slack mode.");
  285 
  286 /* Power devices off and on in suspend and resume.  XXX Remove once tested. */
  287 static int acpi_do_powerstate = 1;
  288 TUNABLE_INT("debug.acpi.do_powerstate", &acpi_do_powerstate);
  289 SYSCTL_INT(_debug_acpi, OID_AUTO, do_powerstate, CTLFLAG_RW,
  290     &acpi_do_powerstate, 1, "Turn off devices when suspending.");
  291 
  292 /* Reset system clock while resuming.  XXX Remove once tested. */
  293 static int acpi_reset_clock = 1;
  294 TUNABLE_INT("debug.acpi.reset_clock", &acpi_reset_clock);
  295 SYSCTL_INT(_debug_acpi, OID_AUTO, reset_clock, CTLFLAG_RW,
  296     &acpi_reset_clock, 1, "Reset system clock while resuming.");
  297 
  298 /* Allow users to override quirks. */
  299 TUNABLE_INT("debug.acpi.quirks", &acpi_quirks);
  300 
  301 static int acpi_susp_bounce;
  302 SYSCTL_INT(_debug_acpi, OID_AUTO, suspend_bounce, CTLFLAG_RW,
  303     &acpi_susp_bounce, 0, "Don't actually suspend, just test devices.");
  304 
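       /*
        * Debugging sketch (assumed usage): with debug.acpi.suspend_bounce
        * set to 1, a subsequent "acpiconf -s 3" exercises the drivers'
        * suspend and resume methods but backs out before actually entering
        * S3, which helps isolate a device that fails to suspend.
        */
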
  305 /*
  306  * ACPI can only be loaded as a module by the loader; activating it after
  307  * system bootstrap time is not useful, and can be fatal to the system.
  308  * It also cannot be unloaded, since the entire system bus hierarchy hangs
  309  * off it.
  310  */
  311 static int
  312 acpi_modevent(struct module *mod, int event, void *junk)
  313 {
  314     switch (event) {
  315     case MOD_LOAD:
  316         if (!cold) {
  317             printf("The ACPI driver cannot be loaded after boot.\n");
  318             return (EPERM);
  319         }
  320         break;
  321     case MOD_UNLOAD:
  322         if (!cold && power_pm_get_type() == POWER_PM_TYPE_ACPI)
  323             return (EBUSY);
  324         break;
  325     default:
  326         break;
  327     }
  328     return (0);
  329 }
  330 
  331 /*
  332  * Perform early initialization.
  333  */
  334 ACPI_STATUS
  335 acpi_Startup(void)
  336 {
  337     static int started = 0;
  338     ACPI_STATUS status;
  339     int val;
  340 
  341     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
  342 
  343     /* Only run the startup code once.  The MADT driver also calls this. */
  344     if (started)
  345         return_VALUE (AE_OK);
  346     started = 1;
  347 
  348     /*
  349      * Pre-allocate space for RSDT/XSDT and DSDT tables and allow resizing
  350      * if more tables exist.
  351      */
  352     if (ACPI_FAILURE(status = AcpiInitializeTables(NULL, 2, TRUE))) {
  353         printf("ACPI: Table initialisation failed: %s\n",
  354             AcpiFormatException(status));
  355         return_VALUE (status);
  356     }
  357 
  358     /* Set up any quirks we have for this system. */
  359     if (acpi_quirks == ACPI_Q_OK)
  360         acpi_table_quirks(&acpi_quirks);
  361 
  362     /* If the user manually set the disabled hint to 0, force-enable ACPI. */
  363     if (resource_int_value("acpi", 0, "disabled", &val) == 0 && val == 0)
  364         acpi_quirks &= ~ACPI_Q_BROKEN;
  365     if (acpi_quirks & ACPI_Q_BROKEN) {
  366         printf("ACPI disabled by blacklist.  Contact your BIOS vendor.\n");
  367         status = AE_SUPPORT;
  368     }
  369 
  370     return_VALUE (status);
  371 }
  372 
  373 /*
  374  * Detect ACPI and perform early initialisation.
  375  */
  376 int
  377 acpi_identify(void)
  378 {
  379     ACPI_TABLE_RSDP     *rsdp;
  380     ACPI_TABLE_HEADER   *rsdt;
  381     ACPI_PHYSICAL_ADDRESS paddr;
  382     struct sbuf         sb;
  383 
  384     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
  385 
  386     if (!cold)
  387         return (ENXIO);
  388 
  389     /* Check that we haven't been disabled with a hint. */
  390     if (resource_disabled("acpi", 0))
  391         return (ENXIO);
  392 
  393     /* Check for other PM systems. */
  394     if (power_pm_get_type() != POWER_PM_TYPE_NONE &&
  395         power_pm_get_type() != POWER_PM_TYPE_ACPI) {
  396         printf("ACPI identify failed, other PM system enabled.\n");
  397         return (ENXIO);
  398     }
  399 
  400     /* Initialize root tables. */
  401     if (ACPI_FAILURE(acpi_Startup())) {
  402         printf("ACPI: Try disabling either ACPI or apic support.\n");
  403         return (ENXIO);
  404     }
  405 
  406     if ((paddr = AcpiOsGetRootPointer()) == 0 ||
  407         (rsdp = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_RSDP))) == NULL)
  408         return (ENXIO);
  409     if (rsdp->Revision > 1 && rsdp->XsdtPhysicalAddress != 0)
  410         paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->XsdtPhysicalAddress;
  411     else
  412         paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->RsdtPhysicalAddress;
  413     AcpiOsUnmapMemory(rsdp, sizeof(ACPI_TABLE_RSDP));
  414 
  415     if ((rsdt = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_HEADER))) == NULL)
  416         return (ENXIO);
  417     sbuf_new(&sb, acpi_desc, sizeof(acpi_desc), SBUF_FIXEDLEN);
  418     sbuf_bcat(&sb, rsdt->OemId, ACPI_OEM_ID_SIZE);
  419     sbuf_trim(&sb);
  420     sbuf_putc(&sb, ' ');
  421     sbuf_bcat(&sb, rsdt->OemTableId, ACPI_OEM_TABLE_ID_SIZE);
  422     sbuf_trim(&sb);
  423     sbuf_finish(&sb);
  424     sbuf_delete(&sb);
  425     AcpiOsUnmapMemory(rsdt, sizeof(ACPI_TABLE_HEADER));
  426 
  427     snprintf(acpi_ca_version, sizeof(acpi_ca_version), "%x", ACPI_CA_VERSION);
  428 
  429     return (0);
  430 }
  431 
  432 /*
  433  * Fetch some descriptive data from ACPI to put in our attach message.
  434  */
  435 static int
  436 acpi_probe(device_t dev)
  437 {
  438 
  439     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
  440 
  441     device_set_desc(dev, acpi_desc);
  442 
  443     return_VALUE (0);
  444 }
  445 
  446 static int
  447 acpi_attach(device_t dev)
  448 {
  449     struct acpi_softc   *sc;
  450     ACPI_STATUS         status;
  451     int                 error, state;
  452     UINT32              flags;
  453     UINT8               TypeA, TypeB;
  454     char                *env;
  455 
  456     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
  457 
  458     sc = device_get_softc(dev);
  459     sc->acpi_dev = dev;
  460     callout_init(&sc->susp_force_to, TRUE);
  461 
  462     error = ENXIO;
  463 
  464     /* Initialize resource manager. */
  465     acpi_rman_io.rm_type = RMAN_ARRAY;
  466     acpi_rman_io.rm_start = 0;
  467     acpi_rman_io.rm_end = 0xffff;
  468     acpi_rman_io.rm_descr = "ACPI I/O ports";
  469     if (rman_init(&acpi_rman_io) != 0)
  470         panic("acpi rman_init IO ports failed");
  471     acpi_rman_mem.rm_type = RMAN_ARRAY;
  472     acpi_rman_mem.rm_start = 0;
  473     acpi_rman_mem.rm_end = ~0ul;
  474     acpi_rman_mem.rm_descr = "ACPI I/O memory addresses";
  475     if (rman_init(&acpi_rman_mem) != 0)
  476         panic("acpi rman_init memory failed");
  477 
  478     /* Initialise the ACPI mutex */
  479     mtx_init(&acpi_mutex, "ACPI global lock", NULL, MTX_DEF);
  480 
  481     /*
  482      * Set the globals from our tunables.  This is needed because ACPI-CA
  483      * uses UINT8 for some values and we have no tunable_byte.
  484      */
  485     AcpiGbl_AllMethodsSerialized = acpi_serialize_methods ? TRUE : FALSE;
  486     AcpiGbl_EnableInterpreterSlack = acpi_interpreter_slack ? TRUE : FALSE;
  487     AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? TRUE : FALSE;
  488 
  489 #ifndef ACPI_DEBUG
  490     /*
  491      * Disable all debugging layers and levels.
  492      */
  493     AcpiDbgLayer = 0;
  494     AcpiDbgLevel = 0;
  495 #endif
  496 
  497     /* Start up the ACPI CA subsystem. */
  498     status = AcpiInitializeSubsystem();
  499     if (ACPI_FAILURE(status)) {
  500         device_printf(dev, "Could not initialize Subsystem: %s\n",
  501                       AcpiFormatException(status));
  502         goto out;
  503     }
  504 
  505     /* Override OS interfaces if the user requested. */
  506     acpi_reset_interfaces(dev);
  507 
  508     /* Load ACPI name space. */
  509     status = AcpiLoadTables();
  510     if (ACPI_FAILURE(status)) {
  511         device_printf(dev, "Could not load Namespace: %s\n",
  512                       AcpiFormatException(status));
  513         goto out;
  514     }
  515 
  516 #if defined(__i386__) || defined(__amd64__)
  517     /* Handle MCFG table if present. */
  518     acpi_enable_pcie();
  519 #endif
  520 
  521     /*
  522      * Note that some systems (specifically, those with namespace evaluation
  523      * issues that require the avoidance of parts of the namespace) must
  524      * avoid running _INI and _STA on everything, as well as dodging the final
  525      * object init pass.
  526      *
   527  * For these devices, we set ACPI_NO_DEVICE_INIT and ACPI_NO_OBJECT_INIT.
  528      *
  529      * XXX We should arrange for the object init pass after we have attached
  530      *     all our child devices, but on many systems it works here.
  531      */
  532     flags = 0;
  533     if (testenv("debug.acpi.avoid"))
  534         flags = ACPI_NO_DEVICE_INIT | ACPI_NO_OBJECT_INIT;
  535 
  536     /* Bring the hardware and basic handlers online. */
  537     if (ACPI_FAILURE(status = AcpiEnableSubsystem(flags))) {
  538         device_printf(dev, "Could not enable ACPI: %s\n",
  539                       AcpiFormatException(status));
  540         goto out;
  541     }
  542 
  543     /*
  544      * Call the ECDT probe function to provide EC functionality before
  545      * the namespace has been evaluated.
  546      *
  547      * XXX This happens before the sysresource devices have been probed and
  548      * attached so its resources come from nexus0.  In practice, this isn't
  549      * a problem but should be addressed eventually.
  550      */
  551     acpi_ec_ecdt_probe(dev);
  552 
  553     /* Bring device objects and regions online. */
  554     if (ACPI_FAILURE(status = AcpiInitializeObjects(flags))) {
  555         device_printf(dev, "Could not initialize ACPI objects: %s\n",
  556                       AcpiFormatException(status));
  557         goto out;
  558     }
  559 
  560     /*
  561      * Setup our sysctl tree.
  562      *
  563      * XXX: This doesn't check to make sure that none of these fail.
  564      */
  565     sysctl_ctx_init(&sc->acpi_sysctl_ctx);
  566     sc->acpi_sysctl_tree = SYSCTL_ADD_NODE(&sc->acpi_sysctl_ctx,
  567                                SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
  568                                device_get_name(dev), CTLFLAG_RD, 0, "");
  569     SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  570         OID_AUTO, "supported_sleep_state", CTLTYPE_STRING | CTLFLAG_RD,
  571         0, 0, acpi_supported_sleep_state_sysctl, "A", "");
  572     SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  573         OID_AUTO, "power_button_state", CTLTYPE_STRING | CTLFLAG_RW,
  574         &sc->acpi_power_button_sx, 0, acpi_sleep_state_sysctl, "A", "");
  575     SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  576         OID_AUTO, "sleep_button_state", CTLTYPE_STRING | CTLFLAG_RW,
  577         &sc->acpi_sleep_button_sx, 0, acpi_sleep_state_sysctl, "A", "");
  578     SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  579         OID_AUTO, "lid_switch_state", CTLTYPE_STRING | CTLFLAG_RW,
  580         &sc->acpi_lid_switch_sx, 0, acpi_sleep_state_sysctl, "A", "");
  581     SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  582         OID_AUTO, "standby_state", CTLTYPE_STRING | CTLFLAG_RW,
  583         &sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A", "");
  584     SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  585         OID_AUTO, "suspend_state", CTLTYPE_STRING | CTLFLAG_RW,
  586         &sc->acpi_suspend_sx, 0, acpi_sleep_state_sysctl, "A", "");
  587     SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  588         OID_AUTO, "sleep_delay", CTLFLAG_RW, &sc->acpi_sleep_delay, 0,
  589         "sleep delay in seconds");
  590     SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  591         OID_AUTO, "s4bios", CTLFLAG_RW, &sc->acpi_s4bios, 0, "S4BIOS mode");
  592     SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  593         OID_AUTO, "verbose", CTLFLAG_RW, &sc->acpi_verbose, 0, "verbose mode");
  594     SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  595         OID_AUTO, "disable_on_reboot", CTLFLAG_RW,
  596         &sc->acpi_do_disable, 0, "Disable ACPI when rebooting/halting system");
  597     SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  598         OID_AUTO, "handle_reboot", CTLFLAG_RW,
  599         &sc->acpi_handle_reboot, 0, "Use ACPI Reset Register to reboot");
  600 
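           /*
            * Usage sketch (assumed, not taken from this file): after attach,
            * these OIDs live under the hw.acpi tree and the writable ones can
            * be changed at runtime, e.g.
            *
            *   sysctl hw.acpi.supported_sleep_state
            *   sysctl hw.acpi.suspend_state=S3
            *   sysctl hw.acpi.sleep_delay=2
            *
            * The string-valued state OIDs accept names such as "S3" or "NONE"
            * (see acpi_sname2sstate()).
            */
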
  601     /*
  602      * Default to 1 second before sleeping to give some machines time to
  603      * stabilize.
  604      */
  605     sc->acpi_sleep_delay = 1;
  606     if (bootverbose)
  607         sc->acpi_verbose = 1;
  608     if ((env = getenv("hw.acpi.verbose")) != NULL) {
  609         if (strcmp(env, "") != 0)
  610             sc->acpi_verbose = 1;
  611         freeenv(env);
  612     }
  613 
  614     /* Only enable reboot by default if the FADT says it is available. */
  615     if (AcpiGbl_FADT.Flags & ACPI_FADT_RESET_REGISTER)
  616         sc->acpi_handle_reboot = 1;
  617 
  618     /* Only enable S4BIOS by default if the FACS says it is available. */
  619     if (AcpiGbl_FACS->Flags & ACPI_FACS_S4_BIOS_PRESENT)
  620         sc->acpi_s4bios = 1;
  621 
  622     /* Probe all supported sleep states. */
  623     acpi_sleep_states[ACPI_STATE_S0] = TRUE;
  624     for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++)
  625         if (ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB)))
  626             acpi_sleep_states[state] = TRUE;
  627 
  628     /*
  629      * Dispatch the default sleep state to devices.  The lid switch is set
  630      * to UNKNOWN by default to avoid surprising users.
  631      */
  632     sc->acpi_power_button_sx = acpi_sleep_states[ACPI_STATE_S5] ?
  633         ACPI_STATE_S5 : ACPI_STATE_UNKNOWN;
  634     sc->acpi_lid_switch_sx = ACPI_STATE_UNKNOWN;
  635     sc->acpi_standby_sx = acpi_sleep_states[ACPI_STATE_S1] ?
  636         ACPI_STATE_S1 : ACPI_STATE_UNKNOWN;
  637     sc->acpi_suspend_sx = acpi_sleep_states[ACPI_STATE_S3] ?
  638         ACPI_STATE_S3 : ACPI_STATE_UNKNOWN;
  639 
  640     /* Pick the first valid sleep state for the sleep button default. */
  641     sc->acpi_sleep_button_sx = ACPI_STATE_UNKNOWN;
  642     for (state = ACPI_STATE_S1; state <= ACPI_STATE_S4; state++)
  643         if (acpi_sleep_states[state]) {
  644             sc->acpi_sleep_button_sx = state;
  645             break;
  646         }
  647 
  648     acpi_enable_fixed_events(sc);
  649 
  650     /*
  651      * Scan the namespace and attach/initialise children.
  652      */
  653 
  654     /* Register our shutdown handler. */
  655     EVENTHANDLER_REGISTER(shutdown_final, acpi_shutdown_final, sc,
  656         SHUTDOWN_PRI_LAST);
  657 
  658     /*
  659      * Register our acpi event handlers.
  660      * XXX should be configurable eg. via userland policy manager.
  661      */
  662     EVENTHANDLER_REGISTER(acpi_sleep_event, acpi_system_eventhandler_sleep,
  663         sc, ACPI_EVENT_PRI_LAST);
  664     EVENTHANDLER_REGISTER(acpi_wakeup_event, acpi_system_eventhandler_wakeup,
  665         sc, ACPI_EVENT_PRI_LAST);
  666 
  667     /* Flag our initial states. */
  668     sc->acpi_enabled = TRUE;
  669     sc->acpi_sstate = ACPI_STATE_S0;
  670     sc->acpi_sleep_disabled = TRUE;
  671 
  672     /* Create the control device */
  673     sc->acpi_dev_t = make_dev(&acpi_cdevsw, 0, UID_ROOT, GID_WHEEL, 0644,
  674                               "acpi");
  675     sc->acpi_dev_t->si_drv1 = sc;
  676 
  677     if ((error = acpi_machdep_init(dev)))
  678         goto out;
  679 
  680     /* Register ACPI again to pass the correct argument of pm_func. */
  681     power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc);
  682 
  683     if (!acpi_disabled("bus"))
  684         acpi_probe_children(dev);
  685 
  686     /* Allow sleep request after a while. */
  687     timeout(acpi_sleep_enable, sc, hz * ACPI_MINIMUM_AWAKETIME);
  688 
  689     error = 0;
  690 
  691  out:
  692     return_VALUE (error);
  693 }
  694 
  695 static int
  696 acpi_suspend(device_t dev)
  697 {
  698     device_t child, *devlist;
  699     int error, i, numdevs, pstate;
  700 
  701     GIANT_REQUIRED;
  702 
  703     /* First give child devices a chance to suspend. */
  704     error = bus_generic_suspend(dev);
  705     if (error)
  706         return (error);
  707 
  708     /*
  709      * Now, set them into the appropriate power state, usually D3.  If the
  710      * device has an _SxD method for the next sleep state, use that power
  711      * state instead.
  712      */
  713     error = device_get_children(dev, &devlist, &numdevs);
  714     if (error)
  715         return (error);
  716     for (i = 0; i < numdevs; i++) {
  717         /* If the device is not attached, we've powered it down elsewhere. */
  718         child = devlist[i];
  719         if (!device_is_attached(child))
  720             continue;
  721 
  722         /*
  723          * Default to D3 for all sleep states.  The _SxD method is optional
  724          * so set the powerstate even if it's absent.
  725          */
  726         pstate = PCI_POWERSTATE_D3;
  727         error = acpi_device_pwr_for_sleep(device_get_parent(child),
  728             child, &pstate);
  729         if ((error == 0 || error == ESRCH) && acpi_do_powerstate)
  730             pci_set_powerstate(child, pstate);
  731     }
  732     free(devlist, M_TEMP);
  733     error = 0;
  734 
  735     return (error);
  736 }
  737 
  738 static int
  739 acpi_resume(device_t dev)
  740 {
  741     ACPI_HANDLE handle;
  742     int i, numdevs, error;
  743     device_t child, *devlist;
  744 
  745     GIANT_REQUIRED;
  746 
  747     /*
  748      * Put all devices in D0 before resuming them.  Call _S0D on each one
  749      * since some systems expect this.
  750      */
  751     error = device_get_children(dev, &devlist, &numdevs);
  752     if (error)
  753         return (error);
  754     for (i = 0; i < numdevs; i++) {
  755         child = devlist[i];
  756         handle = acpi_get_handle(child);
  757         if (handle)
  758             AcpiEvaluateObject(handle, "_S0D", NULL, NULL);
  759         if (device_is_attached(child) && acpi_do_powerstate)
  760             pci_set_powerstate(child, PCI_POWERSTATE_D0);
  761     }
  762     free(devlist, M_TEMP);
  763 
  764     return (bus_generic_resume(dev));
  765 }
  766 
  767 static int
  768 acpi_shutdown(device_t dev)
  769 {
  770 
  771     GIANT_REQUIRED;
  772 
  773     /* Allow children to shutdown first. */
  774     bus_generic_shutdown(dev);
  775 
  776     /*
  777      * Enable any GPEs that are able to power-on the system (i.e., RTC).
  778      * Also, disable any that are not valid for this state (most).
  779      */
  780     acpi_wake_prep_walk(ACPI_STATE_S5);
  781 
  782     return (0);
  783 }
  784 
  785 /*
  786  * Handle a new device being added
  787  */
  788 static device_t
  789 acpi_add_child(device_t bus, u_int order, const char *name, int unit)
  790 {
  791     struct acpi_device  *ad;
  792     device_t            child;
  793 
  794     if ((ad = malloc(sizeof(*ad), M_ACPIDEV, M_NOWAIT | M_ZERO)) == NULL)
  795         return (NULL);
  796 
  797     resource_list_init(&ad->ad_rl);
  798 
  799     child = device_add_child_ordered(bus, order, name, unit);
  800     if (child != NULL)
  801         device_set_ivars(child, ad);
  802     else
  803         free(ad, M_ACPIDEV);
  804     return (child);
  805 }
  806 
  807 static int
  808 acpi_print_child(device_t bus, device_t child)
  809 {
  810     struct acpi_device   *adev = device_get_ivars(child);
  811     struct resource_list *rl = &adev->ad_rl;
  812     int retval = 0;
  813 
  814     retval += bus_print_child_header(bus, child);
  815     retval += resource_list_print_type(rl, "port",  SYS_RES_IOPORT, "%#lx");
  816     retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#lx");
  817     retval += resource_list_print_type(rl, "irq",   SYS_RES_IRQ,    "%ld");
  818     retval += resource_list_print_type(rl, "drq",   SYS_RES_DRQ,    "%ld");
  819     if (device_get_flags(child))
  820         retval += printf(" flags %#x", device_get_flags(child));
  821     retval += bus_print_child_footer(bus, child);
  822 
  823     return (retval);
  824 }
  825 
  826 /*
  827  * If this device is an ACPI child but no one claimed it, attempt
  828  * to power it off.  We'll power it back up when a driver is added.
  829  *
  830  * XXX Disabled for now since many necessary devices (like fdc and
  831  * ATA) don't claim the devices we created for them but still expect
  832  * them to be powered up.
  833  */
  834 static void
  835 acpi_probe_nomatch(device_t bus, device_t child)
  836 {
  837 #ifdef ACPI_ENABLE_POWERDOWN_NODRIVER
  838     pci_set_powerstate(child, PCI_POWERSTATE_D3);
  839 #endif
  840 }
  841 
  842 /*
  843  * If a new driver has a chance to probe a child, first power it up.
  844  *
  845  * XXX Disabled for now (see acpi_probe_nomatch for details).
  846  */
  847 static void
  848 acpi_driver_added(device_t dev, driver_t *driver)
  849 {
  850     device_t child, *devlist;
  851     int i, numdevs;
  852 
  853     DEVICE_IDENTIFY(driver, dev);
  854     if (device_get_children(dev, &devlist, &numdevs))
  855             return;
  856     for (i = 0; i < numdevs; i++) {
  857         child = devlist[i];
  858         if (device_get_state(child) == DS_NOTPRESENT) {
  859 #ifdef ACPI_ENABLE_POWERDOWN_NODRIVER
  860             pci_set_powerstate(child, PCI_POWERSTATE_D0);
  861             if (device_probe_and_attach(child) != 0)
  862                 pci_set_powerstate(child, PCI_POWERSTATE_D3);
  863 #else
  864             device_probe_and_attach(child);
  865 #endif
  866         }
  867     }
  868     free(devlist, M_TEMP);
  869 }
  870 
  871 /* Location hint for devctl(8) */
  872 static int
  873 acpi_child_location_str_method(device_t cbdev, device_t child, char *buf,
  874     size_t buflen)
  875 {
  876     struct acpi_device *dinfo = device_get_ivars(child);
  877 
  878     if (dinfo->ad_handle)
  879         snprintf(buf, buflen, "handle=%s", acpi_name(dinfo->ad_handle));
  880     else
  881         snprintf(buf, buflen, "unknown");
  882     return (0);
  883 }
  884 
  885 /* PnP information for devctl(8) */
  886 static int
  887 acpi_child_pnpinfo_str_method(device_t cbdev, device_t child, char *buf,
  888     size_t buflen)
  889 {
  890     struct acpi_device *dinfo = device_get_ivars(child);
  891     ACPI_DEVICE_INFO *adinfo;
  892 
  893     if (ACPI_FAILURE(AcpiGetObjectInfo(dinfo->ad_handle, &adinfo))) {
  894         snprintf(buf, buflen, "unknown");
  895         return (0);
  896     }
  897 
  898     snprintf(buf, buflen, "_HID=%s _UID=%lu",
  899         (adinfo->Valid & ACPI_VALID_HID) ?
  900         adinfo->HardwareId.String : "none",
  901         (adinfo->Valid & ACPI_VALID_UID) ?
  902         strtoul(adinfo->UniqueId.String, NULL, 10) : 0UL);
  903     AcpiOsFree(adinfo);
  904 
  905     return (0);
  906 }
  907 
  908 /*
  909  * Handle per-device ivars
  910  */
  911 static int
  912 acpi_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
  913 {
  914     struct acpi_device  *ad;
  915 
  916     if ((ad = device_get_ivars(child)) == NULL) {
  917         device_printf(child, "device has no ivars\n");
  918         return (ENOENT);
  919     }
  920 
  921     /* ACPI and ISA compatibility ivars */
  922     switch(index) {
  923     case ACPI_IVAR_HANDLE:
  924         *(ACPI_HANDLE *)result = ad->ad_handle;
  925         break;
  926     case ACPI_IVAR_MAGIC:
  927         *(uintptr_t *)result = ad->ad_magic;
  928         break;
  929     case ACPI_IVAR_PRIVATE:
  930         *(void **)result = ad->ad_private;
  931         break;
  932     case ACPI_IVAR_FLAGS:
  933         *(int *)result = ad->ad_flags;
  934         break;
  935     case ISA_IVAR_VENDORID:
  936     case ISA_IVAR_SERIAL:
  937     case ISA_IVAR_COMPATID:
  938         *(int *)result = -1;
  939         break;
  940     case ISA_IVAR_LOGICALID:
  941         *(int *)result = acpi_isa_get_logicalid(child);
  942         break;
  943     default:
  944         return (ENOENT);
  945     }
  946 
  947     return (0);
  948 }
  949 
  950 static int
  951 acpi_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
  952 {
  953     struct acpi_device  *ad;
  954 
  955     if ((ad = device_get_ivars(child)) == NULL) {
  956         device_printf(child, "device has no ivars\n");
  957         return (ENOENT);
  958     }
  959 
  960     switch(index) {
  961     case ACPI_IVAR_HANDLE:
  962         ad->ad_handle = (ACPI_HANDLE)value;
  963         break;
  964     case ACPI_IVAR_MAGIC:
  965         ad->ad_magic = (uintptr_t)value;
  966         break;
  967     case ACPI_IVAR_PRIVATE:
  968         ad->ad_private = (void *)value;
  969         break;
  970     case ACPI_IVAR_FLAGS:
  971         ad->ad_flags = (int)value;
  972         break;
  973     default:
  974         panic("bad ivar write request (%d)", index);
  975         return (ENOENT);
  976     }
  977 
  978     return (0);
  979 }
  980 
  981 /*
  982  * Handle child resource allocation/removal
  983  */
  984 static struct resource_list *
  985 acpi_get_rlist(device_t dev, device_t child)
  986 {
  987     struct acpi_device          *ad;
  988 
  989     ad = device_get_ivars(child);
  990     return (&ad->ad_rl);
  991 }
  992 
  993 static int
  994 acpi_match_resource_hint(device_t dev, int type, long value)
  995 {
  996     struct acpi_device *ad = device_get_ivars(dev);
  997     struct resource_list *rl = &ad->ad_rl;
  998     struct resource_list_entry *rle;
  999 
 1000     STAILQ_FOREACH(rle, rl, link) {
 1001         if (rle->type != type)
 1002             continue;
 1003         if (rle->start <= value && rle->end >= value)
 1004             return (1);
 1005     }
 1006     return (0);
 1007 }
 1008 
 1009 /*
 1010  * Wire device unit numbers based on resource matches in hints.
 1011  */
 1012 static void
 1013 acpi_hint_device_unit(device_t acdev, device_t child, const char *name,
 1014     int *unitp)
 1015 {
 1016     const char *s;
 1017     long value;
 1018     int line, matches, unit;
 1019 
 1020     /*
 1021      * Iterate over all the hints for the devices with the specified
 1022      * name to see if one's resources are a subset of this device.
 1023      */
 1024     line = 0;
 1025     for (;;) {
 1026         if (resource_find_dev(&line, name, &unit, "at", NULL) != 0)
 1027             break;
 1028 
 1029         /* Must have an "at" for acpi or isa. */
 1030         resource_string_value(name, unit, "at", &s);
 1031         if (!(strcmp(s, "acpi0") == 0 || strcmp(s, "acpi") == 0 ||
 1032             strcmp(s, "isa0") == 0 || strcmp(s, "isa") == 0))
 1033             continue;
 1034 
 1035         /*
 1036          * Check for matching resources.  We must have at least one match.
 1037          * Since I/O and memory resources cannot be shared, if we get a
 1038          * match on either of those, ignore any mismatches in IRQs or DRQs.
 1039          *
 1040          * XXX: We may want to revisit this to be more lenient and wire
 1041          * as long as it gets one match.
 1042          */
 1043         matches = 0;
 1044         if (resource_long_value(name, unit, "port", &value) == 0) {
 1045             /*
 1046              * Floppy drive controllers are notorious for having a
 1047              * wide variety of resources not all of which include the
 1048              * first port that is specified by the hint (typically
 1049              * 0x3f0) (see the comment above fdc_isa_alloc_resources()
 1050              * in fdc_isa.c).  However, they do all seem to include
 1051              * port + 2 (e.g. 0x3f2) so for a floppy device, look for
 1052              * 'value + 2' in the port resources instead of the hint
 1053              * value.
 1054              */
 1055             if (strcmp(name, "fdc") == 0)
 1056                 value += 2;
 1057             if (acpi_match_resource_hint(child, SYS_RES_IOPORT, value))
 1058                 matches++;
 1059             else
 1060                 continue;
 1061         }
 1062         if (resource_long_value(name, unit, "maddr", &value) == 0) {
 1063             if (acpi_match_resource_hint(child, SYS_RES_MEMORY, value))
 1064                 matches++;
 1065             else
 1066                 continue;
 1067         }
 1068         if (matches > 0)
 1069             goto matched;
 1070         if (resource_long_value(name, unit, "irq", &value) == 0) {
 1071             if (acpi_match_resource_hint(child, SYS_RES_IRQ, value))
 1072                 matches++;
 1073             else
 1074                 continue;
 1075         }
 1076         if (resource_long_value(name, unit, "drq", &value) == 0) {
 1077             if (acpi_match_resource_hint(child, SYS_RES_DRQ, value))
 1078                 matches++;
 1079             else
 1080                 continue;
 1081         }
 1082 
 1083     matched:
 1084         if (matches > 0) {
 1085             /* We have a winner! */
 1086             *unitp = unit;
 1087             break;
 1088         }
 1089     }
 1090 }
 1091 
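       /*
        * Illustrative hints (assumed example): with
        *
        *   hint.fdc.0.at="acpi0"
        *   hint.fdc.0.port="0x3F0"
        *   hint.fdc.0.irq="6"
        *
        * in /boot/device.hints, a child whose _CRS resources cover the
        * hinted values (port + 2 in the fdc case, per the special case
        * above) is wired to unit 0.
        */
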
 1092 /*
 1093  * Pre-allocate/manage all memory and IO resources.  Since rman can't handle
 1094  * duplicates, we merge any in the sysresource attach routine.
 1095  */
 1096 static int
 1097 acpi_sysres_alloc(device_t dev)
 1098 {
 1099     struct resource *res;
 1100     struct resource_list *rl;
 1101     struct resource_list_entry *rle;
 1102     struct rman *rm;
 1103     char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL };
 1104     device_t *children;
 1105     int child_count, i;
 1106 
 1107     /*
 1108      * Probe/attach any sysresource devices.  This would be unnecessary if we
 1109      * had multi-pass probe/attach.
 1110      */
 1111     if (device_get_children(dev, &children, &child_count) != 0)
 1112         return (ENXIO);
 1113     for (i = 0; i < child_count; i++) {
 1114         if (ACPI_ID_PROBE(dev, children[i], sysres_ids) != NULL)
 1115             device_probe_and_attach(children[i]);
 1116     }
 1117     free(children, M_TEMP);
 1118 
 1119     rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev);
 1120     STAILQ_FOREACH(rle, rl, link) {
 1121         if (rle->res != NULL) {
 1122             device_printf(dev, "duplicate resource for %lx\n", rle->start);
 1123             continue;
 1124         }
 1125 
 1126         /* Only memory and IO resources are valid here. */
 1127         switch (rle->type) {
 1128         case SYS_RES_IOPORT:
 1129             rm = &acpi_rman_io;
 1130             break;
 1131         case SYS_RES_MEMORY:
 1132             rm = &acpi_rman_mem;
 1133             break;
 1134         default:
 1135             continue;
 1136         }
 1137 
 1138         /* Pre-allocate resource and add to our rman pool. */
 1139         res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, rle->type,
 1140             &rle->rid, rle->start, rle->start + rle->count - 1, rle->count, 0);
 1141         if (res != NULL) {
 1142             rman_manage_region(rm, rman_get_start(res), rman_get_end(res));
 1143             rle->res = res;
 1144         } else
 1145             device_printf(dev, "reservation of %lx, %lx (%d) failed\n",
 1146                 rle->start, rle->count, rle->type);
 1147     }
 1148     return (0);
 1149 }
 1150 
 1151 static struct resource *
 1152 acpi_alloc_resource(device_t bus, device_t child, int type, int *rid,
 1153     u_long start, u_long end, u_long count, u_int flags)
 1154 {
 1155     ACPI_RESOURCE ares;
 1156     struct acpi_device *ad = device_get_ivars(child);
 1157     struct resource_list *rl = &ad->ad_rl;
 1158     struct resource_list_entry *rle;
 1159     struct resource *res;
 1160     struct rman *rm;
 1161 
 1162     res = NULL;
 1163 
 1164     /* We only handle memory and IO resources through rman. */
 1165     switch (type) {
 1166     case SYS_RES_IOPORT:
 1167         rm = &acpi_rman_io;
 1168         break;
 1169     case SYS_RES_MEMORY:
 1170         rm = &acpi_rman_mem;
 1171         break;
 1172     default:
 1173         rm = NULL;
 1174     }
 1175             
 1176     ACPI_SERIAL_BEGIN(acpi);
 1177 
 1178     /*
 1179      * If this is an allocation of the "default" range for a given RID, and
 1180      * we know what the resources for this device are (i.e., they're on the
 1181      * child's resource list), use those start/end values.
 1182      */
 1183     if (bus == device_get_parent(child) && start == 0UL && end == ~0UL) {
 1184         rle = resource_list_find(rl, type, *rid);
 1185         if (rle == NULL)
 1186             goto out;
 1187         start = rle->start;
 1188         end = rle->end;
 1189         count = rle->count;
 1190     }
 1191 
 1192     /*
 1193      * If this is an allocation of a specific range, see if we can satisfy
 1194      * the request from our system resource regions.  If we can't, pass the
 1195      * request up to the parent.
 1196      */
 1197     if (start + count - 1 == end && rm != NULL)
 1198         res = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE,
 1199             child);
 1200     if (res == NULL) {
 1201         res = BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid,
 1202             start, end, count, flags);
 1203     } else {
 1204         rman_set_rid(res, *rid);
 1205 
 1206         /* If requested, activate the resource using the parent's method. */
 1207         if (flags & RF_ACTIVE)
 1208             if (bus_activate_resource(child, type, *rid, res) != 0) {
 1209                 rman_release_resource(res);
 1210                 res = NULL;
 1211                 goto out;
 1212             }
 1213     }
 1214 
 1215     if (res != NULL && device_get_parent(child) == bus)
 1216         switch (type) {
 1217         case SYS_RES_IRQ:
 1218             /*
 1219              * Since bus_config_intr() takes immediate effect, we cannot
 1220              * configure the interrupt associated with a device when we
 1221              * parse the resources but have to defer it until a driver
 1222              * actually allocates the interrupt via bus_alloc_resource().
 1223              *
 1224              * XXX: Should we handle the lookup failing?
 1225              */
 1226             if (ACPI_SUCCESS(acpi_lookup_irq_resource(child, *rid, res, &ares)))
 1227                 acpi_config_intr(child, &ares);
 1228             break;
 1229         }
 1230 
 1231 out:
 1232     ACPI_SERIAL_END(acpi);
 1233     return (res);
 1234 }
 1235 
 1236 static int
 1237 acpi_is_resource_managed(int type, struct resource *r)
 1238 {
 1239 
 1240     /* We only handle memory and IO resources through rman. */
 1241     switch (type) {
 1242     case SYS_RES_IOPORT:
 1243         return (rman_is_region_manager(r, &acpi_rman_io));
 1244     case SYS_RES_MEMORY:
 1245         return (rman_is_region_manager(r, &acpi_rman_mem));
 1246     }
 1247     return (0);
 1248 }
 1249 
 1250 static int
 1251 acpi_adjust_resource(device_t bus, device_t child, int type, struct resource *r,
 1252     u_long start, u_long end)
 1253 {
 1254 
 1255     if (acpi_is_resource_managed(type, r))
 1256         return (rman_adjust_resource(r, start, end));
 1257     return (bus_generic_adjust_resource(bus, child, type, r, start, end));
 1258 }
 1259 
 1260 static int
 1261 acpi_release_resource(device_t bus, device_t child, int type, int rid,
 1262     struct resource *r)
 1263 {
 1264     int ret;
 1265 
 1266     ACPI_SERIAL_BEGIN(acpi);
 1267 
 1268     /*
 1269      * If this resource belongs to one of our internal managers,
 1270      * deactivate it and release it to the local pool.  If it doesn't,
 1271      * pass this request up to the parent.
 1272      */
 1273     if (acpi_is_resource_managed(type, r)) {
 1274         if (rman_get_flags(r) & RF_ACTIVE) {
 1275             ret = bus_deactivate_resource(child, type, rid, r);
 1276             if (ret != 0)
 1277                 goto out;
 1278         }
 1279         ret = rman_release_resource(r);
 1280     } else
 1281         ret = BUS_RELEASE_RESOURCE(device_get_parent(bus), child, type, rid, r);
 1282 
 1283 out:
 1284     ACPI_SERIAL_END(acpi);
 1285     return (ret);
 1286 }
 1287 
 1288 static void
 1289 acpi_delete_resource(device_t bus, device_t child, int type, int rid)
 1290 {
 1291     struct resource_list *rl;
 1292 
 1293     rl = acpi_get_rlist(bus, child);
 1294     resource_list_delete(rl, type, rid);
 1295 }
 1296 
 1297 /* Allocate an IO port or memory resource, given its GAS. */
 1298 int
 1299 acpi_bus_alloc_gas(device_t dev, int *type, int *rid, ACPI_GENERIC_ADDRESS *gas,
 1300     struct resource **res, u_int flags)
 1301 {
 1302     int error, res_type;
 1303 
 1304     error = ENOMEM;
 1305     if (type == NULL || rid == NULL || gas == NULL || res == NULL)
 1306         return (EINVAL);
 1307 
 1308     /* We only support memory and IO spaces. */
 1309     switch (gas->SpaceId) {
 1310     case ACPI_ADR_SPACE_SYSTEM_MEMORY:
 1311         res_type = SYS_RES_MEMORY;
 1312         break;
 1313     case ACPI_ADR_SPACE_SYSTEM_IO:
 1314         res_type = SYS_RES_IOPORT;
 1315         break;
 1316     default:
 1317         return (EOPNOTSUPP);
 1318     }
 1319 
 1320     /*
 1321      * If the register width is less than 8, assume the BIOS author means
 1322      * it is a bit field and just allocate a byte.
 1323      */
 1324     if (gas->BitWidth && gas->BitWidth < 8)
 1325         gas->BitWidth = 8;
 1326 
 1327     /* Validate the address after we're sure we support the space. */
 1328     if (gas->Address == 0 || gas->BitWidth == 0)
 1329         return (EINVAL);
 1330 
 1331     bus_set_resource(dev, res_type, *rid, gas->Address,
 1332         gas->BitWidth / 8);
 1333     *res = bus_alloc_resource_any(dev, res_type, rid, RF_ACTIVE | flags);
 1334     if (*res != NULL) {
 1335         *type = res_type;
 1336         error = 0;
 1337     } else
 1338         bus_delete_resource(dev, res_type, *rid);
 1339 
 1340     return (error);
 1341 }
 1342 
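       /*
        * Caller sketch (assumed driver-side usage; the names are
        * placeholders): a driver handed an ACPI_GENERIC_ADDRESS "gas",
        * say copied out of an ACPI table, can map it without caring
        * whether it describes I/O or memory space:
        *
        *   int type, rid = 0;
        *   struct resource *res;
        *
        *   if (acpi_bus_alloc_gas(dev, &type, &rid, &gas, &res, 0) != 0)
        *       return (ENXIO);
        *
        * On success, *type is SYS_RES_MEMORY or SYS_RES_IOPORT and *res is
        * already activated (RF_ACTIVE is ORed into the allocation above).
        */
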
 1343 /* Probe _HID and _CID for compatible ISA PNP ids. */
 1344 static uint32_t
 1345 acpi_isa_get_logicalid(device_t dev)
 1346 {
 1347     ACPI_DEVICE_INFO    *devinfo;
 1348     ACPI_HANDLE         h;
 1349     uint32_t            pnpid;
 1350 
 1351     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 1352 
 1353     /* Fetch and validate the HID. */
 1354     if ((h = acpi_get_handle(dev)) == NULL ||
 1355         ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
 1356         return_VALUE (0);
 1357 
 1358     pnpid = (devinfo->Valid & ACPI_VALID_HID) != 0 &&
 1359         devinfo->HardwareId.Length >= ACPI_EISAID_STRING_SIZE ?
 1360         PNP_EISAID(devinfo->HardwareId.String) : 0;
 1361     AcpiOsFree(devinfo);
 1362 
 1363     return_VALUE (pnpid);
 1364 }
 1365 
 1366 static int
 1367 acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count)
 1368 {
 1369     ACPI_DEVICE_INFO    *devinfo;
 1370     ACPI_DEVICE_ID      *ids;
 1371     ACPI_HANDLE         h;
 1372     uint32_t            *pnpid;
 1373     int                 i, valid;
 1374 
 1375     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 1376 
 1377     pnpid = cids;
 1378 
 1379     /* Fetch and validate the CID */
 1380     if ((h = acpi_get_handle(dev)) == NULL ||
 1381         ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
 1382         return_VALUE (0);
 1383 
 1384     if ((devinfo->Valid & ACPI_VALID_CID) == 0) {
 1385         AcpiOsFree(devinfo);
 1386         return_VALUE (0);
 1387     }
 1388 
 1389     if (devinfo->CompatibleIdList.Count < count)
 1390         count = devinfo->CompatibleIdList.Count;
 1391     ids = devinfo->CompatibleIdList.Ids;
 1392     for (i = 0, valid = 0; i < count; i++)
 1393         if (ids[i].Length >= ACPI_EISAID_STRING_SIZE &&
 1394             strncmp(ids[i].String, "PNP", 3) == 0) {
 1395             *pnpid++ = PNP_EISAID(ids[i].String);
 1396             valid++;
 1397         }
 1398     AcpiOsFree(devinfo);
 1399 
 1400     return_VALUE (valid);
 1401 }
 1402 
 1403 static char *
 1404 acpi_device_id_probe(device_t bus, device_t dev, char **ids) 
 1405 {
 1406     ACPI_HANDLE h;
 1407     ACPI_OBJECT_TYPE t;
 1408     int i;
 1409 
 1410     h = acpi_get_handle(dev);
 1411     if (ids == NULL || h == NULL)
 1412         return (NULL);
 1413     t = acpi_get_type(dev);
 1414     if (t != ACPI_TYPE_DEVICE && t != ACPI_TYPE_PROCESSOR)
 1415         return (NULL);
 1416 
 1417     /* Try to match one of the array of IDs with a HID or CID. */
 1418     for (i = 0; ids[i] != NULL; i++) {
 1419         if (acpi_MatchHid(h, ids[i]))
 1420             return (ids[i]);
 1421     }
 1422     return (NULL);
 1423 }
 1424 
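       /*
        * Sketch of the matching driver-side pattern (names are placeholders,
        * not from this file): ACPI_ID_PROBE() in a child driver's probe
        * routine resolves to acpi_device_id_probe() when the parent is acpi0.
        *
        *   static char *foo_ids[] = { "PNP0C0A", NULL };
        *
        *   static int
        *   foo_probe(device_t dev)
        *   {
        *       if (acpi_disabled("foo") ||
        *           ACPI_ID_PROBE(device_get_parent(dev), dev, foo_ids) == NULL)
        *           return (ENXIO);
        *       device_set_desc(dev, "Foo example device");
        *       return (0);
        *   }
        */
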
 1425 static ACPI_STATUS
 1426 acpi_device_eval_obj(device_t bus, device_t dev, ACPI_STRING pathname,
 1427     ACPI_OBJECT_LIST *parameters, ACPI_BUFFER *ret)
 1428 {
 1429     ACPI_HANDLE h;
 1430 
 1431     if (dev == NULL)
 1432         h = ACPI_ROOT_OBJECT;
 1433     else if ((h = acpi_get_handle(dev)) == NULL)
 1434         return (AE_BAD_PARAMETER);
 1435     return (AcpiEvaluateObject(h, pathname, parameters, ret));
 1436 }
 1437 
 1438 static int
 1439 acpi_device_pwr_for_sleep(device_t bus, device_t dev, int *dstate)
 1440 {
 1441     struct acpi_softc *sc;
 1442     ACPI_HANDLE handle;
 1443     ACPI_STATUS status;
 1444     char sxd[8];
 1445     int error;
 1446 
 1447     sc = device_get_softc(bus);
 1448     handle = acpi_get_handle(dev);
 1449 
 1450     /*
 1451      * XXX If we find these devices, don't try to power them down.
 1452      * The serial and IRDA ports on my T23 hang the system when
 1453      * set to D3 and it appears that such legacy devices may
 1454      * need special handling in their drivers.
 1455      */
 1456     if (handle == NULL ||
 1457         acpi_MatchHid(handle, "PNP0500") ||
 1458         acpi_MatchHid(handle, "PNP0501") ||
 1459         acpi_MatchHid(handle, "PNP0502") ||
 1460         acpi_MatchHid(handle, "PNP0510") ||
 1461         acpi_MatchHid(handle, "PNP0511"))
 1462         return (ENXIO);
 1463 
 1464     /*
 1465      * Override next state with the value from _SxD, if present.  If no
 1466      * dstate argument was provided, don't fetch the return value.
 1467      */
 1468     snprintf(sxd, sizeof(sxd), "_S%dD", sc->acpi_sstate);
 1469     if (dstate)
 1470         status = acpi_GetInteger(handle, sxd, dstate);
 1471     else
 1472         status = AcpiEvaluateObject(handle, sxd, NULL, NULL);
 1473 
 1474     switch (status) {
 1475     case AE_OK:
 1476         error = 0;
 1477         break;
 1478     case AE_NOT_FOUND:
 1479         error = ESRCH;
 1480         break;
 1481     default:
 1482         error = ENXIO;
 1483         break;
 1484     }
 1485 
 1486     return (error);
 1487 }
 1488 
 1489 /* Callback arg for our implementation of walking the namespace. */
 1490 struct acpi_device_scan_ctx {
 1491     acpi_scan_cb_t      user_fn;
 1492     void                *arg;
 1493     ACPI_HANDLE         parent;
 1494 };
 1495 
 1496 static ACPI_STATUS
 1497 acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, void *arg, void **retval)
 1498 {
 1499     struct acpi_device_scan_ctx *ctx;
 1500     device_t dev, old_dev;
 1501     ACPI_STATUS status;
 1502     ACPI_OBJECT_TYPE type;
 1503 
 1504     /*
 1505      * Skip this device if we think we'll have trouble with it or it is
 1506      * the parent where the scan began.
 1507      */
 1508     ctx = (struct acpi_device_scan_ctx *)arg;
 1509     if (acpi_avoid(h) || h == ctx->parent)
 1510         return (AE_OK);
 1511 
 1512     /* If this is not a valid device type (e.g., a method), skip it. */
 1513     if (ACPI_FAILURE(AcpiGetType(h, &type)))
 1514         return (AE_OK);
 1515     if (type != ACPI_TYPE_DEVICE && type != ACPI_TYPE_PROCESSOR &&
 1516         type != ACPI_TYPE_THERMAL && type != ACPI_TYPE_POWER)
 1517         return (AE_OK);
 1518 
 1519     /*
 1520      * Call the user function with the current device.  If it is unchanged
 1521      * afterwards, return.  Otherwise, we update the handle to the new dev.
 1522      */
 1523     old_dev = acpi_get_device(h);
 1524     dev = old_dev;
 1525     status = ctx->user_fn(h, &dev, level, ctx->arg);
 1526     if (ACPI_FAILURE(status) || old_dev == dev)
 1527         return (status);
 1528 
 1529     /* Remove the old child and its connection to the handle. */
 1530     if (old_dev != NULL) {
 1531         device_delete_child(device_get_parent(old_dev), old_dev);
 1532         AcpiDetachData(h, acpi_fake_objhandler);
 1533     }
 1534 
 1535     /* Recreate the handle association if the user created a device. */
 1536     if (dev != NULL)
 1537         AcpiAttachData(h, acpi_fake_objhandler, dev);
 1538 
 1539     return (AE_OK);
 1540 }
 1541 
 1542 static ACPI_STATUS
 1543 acpi_device_scan_children(device_t bus, device_t dev, int max_depth,
 1544     acpi_scan_cb_t user_fn, void *arg)
 1545 {
 1546     ACPI_HANDLE h;
 1547     struct acpi_device_scan_ctx ctx;
 1548 
 1549     if (acpi_disabled("children"))
 1550         return (AE_OK);
 1551 
 1552     if (dev == NULL)
 1553         h = ACPI_ROOT_OBJECT;
 1554     else if ((h = acpi_get_handle(dev)) == NULL)
 1555         return (AE_BAD_PARAMETER);
 1556     ctx.user_fn = user_fn;
 1557     ctx.arg = arg;
 1558     ctx.parent = h;
 1559     return (AcpiWalkNamespace(ACPI_TYPE_ANY, h, max_depth,
 1560         acpi_device_scan_cb, NULL, &ctx, NULL));
 1561 }
 1562 
 1563 /*
 1564  * Even though ACPI devices are not PCI, we use the PCI approach for setting
 1565  * device power states since it's close enough to ACPI.
 1566  */
 1567 static int
 1568 acpi_set_powerstate_method(device_t bus, device_t child, int state)
 1569 {
 1570     ACPI_HANDLE h;
 1571     ACPI_STATUS status;
 1572     int error;
 1573 
 1574     error = 0;
 1575     h = acpi_get_handle(child);
 1576     if (state < ACPI_STATE_D0 || state > ACPI_D_STATES_MAX)
 1577         return (EINVAL);
 1578     if (h == NULL)
 1579         return (0);
 1580 
 1581     /* Ignore errors if the power methods aren't present. */
 1582     status = acpi_pwr_switch_consumer(h, state);
 1583     if (ACPI_FAILURE(status) && status != AE_NOT_FOUND
 1584         && status != AE_BAD_PARAMETER)
 1585         device_printf(bus, "failed to set ACPI power state D%d on %s: %s\n",
 1586             state, acpi_name(h), AcpiFormatException(status));
 1587 
 1588     return (error);
 1589 }
 1590 
 1591 static int
 1592 acpi_isa_pnp_probe(device_t bus, device_t child, struct isa_pnp_id *ids)
 1593 {
 1594     int                 result, cid_count, i;
 1595     uint32_t            lid, cids[8];
 1596 
 1597     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 1598 
 1599     /*
 1600      * ISA-style drivers attached to ACPI may persist and
 1601      * probe manually if we return ENOENT.  We never want
 1602      * that to happen, so don't ever return it.
 1603      */
 1604     result = ENXIO;
 1605 
 1606     /* Scan the supplied IDs for a match */
 1607     lid = acpi_isa_get_logicalid(child);
 1608     cid_count = acpi_isa_get_compatid(child, cids, 8);
 1609     while (ids && ids->ip_id) {
 1610         if (lid == ids->ip_id) {
 1611             result = 0;
 1612             goto out;
 1613         }
 1614         for (i = 0; i < cid_count; i++) {
 1615             if (cids[i] == ids->ip_id) {
 1616                 result = 0;
 1617                 goto out;
 1618             }
 1619         }
 1620         ids++;
 1621     }
 1622 
 1623  out:
 1624     if (result == 0 && ids->ip_desc)
 1625         device_set_desc(child, ids->ip_desc);
 1626 
 1627     return_VALUE (result);
 1628 }
 1629 
 1630 #if defined(__i386__) || defined(__amd64__)
 1631 /*
 1632  * Look for an MCFG table.  If it is present, use the settings for
 1633  * domain (segment) 0 to set up PCI config space access via the memory
 1634  * map.
 1635  */
 1636 static void
 1637 acpi_enable_pcie(void)
 1638 {
 1639         ACPI_TABLE_HEADER *hdr;
 1640         ACPI_MCFG_ALLOCATION *alloc, *end;
 1641         ACPI_STATUS status;
 1642 
 1643         status = AcpiGetTable(ACPI_SIG_MCFG, 1, &hdr);
 1644         if (ACPI_FAILURE(status))
 1645                 return;
 1646 
 1647         end = (ACPI_MCFG_ALLOCATION *)((char *)hdr + hdr->Length);
 1648         alloc = (ACPI_MCFG_ALLOCATION *)((ACPI_TABLE_MCFG *)hdr + 1);
 1649         while (alloc < end) {
 1650                 if (alloc->PciSegment == 0) {
 1651                         pcie_cfgregopen(alloc->Address, alloc->StartBusNumber,
 1652                             alloc->EndBusNumber);
 1653                         return;
 1654                 }
 1655                 alloc++;
 1656         }
 1657 }
 1658 #endif
 1659 
 1660 /*
 1661  * Scan all of the ACPI namespace and attach child devices.
 1662  *
 1663  * We should only expect to find devices in the \_PR, \_TZ, \_SI, and
 1664  * \_SB scopes, and \_PR and \_TZ became obsolete in the ACPI 2.0 spec.
 1665  * However, in violation of the spec, some systems place their PCI link
 1666  * devices in \, so we have to walk the whole namespace.  We check the
 1667  * type of namespace nodes, so this should be ok.
 1668  */
 1669 static void
 1670 acpi_probe_children(device_t bus)
 1671 {
 1672 
 1673     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 1674 
 1675     /*
 1676      * Scan the namespace and insert placeholders for all the devices that
 1677      * we find.  We also probe/attach any early devices.
 1678      *
 1679      * Note that we use AcpiWalkNamespace rather than AcpiGetDevices because
 1680      * we want to create nodes for all devices, not just those that are
 1681      * currently present. (This assumes that we don't want to create/remove
 1682      * devices as they appear, though doing so might be smarter.)
 1683      */
 1684     ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "namespace scan\n"));
 1685     AcpiWalkNamespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, 100, acpi_probe_child,
 1686         NULL, bus, NULL);
 1687 
 1688     /* Pre-allocate resources for our rman from any sysresource devices. */
 1689     acpi_sysres_alloc(bus);
 1690 
 1691     /* Create any static children by calling device identify methods. */
 1692     ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "device identify routines\n"));
 1693     bus_generic_probe(bus);
 1694 
 1695     /* Probe/attach all children, created statically and from the namespace. */
 1696     ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "acpi bus_generic_attach\n"));
 1697     bus_generic_attach(bus);
 1698 
 1699     /* Attach wake sysctls. */
 1700     acpi_wake_sysctl_walk(bus);
 1701 
 1702     ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "done attaching children\n"));
 1703     return_VOID;
 1704 }
 1705 
 1706 /*
 1707  * Determine the probe order for a given device.
 1708  */
 1709 static void
 1710 acpi_probe_order(ACPI_HANDLE handle, int *order)
 1711 {
 1712     ACPI_OBJECT_TYPE type;
 1713 
 1714     /*
 1715      * 1. CPUs
 1716      * 2. I/O port and memory system resource holders
 1717      * 3. Embedded controllers (to handle early accesses)
 1718      * 4. PCI Link Devices
 1719      */
 1720     AcpiGetType(handle, &type);
 1721     if (type == ACPI_TYPE_PROCESSOR)
 1722         *order = 1;
 1723     else if (acpi_MatchHid(handle, "PNP0C01") || acpi_MatchHid(handle, "PNP0C02"))
 1724         *order = 2;
 1725     else if (acpi_MatchHid(handle, "PNP0C09"))
 1726         *order = 3;
 1727     else if (acpi_MatchHid(handle, "PNP0C0F"))
 1728         *order = 4;
 1729 }
 1730 
 1731 /*
 1732  * Evaluate a child device and determine whether we might attach a device to
 1733  * it.
 1734  */
 1735 static ACPI_STATUS
 1736 acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status)
 1737 {
 1738     ACPI_OBJECT_TYPE type;
 1739     ACPI_HANDLE h;
 1740     device_t bus, child;
 1741     char *handle_str;
 1742     int order;
 1743 
 1744     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 1745 
 1746     if (acpi_disabled("children"))
 1747         return_ACPI_STATUS (AE_OK);
 1748 
 1749     /* Skip this device if we think we'll have trouble with it. */
 1750     if (acpi_avoid(handle))
 1751         return_ACPI_STATUS (AE_OK);
 1752 
 1753     bus = (device_t)context;
 1754     if (ACPI_SUCCESS(AcpiGetType(handle, &type))) {
 1755         handle_str = acpi_name(handle);
 1756         switch (type) {
 1757         case ACPI_TYPE_DEVICE:
 1758             /*
 1759              * Since we scan from \, be sure to skip system scope objects.
 1760              * \_SB_ and \_TZ_ are defined in ACPICA as devices to work around
 1761              * BIOS bugs.  For example, \_SB_ is to allow \_SB_._INI to be run
 1762              * during the initialization and \_TZ_ is to support Notify() on it.
 1763              */
 1764             if (strcmp(handle_str, "\\_SB_") == 0 ||
 1765                 strcmp(handle_str, "\\_TZ_") == 0)
 1766                 break;
 1767             /* FALLTHROUGH */
 1768         case ACPI_TYPE_PROCESSOR:
 1769         case ACPI_TYPE_THERMAL:
 1770         case ACPI_TYPE_POWER:
 1771             /* 
 1772              * Create a placeholder device for this node.  Sort the
 1773              * placeholder so that the probe/attach passes will run
 1774              * breadth-first.  Orders less than ACPI_DEV_BASE_ORDER
 1775              * are reserved for special objects (i.e., system
 1776              * resources).
 1777              */
 1778             ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "scanning '%s'\n", handle_str));
 1779             order = level * 10 + 100;
 1780             acpi_probe_order(handle, &order);
 1781             child = BUS_ADD_CHILD(bus, order, NULL, -1);
 1782             if (child == NULL)
 1783                 break;
 1784 
 1785             /* Associate the handle with the device_t and vice versa. */
 1786             acpi_set_handle(child, handle);
 1787             AcpiAttachData(handle, acpi_fake_objhandler, child);
 1788 
 1789             /*
 1790              * Check that the device is present.  If it's not present,
 1791              * leave it disabled (so that we have a device_t attached to
 1792              * the handle, but we don't probe it).
 1793              *
 1794              * XXX PCI link devices sometimes report "present" but not
 1795              * "functional" (i.e. if disabled).  Go ahead and probe them
 1796              * anyway since we may enable them later.
 1797              */
 1798             if (type == ACPI_TYPE_DEVICE && !acpi_DeviceIsPresent(child)) {
 1799                 /* Never disable PCI link devices. */
 1800                 if (acpi_MatchHid(handle, "PNP0C0F"))
 1801                     break;
 1802                 /*
 1803                  * Docking stations should remain enabled since the system
 1804                  * may be undocked at boot.
 1805                  */
 1806                 if (ACPI_SUCCESS(AcpiGetHandle(handle, "_DCK", &h)))
 1807                     break;
 1808 
 1809                 device_disable(child);
 1810                 break;
 1811             }
 1812 
 1813             /*
 1814              * Get the device's resource settings and attach them.
 1815              * Note that if the device has _PRS but no _CRS, we need
 1816              * to decide when it's appropriate to try to configure the
 1817              * device.  Ignore the return value here; it's OK for the
 1818              * device not to have any resources.
 1819              */
 1820             acpi_parse_resources(child, handle, &acpi_res_parse_set, NULL);
 1821             break;
 1822         }
 1823     }
 1824 
 1825     return_ACPI_STATUS (AE_OK);
 1826 }
 1827 
 1828 /*
 1829  * AcpiAttachData() requires an object handler but never uses it.  This is a
 1830  * placeholder object handler so we can store a device_t in an ACPI_HANDLE.
 1831  */
 1832 void
 1833 acpi_fake_objhandler(ACPI_HANDLE h, void *data)
 1834 {
 1835 }
 1836 
 1837 static void
 1838 acpi_shutdown_final(void *arg, int howto)
 1839 {
 1840     struct acpi_softc *sc = (struct acpi_softc *)arg;
 1841     ACPI_STATUS status;
 1842 
 1843     /*
 1844      * XXX Shutdown code should only run on the BSP (cpuid 0).
 1845      * Some chipsets do not power off the system correctly if called from
 1846      * an AP.
 1847      */
 1848     if ((howto & RB_POWEROFF) != 0) {
 1849         status = AcpiEnterSleepStatePrep(ACPI_STATE_S5);
 1850         if (ACPI_FAILURE(status)) {
 1851             device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n",
 1852                 AcpiFormatException(status));
 1853             return;
 1854         }
 1855         device_printf(sc->acpi_dev, "Powering system off\n");
 1856         ACPI_DISABLE_IRQS();
 1857         status = AcpiEnterSleepState(ACPI_STATE_S5);
 1858         if (ACPI_FAILURE(status))
 1859             device_printf(sc->acpi_dev, "power-off failed - %s\n",
 1860                 AcpiFormatException(status));
 1861         else {
 1862             DELAY(1000000);
 1863             device_printf(sc->acpi_dev, "power-off failed - timeout\n");
 1864         }
 1865     } else if ((howto & RB_HALT) == 0 && sc->acpi_handle_reboot) {
 1866         /* Reboot using the reset register. */
 1867         status = AcpiReset();
 1868         if (ACPI_SUCCESS(status)) {
 1869             DELAY(1000000);
 1870             device_printf(sc->acpi_dev, "reset failed - timeout\n");
 1871         } else if (status != AE_NOT_EXIST)
 1872             device_printf(sc->acpi_dev, "reset failed - %s\n",
 1873                 AcpiFormatException(status));
 1874     } else if (sc->acpi_do_disable && panicstr == NULL) {
 1875         /*
 1876          * Only disable ACPI if the user requested.  On some systems, writing
 1877          * the disable value to SMI_CMD hangs the system.
 1878          */
 1879         device_printf(sc->acpi_dev, "Shutting down\n");
 1880         AcpiTerminate();
 1881     }
 1882 }
 1883 
 1884 static void
 1885 acpi_enable_fixed_events(struct acpi_softc *sc)
 1886 {
 1887     static int  first_time = 1;
 1888 
 1889     /* Enable and clear fixed events and install handlers. */
 1890     if ((AcpiGbl_FADT.Flags & ACPI_FADT_POWER_BUTTON) == 0) {
 1891         AcpiClearEvent(ACPI_EVENT_POWER_BUTTON);
 1892         AcpiInstallFixedEventHandler(ACPI_EVENT_POWER_BUTTON,
 1893                                      acpi_event_power_button_sleep, sc);
 1894         if (first_time)
 1895             device_printf(sc->acpi_dev, "Power Button (fixed)\n");
 1896     }
 1897     if ((AcpiGbl_FADT.Flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
 1898         AcpiClearEvent(ACPI_EVENT_SLEEP_BUTTON);
 1899         AcpiInstallFixedEventHandler(ACPI_EVENT_SLEEP_BUTTON,
 1900                                      acpi_event_sleep_button_sleep, sc);
 1901         if (first_time)
 1902             device_printf(sc->acpi_dev, "Sleep Button (fixed)\n");
 1903     }
 1904 
 1905     first_time = 0;
 1906 }
 1907 
 1908 /*
 1909  * Returns true if the device is actually present and should
 1910  * be attached to.  This requires the present, enabled, UI-visible 
 1911  * and diagnostics-passed bits to be set.
 1912  */
 1913 BOOLEAN
 1914 acpi_DeviceIsPresent(device_t dev)
 1915 {
 1916     ACPI_DEVICE_INFO    *devinfo;
 1917     ACPI_HANDLE         h;
 1918     BOOLEAN             present;
 1919 
 1920     if ((h = acpi_get_handle(dev)) == NULL ||
 1921         ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
 1922         return (FALSE);
 1923 
 1924     /* If no _STA method, must be present */
 1925     present = (devinfo->Valid & ACPI_VALID_STA) == 0 ||
 1926         ACPI_DEVICE_PRESENT(devinfo->CurrentStatus) ? TRUE : FALSE;
 1927 
 1928     AcpiOsFree(devinfo);
 1929     return (present);
 1930 }
 1931 
 1932 /*
 1933  * Returns true if the battery is actually present and inserted.
 1934  */
 1935 BOOLEAN
 1936 acpi_BatteryIsPresent(device_t dev)
 1937 {
 1938     ACPI_DEVICE_INFO    *devinfo;
 1939     ACPI_HANDLE         h;
 1940     BOOLEAN             present;
 1941 
 1942     if ((h = acpi_get_handle(dev)) == NULL ||
 1943         ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
 1944         return (FALSE);
 1945 
 1946     /* If no _STA method, must be present */
 1947     present = (devinfo->Valid & ACPI_VALID_STA) == 0 ||
 1948         ACPI_BATTERY_PRESENT(devinfo->CurrentStatus) ? TRUE : FALSE;
 1949 
 1950     AcpiOsFree(devinfo);
 1951     return (present);
 1952 }
 1953 
 1954 /*
 1955  * Match a HID string against a handle
 1956  */
 1957 BOOLEAN
 1958 acpi_MatchHid(ACPI_HANDLE h, const char *hid) 
 1959 {
 1960     ACPI_DEVICE_INFO    *devinfo;
 1961     BOOLEAN             ret;
 1962     int                 i;
 1963 
 1964     if (hid == NULL || h == NULL ||
 1965         ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
 1966         return (FALSE);
 1967 
 1968     ret = FALSE;
 1969     if ((devinfo->Valid & ACPI_VALID_HID) != 0 &&
 1970         strcmp(hid, devinfo->HardwareId.String) == 0)
 1971             ret = TRUE;
 1972     else if ((devinfo->Valid & ACPI_VALID_CID) != 0)
 1973         for (i = 0; i < devinfo->CompatibleIdList.Count; i++) {
 1974             if (strcmp(hid, devinfo->CompatibleIdList.Ids[i].String) == 0) {
 1975                 ret = TRUE;
 1976                 break;
 1977             }
 1978         }
 1979 
 1980     AcpiOsFree(devinfo);
 1981     return (ret);
 1982 }
 1983 
 1984 /*
 1985  * Return the handle of a named object within our scope, i.e., that of
 1986  * (parent) or one of its parents.
 1987  */
 1988 ACPI_STATUS
 1989 acpi_GetHandleInScope(ACPI_HANDLE parent, char *path, ACPI_HANDLE *result)
 1990 {
 1991     ACPI_HANDLE         r;
 1992     ACPI_STATUS         status;
 1993 
 1994     /* Walk back up the tree to the root */
 1995     for (;;) {
 1996         status = AcpiGetHandle(parent, path, &r);
 1997         if (ACPI_SUCCESS(status)) {
 1998             *result = r;
 1999             return (AE_OK);
 2000         }
 2001         /* XXX Return error here? */
 2002         if (status != AE_NOT_FOUND)
 2003             return (AE_OK);
 2004         if (ACPI_FAILURE(AcpiGetParent(parent, &r)))
 2005             return (AE_NOT_FOUND);
 2006         parent = r;
 2007     }
 2008 }
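
/*
 * Usage sketch (hypothetical caller, for illustration only; "VNDM" is a
 * made-up method name): resolve a control method that may be defined on
 * the device itself or on one of its ancestors, then evaluate it:
 *
 *      ACPI_HANDLE method;
 *
 *      if (ACPI_SUCCESS(acpi_GetHandleInScope(handle, "VNDM", &method)))
 *              (void)AcpiEvaluateObject(method, NULL, NULL, NULL);
 */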
 2009 
 2010 /* Find the difference between two PM tick counts. */
 2011 uint32_t
 2012 acpi_TimerDelta(uint32_t end, uint32_t start)
 2013 {
 2014     uint32_t delta;
 2015 
 2016     if (end >= start)
 2017         delta = end - start;
 2018     else if (AcpiGbl_FADT.Flags & ACPI_FADT_32BIT_TIMER)
 2019         delta = ((0xFFFFFFFF - start) + end + 1);
 2020     else
 2021         delta = ((0x00FFFFFF - start) + end + 1) & 0x00FFFFFF;
 2022     return (delta);
 2023 }
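
/*
 * Worked example (illustrative only): with a 24-bit PM timer, a start
 * count of 0x00FFFFF0 followed by an end count of 0x00000010 has wrapped,
 * so the delta is (0x00FFFFFF - 0x00FFFFF0) + 0x00000010 + 1 = 0x20,
 * i.e. 32 ticks of the 3.579545 MHz ACPI PM timer.
 */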
 2024 
 2025 /*
 2026  * Allocate a buffer with a preset data size.
 2027  */
 2028 ACPI_BUFFER *
 2029 acpi_AllocBuffer(int size)
 2030 {
 2031     ACPI_BUFFER *buf;
 2032 
 2033     if ((buf = malloc(size + sizeof(*buf), M_ACPIDEV, M_NOWAIT)) == NULL)
 2034         return (NULL);
 2035     buf->Length = size;
 2036     buf->Pointer = (void *)(buf + 1);
 2037     return (buf);
 2038 }
 2039 
 2040 ACPI_STATUS
 2041 acpi_SetInteger(ACPI_HANDLE handle, char *path, UINT32 number)
 2042 {
 2043     ACPI_OBJECT arg1;
 2044     ACPI_OBJECT_LIST args;
 2045 
 2046     arg1.Type = ACPI_TYPE_INTEGER;
 2047     arg1.Integer.Value = number;
 2048     args.Count = 1;
 2049     args.Pointer = &arg1;
 2050 
 2051     return (AcpiEvaluateObject(handle, path, &args, NULL));
 2052 }
 2053 
 2054 /*
 2055  * Evaluate a path that should return an integer.
 2056  */
 2057 ACPI_STATUS
 2058 acpi_GetInteger(ACPI_HANDLE handle, char *path, UINT32 *number)
 2059 {
 2060     ACPI_STATUS status;
 2061     ACPI_BUFFER buf;
 2062     ACPI_OBJECT param;
 2063 
 2064     if (handle == NULL)
 2065         handle = ACPI_ROOT_OBJECT;
 2066 
 2067     /*
 2068      * Assume that what we've been pointed at is an Integer object, or
 2069      * a method that will return an Integer.
 2070      */
 2071     buf.Pointer = &param;
 2072     buf.Length = sizeof(param);
 2073     status = AcpiEvaluateObject(handle, path, NULL, &buf);
 2074     if (ACPI_SUCCESS(status)) {
 2075         if (param.Type == ACPI_TYPE_INTEGER)
 2076             *number = param.Integer.Value;
 2077         else
 2078             status = AE_TYPE;
 2079     }
 2080 
 2081     /* 
 2082      * In some applications, a method that's expected to return an Integer
 2083      * may instead return a Buffer (probably to simplify some internal
 2084      * arithmetic).  We'll try to fetch whatever it is, and if it's a Buffer,
 2085      * convert it into an Integer as best we can.
 2086      *
 2087      * This is a hack.
 2088      */
 2089     if (status == AE_BUFFER_OVERFLOW) {
 2090         if ((buf.Pointer = AcpiOsAllocate(buf.Length)) == NULL) {
 2091             status = AE_NO_MEMORY;
 2092         } else {
 2093             status = AcpiEvaluateObject(handle, path, NULL, &buf);
 2094             if (ACPI_SUCCESS(status))
 2095                 status = acpi_ConvertBufferToInteger(&buf, number);
 2096             AcpiOsFree(buf.Pointer);
 2097         }
 2098     }
 2099     return (status);
 2100 }
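
/*
 * Usage sketch (hypothetical caller, for illustration only): fetch a
 * device's _ADR, treating a missing object as zero:
 *
 *      UINT32 addr;
 *
 *      if (ACPI_FAILURE(acpi_GetInteger(handle, "_ADR", &addr)))
 *              addr = 0;
 */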
 2101 
 2102 ACPI_STATUS
 2103 acpi_ConvertBufferToInteger(ACPI_BUFFER *bufp, UINT32 *number)
 2104 {
 2105     ACPI_OBJECT *p;
 2106     UINT8       *val;
 2107     int         i;
 2108 
 2109     p = (ACPI_OBJECT *)bufp->Pointer;
 2110     if (p->Type == ACPI_TYPE_INTEGER) {
 2111         *number = p->Integer.Value;
 2112         return (AE_OK);
 2113     }
 2114     if (p->Type != ACPI_TYPE_BUFFER)
 2115         return (AE_TYPE);
 2116     if (p->Buffer.Length > sizeof(int))
 2117         return (AE_BAD_DATA);
 2118 
 2119     *number = 0;
 2120     val = p->Buffer.Pointer;
 2121     for (i = 0; i < p->Buffer.Length; i++)
 2122         *number += val[i] << (i * 8);
 2123     return (AE_OK);
 2124 }
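
/*
 * Worked example (illustrative only): a two-byte Buffer { 0x34, 0x12 }
 * is assembled little-endian by the loop above, giving
 * 0x34 + (0x12 << 8) = 0x1234.
 */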
 2125 
 2126 /*
 2127  * Iterate over the elements of a package object, calling the supplied
 2128  * function for each element.
 2129  *
 2130  * XXX possible enhancement might be to abort traversal on error.
 2131  */
 2132 ACPI_STATUS
 2133 acpi_ForeachPackageObject(ACPI_OBJECT *pkg,
 2134         void (*func)(ACPI_OBJECT *comp, void *arg), void *arg)
 2135 {
 2136     ACPI_OBJECT *comp;
 2137     int         i;
 2138 
 2139     if (pkg == NULL || pkg->Type != ACPI_TYPE_PACKAGE)
 2140         return (AE_BAD_PARAMETER);
 2141 
 2142     /* Iterate over components */
 2143     i = 0;
 2144     comp = pkg->Package.Elements;
 2145     for (; i < pkg->Package.Count; i++, comp++)
 2146         func(comp, arg);
 2147 
 2148     return (AE_OK);
 2149 }
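
/*
 * Usage sketch (hypothetical caller, for illustration only): count the
 * Integer elements of a package with a small callback:
 *
 *      static void
 *      count_ints(ACPI_OBJECT *comp, void *arg)
 *      {
 *              if (comp->Type == ACPI_TYPE_INTEGER)
 *                      (*(int *)arg)++;
 *      }
 *
 *      int nints = 0;
 *      acpi_ForeachPackageObject(pkg, count_ints, &nints);
 */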
 2150 
 2151 /*
 2152  * Find the (index)th resource object in a set.
 2153  */
 2154 ACPI_STATUS
 2155 acpi_FindIndexedResource(ACPI_BUFFER *buf, int index, ACPI_RESOURCE **resp)
 2156 {
 2157     ACPI_RESOURCE       *rp;
 2158     int                 i;
 2159 
 2160     rp = (ACPI_RESOURCE *)buf->Pointer;
 2161     i = index;
 2162     while (i-- > 0) {
 2163         /* Range check */
 2164         if (rp > (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length))
 2165             return (AE_BAD_PARAMETER);
 2166 
 2167         /* Check for terminator */
 2168         if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0)
 2169             return (AE_NOT_FOUND);
 2170         rp = ACPI_NEXT_RESOURCE(rp);
 2171     }
 2172     if (resp != NULL)
 2173         *resp = rp;
 2174 
 2175     return (AE_OK);
 2176 }
 2177 
 2178 /*
 2179  * Append an ACPI_RESOURCE to an ACPI_BUFFER.
 2180  *
 2181  * Given a pointer to an ACPI_RESOURCE structure, expand the ACPI_BUFFER
 2182  * provided to contain it.  If the ACPI_BUFFER is empty, allocate a sensible
 2183  * backing block.  If the ACPI_RESOURCE is NULL, return an empty set of
 2184  * resources.
 2185  */
 2186 #define ACPI_INITIAL_RESOURCE_BUFFER_SIZE       512
 2187 
 2188 ACPI_STATUS
 2189 acpi_AppendBufferResource(ACPI_BUFFER *buf, ACPI_RESOURCE *res)
 2190 {
 2191     ACPI_RESOURCE       *rp;
 2192     void                *newp;
 2193 
 2194     /* Initialise the buffer if necessary. */
 2195     if (buf->Pointer == NULL) {
 2196         buf->Length = ACPI_INITIAL_RESOURCE_BUFFER_SIZE;
 2197         if ((buf->Pointer = AcpiOsAllocate(buf->Length)) == NULL)
 2198             return (AE_NO_MEMORY);
 2199         rp = (ACPI_RESOURCE *)buf->Pointer;
 2200         rp->Type = ACPI_RESOURCE_TYPE_END_TAG;
 2201         rp->Length = ACPI_RS_SIZE_MIN;
 2202     }
 2203     if (res == NULL)
 2204         return (AE_OK);
 2205 
 2206     /*
 2207      * Scan the current buffer looking for the terminator.
 2208      * This will either find the terminator or hit the end
 2209      * of the buffer and return an error.
 2210      */
 2211     rp = (ACPI_RESOURCE *)buf->Pointer;
 2212     for (;;) {
 2213         /* Range check, don't go outside the buffer */
 2214         if (rp >= (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length))
 2215             return (AE_BAD_PARAMETER);
 2216         if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0)
 2217             break;
 2218         rp = ACPI_NEXT_RESOURCE(rp);
 2219     }
 2220 
 2221     /*
 2222      * Check the size of the buffer and expand if required.
 2223      *
 2224      * Required size is:
 2225      *  size of existing resources before terminator + 
 2226      *  size of new resource and header +
 2227      *  size of terminator.
 2228      *
 2229      * Note that this loop should really only run once, unless
 2230      * for some reason we are stuffing a *really* huge resource.
 2231      */
 2232     while ((((u_int8_t *)rp - (u_int8_t *)buf->Pointer) + 
 2233             res->Length + ACPI_RS_SIZE_NO_DATA +
 2234             ACPI_RS_SIZE_MIN) >= buf->Length) {
 2235         if ((newp = AcpiOsAllocate(buf->Length * 2)) == NULL)
 2236             return (AE_NO_MEMORY);
 2237         bcopy(buf->Pointer, newp, buf->Length);
 2238         rp = (ACPI_RESOURCE *)((u_int8_t *)newp +
 2239                                ((u_int8_t *)rp - (u_int8_t *)buf->Pointer));
 2240         AcpiOsFree(buf->Pointer);
 2241         buf->Pointer = newp;
 2242         buf->Length += buf->Length;
 2243     }
 2244 
 2245     /* Insert the new resource. */
 2246     bcopy(res, rp, res->Length + ACPI_RS_SIZE_NO_DATA);
 2247 
 2248     /* And add the terminator. */
 2249     rp = ACPI_NEXT_RESOURCE(rp);
 2250     rp->Type = ACPI_RESOURCE_TYPE_END_TAG;
 2251     rp->Length = ACPI_RS_SIZE_MIN;
 2252 
 2253     return (AE_OK);
 2254 }
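
/*
 * Usage sketch (hypothetical caller, for illustration only; "res" stands
 * for any ACPI_RESOURCE the caller has filled in): start from a zeroed
 * ACPI_BUFFER so the first call allocates the backing store and writes
 * the END_TAG terminator, append resources, then free the buffer:
 *
 *      ACPI_BUFFER rsrcs = { 0, NULL };
 *
 *      acpi_AppendBufferResource(&rsrcs, res);
 *      AcpiOsFree(rsrcs.Pointer);
 */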
 2255 
 2256 /*
 2257  * Set interrupt model.
 2258  */
 2259 ACPI_STATUS
 2260 acpi_SetIntrModel(int model)
 2261 {
 2262 
 2263     return (acpi_SetInteger(ACPI_ROOT_OBJECT, "_PIC", model));
 2264 }
 2265 
 2266 /*
 2267  * Walk subtables of a table and call a callback routine for each
 2268  * subtable.  The caller should provide the first subtable and a
 2269  * pointer to the end of the table.  This can be used to walk tables
 2270  * such as MADT and SRAT that use subtable entries.
 2271  */
 2272 void
 2273 acpi_walk_subtables(void *first, void *end, acpi_subtable_handler *handler,
 2274     void *arg)
 2275 {
 2276     ACPI_SUBTABLE_HEADER *entry;
 2277 
 2278     for (entry = first; (void *)entry < end; ) {
 2279         /* Avoid an infinite loop if we hit a bogus entry. */
 2280         if (entry->Length < sizeof(ACPI_SUBTABLE_HEADER))
 2281             return;
 2282 
 2283         handler(entry, arg);
 2284         entry = ACPI_ADD_PTR(ACPI_SUBTABLE_HEADER, entry, entry->Length);
 2285     }
 2286 }
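
/*
 * Usage sketch (hypothetical caller, for illustration only; assume "madt"
 * already points at a mapped MADT): count its Local APIC subtables.  The
 * subtables begin immediately after the fixed ACPI_TABLE_MADT header:
 *
 *      static void
 *      madt_count(ACPI_SUBTABLE_HEADER *entry, void *arg)
 *      {
 *              if (entry->Type == ACPI_MADT_TYPE_LOCAL_APIC)
 *                      (*(int *)arg)++;
 *      }
 *
 *      int count = 0;
 *
 *      acpi_walk_subtables(madt + 1,
 *          (char *)madt + madt->Header.Length, madt_count, &count);
 */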
 2287 
 2288 /*
 2289  * DEPRECATED.  This interface has serious deficiencies and will be
 2290  * removed.
 2291  *
 2292  * Immediately enter the sleep state.  In the old model, acpiconf(8) ran
 2293  * rc.suspend and rc.resume so we don't have to notify devd(8) to do this.
 2294  */
 2295 ACPI_STATUS
 2296 acpi_SetSleepState(struct acpi_softc *sc, int state)
 2297 {
 2298     static int once;
 2299 
 2300     if (!once) {
 2301         device_printf(sc->acpi_dev,
 2302 "warning: acpi_SetSleepState() deprecated, need to update your software\n");
 2303         once = 1;
 2304     }
 2305     return (acpi_EnterSleepState(sc, state));
 2306 }
 2307 
 2308 #if defined(__amd64__) || defined(__i386__)
 2309 static void
 2310 acpi_sleep_force_task(void *context)
 2311 {
 2312     struct acpi_softc *sc = (struct acpi_softc *)context;
 2313 
 2314     if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate)))
 2315         device_printf(sc->acpi_dev, "force sleep state S%d failed\n",
 2316             sc->acpi_next_sstate);
 2317 }
 2318 
 2319 static void
 2320 acpi_sleep_force(void *arg)
 2321 {
 2322     struct acpi_softc *sc = (struct acpi_softc *)arg;
 2323 
 2324     device_printf(sc->acpi_dev,
 2325         "suspend request timed out, forcing sleep now\n");
 2326     /*
 2327      * XXX Suspending from a callout causes a freeze in DEVICE_SUSPEND(),
 2328      * so suspend from the acpi_task thread instead.
 2329      */
 2330     if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
 2331         acpi_sleep_force_task, sc)))
 2332         device_printf(sc->acpi_dev, "AcpiOsExecute() for sleeping failed\n");
 2333 }
 2334 #endif
 2335 
 2336 /*
 2337  * Request that the system enter the given suspend state.  All /dev/apm
 2338  * devices and devd(8) will be notified.  Userland then has a chance to
 2339  * save state and acknowledge the request.  The system sleeps once all
 2340  * acks are in.
 2341  */
 2342 int
 2343 acpi_ReqSleepState(struct acpi_softc *sc, int state)
 2344 {
 2345 #if defined(__amd64__) || defined(__i386__)
 2346 #if defined(__i386__)
 2347     struct apm_clone_data *clone;
 2348 #endif
 2349     ACPI_STATUS status;
 2350 
 2351     if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX)
 2352         return (EINVAL);
 2353     if (!acpi_sleep_states[state])
 2354         return (EOPNOTSUPP);
 2355 
 2356     /* If a suspend request is already in progress, just return. */
 2357     if (sc->acpi_next_sstate != 0) {
 2358         return (0);
 2359     }
 2360 
 2361     /* Wait until sleep is enabled. */
 2362     while (sc->acpi_sleep_disabled) {
 2363         AcpiOsSleep(1000);
 2364     }
 2365 
 2366     ACPI_LOCK(acpi);
 2367 
 2368     sc->acpi_next_sstate = state;
 2369 
 2370     /* S5 (soft-off) should be entered directly with no waiting. */
 2371     if (state == ACPI_STATE_S5) {
 2372         ACPI_UNLOCK(acpi);
 2373         status = acpi_EnterSleepState(sc, state);
 2374         return (ACPI_SUCCESS(status) ? 0 : ENXIO);
 2375     }
 2376 
 2377     /* Record the pending state and notify all apm devices. */
 2378 #if defined(__i386__)
 2379     STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) {
 2380         clone->notify_status = APM_EV_NONE;
 2381         if ((clone->flags & ACPI_EVF_DEVD) == 0) {
 2382             selwakeuppri(&clone->sel_read, PZERO);
 2383             KNOTE_LOCKED(&clone->sel_read.si_note, 0);
 2384         }
 2385     }
 2386 #endif
 2387 
 2388     /* If devd(8) is not running, immediately enter the sleep state. */
 2389     if (!devctl_process_running()) {
 2390         ACPI_UNLOCK(acpi);
 2391         status = acpi_EnterSleepState(sc, state);
 2392         return (ACPI_SUCCESS(status) ? 0 : ENXIO);
 2393     }
 2394 
 2395     /*
 2396      * Set a timeout to fire if userland doesn't ack the suspend request
 2397      * in time.  This way we still eventually go to sleep if we were
 2398      * overheating or running low on battery, even if userland is hung.
 2399      * We cancel this timeout once all userland acks are in or the
 2400      * suspend request is aborted.
 2401      */
 2402     callout_reset(&sc->susp_force_to, 10 * hz, acpi_sleep_force, sc);
 2403     ACPI_UNLOCK(acpi);
 2404 
 2405     /* Now notify devd(8) also. */
 2406     acpi_UserNotify("Suspend", ACPI_ROOT_OBJECT, state);
 2407 
 2408     return (0);
 2409 #else
 2410     /* This platform does not support acpi suspend/resume. */
 2411     return (EOPNOTSUPP);
 2412 #endif
 2413 }
 2414 
 2415 /*
 2416  * Acknowledge (or reject) a pending sleep state.  The caller has
 2417  * prepared for suspend and is now ready for it to proceed.  If the
 2418  * error argument is non-zero, it indicates suspend should be cancelled
 2419  * and gives an errno value describing why.  Once all votes are in,
 2420  * we suspend the system.
 2421  */
 2422 int
 2423 acpi_AckSleepState(struct apm_clone_data *clone, int error)
 2424 {
 2425 #if defined(__amd64__) || defined(__i386__)
 2426     struct acpi_softc *sc;
 2427     int ret, sleeping;
 2428 
 2429     /* If no pending sleep state, return an error. */
 2430     ACPI_LOCK(acpi);
 2431     sc = clone->acpi_sc;
 2432     if (sc->acpi_next_sstate == 0) {
 2433         ACPI_UNLOCK(acpi);
 2434         return (ENXIO);
 2435     }
 2436 
 2437     /* Caller wants to abort suspend process. */
 2438     if (error) {
 2439         sc->acpi_next_sstate = 0;
 2440         callout_stop(&sc->susp_force_to);
 2441         device_printf(sc->acpi_dev,
 2442             "listener on %s cancelled the pending suspend\n",
 2443             devtoname(clone->cdev));
 2444         ACPI_UNLOCK(acpi);
 2445         return (0);
 2446     }
 2447 
 2448     /*
 2449      * Mark this device as acking the suspend request.  Then, walk through
 2450      * all devices, seeing if they agree yet.  We only count devices that
 2451      * are writable since read-only devices couldn't ack the request.
 2452      */
 2453     sleeping = TRUE;
 2454 #if defined(__i386__)
 2455     clone->notify_status = APM_EV_ACKED;
 2456     STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) {
 2457         if ((clone->flags & ACPI_EVF_WRITE) != 0 &&
 2458             clone->notify_status != APM_EV_ACKED) {
 2459             sleeping = FALSE;
 2460             break;
 2461         }
 2462     }
 2463 #endif
 2464 
 2465     /* If all devices have voted "yes", we will suspend now. */
 2466     if (sleeping)
 2467         callout_stop(&sc->susp_force_to);
 2468     ACPI_UNLOCK(acpi);
 2469     ret = 0;
 2470     if (sleeping) {
 2471         if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate)))
 2472                 ret = ENODEV;
 2473     }
 2474     return (ret);
 2475 #else
 2476     /* This platform does not support acpi suspend/resume. */
 2477     return (EOPNOTSUPP);
 2478 #endif
 2479 }
 2480 
 2481 static void
 2482 acpi_sleep_enable(void *arg)
 2483 {
 2484     struct acpi_softc   *sc = (struct acpi_softc *)arg;
 2485 
 2486     /* Reschedule if the system is not fully up and running. */
 2487     if (!AcpiGbl_SystemAwakeAndRunning) {
 2488         timeout(acpi_sleep_enable, sc, hz * ACPI_MINIMUM_AWAKETIME);
 2489         return;
 2490     }
 2491 
 2492     ACPI_LOCK(acpi);
 2493     sc->acpi_sleep_disabled = FALSE;
 2494     ACPI_UNLOCK(acpi);
 2495 }
 2496 
 2497 static ACPI_STATUS
 2498 acpi_sleep_disable(struct acpi_softc *sc)
 2499 {
 2500     ACPI_STATUS         status;
 2501 
 2502     /* Fail if the system is not fully up and running. */
 2503     if (!AcpiGbl_SystemAwakeAndRunning)
 2504         return (AE_ERROR);
 2505 
 2506     ACPI_LOCK(acpi);
 2507     status = sc->acpi_sleep_disabled ? AE_ERROR : AE_OK;
 2508     sc->acpi_sleep_disabled = TRUE;
 2509     ACPI_UNLOCK(acpi);
 2510 
 2511     return (status);
 2512 }
 2513 
 2514 enum acpi_sleep_state {
 2515     ACPI_SS_NONE,
 2516     ACPI_SS_GPE_SET,
 2517     ACPI_SS_DEV_SUSPEND,
 2518     ACPI_SS_SLP_PREP,
 2519     ACPI_SS_SLEPT,
 2520 };
 2521 
 2522 /*
 2523  * Enter the desired system sleep state.
 2524  *
 2525  * Currently we support S1-S5, but S4 is only S4BIOS.
 2526  */
 2527 static ACPI_STATUS
 2528 acpi_EnterSleepState(struct acpi_softc *sc, int state)
 2529 {
 2530     ACPI_STATUS status;
 2531     enum acpi_sleep_state slp_state;
 2532 
 2533     ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
 2534 
 2535     if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX)
 2536         return_ACPI_STATUS (AE_BAD_PARAMETER);
 2537     if (!acpi_sleep_states[state]) {
 2538         device_printf(sc->acpi_dev, "Sleep state S%d not supported by BIOS\n",
 2539             state);
 2540         return (AE_SUPPORT);
 2541     }
 2542 
 2543     /* Re-entry once we're suspending is not allowed. */
 2544     status = acpi_sleep_disable(sc);
 2545     if (ACPI_FAILURE(status)) {
 2546         device_printf(sc->acpi_dev,
 2547             "suspend request ignored (not ready yet)\n");
 2548         return (status);
 2549     }
 2550 
 2551     if (state == ACPI_STATE_S5) {
 2552         /*
 2553          * Shut down cleanly and power off.  This will call us back through the
 2554          * shutdown handlers.
 2555          */
 2556         shutdown_nice(RB_POWEROFF);
 2557         return_ACPI_STATUS (AE_OK);
 2558     }
 2559 
 2560     EVENTHANDLER_INVOKE(power_suspend);
 2561 
 2562     if (smp_started) {
 2563         thread_lock(curthread);
 2564         sched_bind(curthread, 0);
 2565         thread_unlock(curthread);
 2566     }
 2567 
 2568     /*
 2569      * Be sure to hold Giant across DEVICE_SUSPEND/RESUME since non-MPSAFE
 2570      * drivers need this.
 2571      */
 2572     mtx_lock(&Giant);
 2573 
 2574     slp_state = ACPI_SS_NONE;
 2575 
 2576     sc->acpi_sstate = state;
 2577 
 2578     /* Enable any GPEs as appropriate and requested by the user. */
 2579     acpi_wake_prep_walk(state);
 2580     slp_state = ACPI_SS_GPE_SET;
 2581 
 2582     /*
 2583      * Inform all devices that we are going to sleep.  If at least one
 2584      * device fails, DEVICE_SUSPEND() automatically resumes the tree.
 2585      *
 2586      * XXX A two-pass approach with a 'veto' pass followed by a
 2587      * "real thing" pass would be better, but the current bus
 2588      * interface does not provide for this.
 2589      */
 2590     if (DEVICE_SUSPEND(root_bus) != 0) {
 2591         device_printf(sc->acpi_dev, "device_suspend failed\n");
 2592         goto backout;
 2593     }
 2594     slp_state = ACPI_SS_DEV_SUSPEND;
 2595 
 2596     /* If testing device suspend only, back out of everything here. */
 2597     if (acpi_susp_bounce)
 2598         goto backout;
 2599 
 2600     status = AcpiEnterSleepStatePrep(state);
 2601     if (ACPI_FAILURE(status)) {
 2602         device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n",
 2603                       AcpiFormatException(status));
 2604         goto backout;
 2605     }
 2606     slp_state = ACPI_SS_SLP_PREP;
 2607 
 2608     if (sc->acpi_sleep_delay > 0)
 2609         DELAY(sc->acpi_sleep_delay * 1000000);
 2610 
 2611     if (state != ACPI_STATE_S1) {
 2612         acpi_sleep_machdep(sc, state);
 2613 
 2614         /* Re-enable ACPI hardware on wakeup from sleep state 4. */
 2615         if (state == ACPI_STATE_S4)
 2616             AcpiEnable();
 2617     } else {
 2618         ACPI_DISABLE_IRQS();
 2619         status = AcpiEnterSleepState(state);
 2620         if (ACPI_FAILURE(status)) {
 2621             device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n",
 2622                           AcpiFormatException(status));
 2623             goto backout;
 2624         }
 2625     }
 2626     slp_state = ACPI_SS_SLEPT;
 2627 
 2628     /*
 2629      * Back out state according to how far along we got in the suspend
 2630      * process.  This handles both the error and success cases.
 2631      */
 2632 backout:
 2633     if (slp_state >= ACPI_SS_GPE_SET) {
 2634         acpi_wake_prep_walk(state);
 2635         sc->acpi_sstate = ACPI_STATE_S0;
 2636     }
 2637     if (slp_state >= ACPI_SS_DEV_SUSPEND)
 2638         DEVICE_RESUME(root_bus);
 2639     if (slp_state >= ACPI_SS_SLP_PREP)
 2640         AcpiLeaveSleepState(state);
 2641     if (slp_state >= ACPI_SS_SLEPT)
 2642         acpi_enable_fixed_events(sc);
 2643     sc->acpi_next_sstate = 0;
 2644 
 2645     mtx_unlock(&Giant);
 2646 
 2647     if (smp_started) {
 2648         thread_lock(curthread);
 2649         sched_unbind(curthread);
 2650         thread_unlock(curthread);
 2651     }
 2652 
 2653     EVENTHANDLER_INVOKE(power_resume);
 2654 
 2655     /* Allow another sleep request after a while. */
 2656     timeout(acpi_sleep_enable, sc, hz * ACPI_MINIMUM_AWAKETIME);
 2657 
 2658     /* Run /etc/rc.resume after we are back. */
 2659     if (devctl_process_running())
 2660         acpi_UserNotify("Resume", ACPI_ROOT_OBJECT, state);
 2661 
 2662     return_ACPI_STATUS (status);
 2663 }
 2664 
 2665 void
 2666 acpi_resync_clock(struct acpi_softc *sc)
 2667 {
 2668 
 2669     if (!acpi_reset_clock)
 2670         return;
 2671 
 2672     /*
 2673      * Warm up timecounter again and reset system clock.
 2674      */
 2675     (void)timecounter->tc_get_timecount(timecounter);
 2676     (void)timecounter->tc_get_timecount(timecounter);
 2677     inittodr(time_second + sc->acpi_sleep_delay);
 2678 }
 2679 
 2680 /* Enable or disable the device's wake GPE. */
 2681 int
 2682 acpi_wake_set_enable(device_t dev, int enable)
 2683 {
 2684     struct acpi_prw_data prw;
 2685     ACPI_STATUS status;
 2686     int flags;
 2687 
 2688     /* Make sure the device supports waking the system and get the GPE. */
 2689     if (acpi_parse_prw(acpi_get_handle(dev), &prw) != 0)
 2690         return (ENXIO);
 2691 
 2692     flags = acpi_get_flags(dev);
 2693     if (enable) {
 2694         status = AcpiGpeWakeup(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_ENABLE);
 2695         if (ACPI_FAILURE(status)) {
 2696             device_printf(dev, "enable wake failed\n");
 2697             return (ENXIO);
 2698         }
 2699         acpi_set_flags(dev, flags | ACPI_FLAG_WAKE_ENABLED);
 2700     } else {
 2701         status = AcpiGpeWakeup(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_DISABLE);
 2702         if (ACPI_FAILURE(status)) {
 2703             device_printf(dev, "disable wake failed\n");
 2704             return (ENXIO);
 2705         }
 2706         acpi_set_flags(dev, flags & ~ACPI_FLAG_WAKE_ENABLED);
 2707     }
 2708 
 2709     return (0);
 2710 }
 2711 
 2712 static int
 2713 acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate)
 2714 {
 2715     struct acpi_prw_data prw;
 2716     device_t dev;
 2717 
 2718     /* Check that this is a wake-capable device and get its GPE. */
 2719     if (acpi_parse_prw(handle, &prw) != 0)
 2720         return (ENXIO);
 2721     dev = acpi_get_device(handle);
 2722 
 2723     /*
 2724      * The destination sleep state must be less than (i.e., higher power)
 2725      * or equal to the value specified by _PRW.  If this GPE cannot be
 2726      * enabled for the next sleep state, then disable it.  If it can and
 2727      * the user requested it be enabled, turn on any required power resources
 2728      * and set _PSW.
 2729      */
 2730     if (sstate > prw.lowest_wake) {
 2731         AcpiGpeWakeup(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_DISABLE);
 2732         if (bootverbose)
 2733             device_printf(dev, "wake_prep disabled wake for %s (S%d)\n",
 2734                 acpi_name(handle), sstate);
 2735     } else if (dev && (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) != 0) {
 2736         acpi_pwr_wake_enable(handle, 1);
 2737         acpi_SetInteger(handle, "_PSW", 1);
 2738         if (bootverbose)
 2739             device_printf(dev, "wake_prep enabled for %s (S%d)\n",
 2740                 acpi_name(handle), sstate);
 2741     }
 2742 
 2743     return (0);
 2744 }
 2745 
 2746 static int
 2747 acpi_wake_run_prep(ACPI_HANDLE handle, int sstate)
 2748 {
 2749     struct acpi_prw_data prw;
 2750     device_t dev;
 2751 
 2752     /*
 2753      * Check that this is a wake-capable device and get its GPE.  Return
 2754      * now if the user didn't enable this device for wake.
 2755      */
 2756     if (acpi_parse_prw(handle, &prw) != 0)
 2757         return (ENXIO);
 2758     dev = acpi_get_device(handle);
 2759     if (dev == NULL || (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) == 0)
 2760         return (0);
 2761 
 2762     /*
 2763      * If this GPE couldn't be enabled for the previous sleep state, it was
 2764      * disabled before going to sleep so re-enable it.  If it was enabled,
 2765      * clear _PSW and turn off any power resources it used.
 2766      */
 2767     if (sstate > prw.lowest_wake) {
 2768         AcpiGpeWakeup(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_ENABLE);
 2769         if (bootverbose)
 2770             device_printf(dev, "run_prep re-enabled %s\n", acpi_name(handle));
 2771     } else {
 2772         acpi_SetInteger(handle, "_PSW", 0);
 2773         acpi_pwr_wake_enable(handle, 0);
 2774         if (bootverbose)
 2775             device_printf(dev, "run_prep cleaned up for %s\n",
 2776                 acpi_name(handle));
 2777     }
 2778 
 2779     return (0);
 2780 }
 2781 
 2782 static ACPI_STATUS
 2783 acpi_wake_prep(ACPI_HANDLE handle, UINT32 level, void *context, void **status)
 2784 {
 2785     int sstate;
 2786 
 2787     /* If suspending, run the sleep prep function, otherwise wake. */
 2788     sstate = *(int *)context;
 2789     if (AcpiGbl_SystemAwakeAndRunning)
 2790         acpi_wake_sleep_prep(handle, sstate);
 2791     else
 2792         acpi_wake_run_prep(handle, sstate);
 2793     return (AE_OK);
 2794 }
 2795 
 2796 /* Walk the tree rooted at acpi0 to prep devices for suspend/resume. */
 2797 static int
 2798 acpi_wake_prep_walk(int sstate)
 2799 {
 2800     ACPI_HANDLE sb_handle;
 2801 
 2802     if (ACPI_SUCCESS(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle)))
 2803         AcpiWalkNamespace(ACPI_TYPE_DEVICE, sb_handle, 100,
 2804             acpi_wake_prep, NULL, &sstate, NULL);
 2805     return (0);
 2806 }
 2807 
 2808 /* Walk the tree rooted at acpi0 to attach per-device wake sysctls. */
 2809 static int
 2810 acpi_wake_sysctl_walk(device_t dev)
 2811 {
 2812     int error, i, numdevs;
 2813     device_t *devlist;
 2814     device_t child;
 2815     ACPI_STATUS status;
 2816 
 2817     error = device_get_children(dev, &devlist, &numdevs);
 2818     if (error != 0 || numdevs == 0) {
 2819         if (numdevs == 0)
 2820             free(devlist, M_TEMP);
 2821         return (error);
 2822     }
 2823     for (i = 0; i < numdevs; i++) {
 2824         child = devlist[i];
 2825         acpi_wake_sysctl_walk(child);
 2826         if (!device_is_attached(child))
 2827             continue;
 2828         status = AcpiEvaluateObject(acpi_get_handle(child), "_PRW", NULL, NULL);
 2829         if (ACPI_SUCCESS(status)) {
 2830             SYSCTL_ADD_PROC(device_get_sysctl_ctx(child),
 2831                 SYSCTL_CHILDREN(device_get_sysctl_tree(child)), OID_AUTO,
 2832                 "wake", CTLTYPE_INT | CTLFLAG_RW, child, 0,
 2833                 acpi_wake_set_sysctl, "I", "Device set to wake the system");
 2834         }
 2835     }
 2836     free(devlist, M_TEMP);
 2837 
 2838     return (0);
 2839 }
 2840 
 2841 /* Enable or disable wake from userland. */
 2842 static int
 2843 acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS)
 2844 {
 2845     int enable, error;
 2846     device_t dev;
 2847 
 2848     dev = (device_t)arg1;
 2849     enable = (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) ? 1 : 0;
 2850 
 2851     error = sysctl_handle_int(oidp, &enable, 0, req);
 2852     if (error != 0 || req->newptr == NULL)
 2853         return (error);
 2854     if (enable != 0 && enable != 1)
 2855         return (EINVAL);
 2856 
 2857     return (acpi_wake_set_enable(dev, enable));
 2858 }
 2859 
 2860 /* Parse a device's _PRW into a structure. */
 2861 int
 2862 acpi_parse_prw(ACPI_HANDLE h, struct acpi_prw_data *prw)
 2863 {
 2864     ACPI_STATUS                 status;
 2865     ACPI_BUFFER                 prw_buffer;
 2866     ACPI_OBJECT                 *res, *res2;
 2867     int                         error, i, power_count;
 2868 
 2869     if (h == NULL || prw == NULL)
 2870         return (EINVAL);
 2871 
 2872     /*
 2873      * The _PRW object (7.2.9) is only required for devices that have the
 2874      * ability to wake the system from a sleeping state.
 2875      */
 2876     error = EINVAL;
 2877     prw_buffer.Pointer = NULL;
 2878     prw_buffer.Length = ACPI_ALLOCATE_BUFFER;
 2879     status = AcpiEvaluateObject(h, "_PRW", NULL, &prw_buffer);
 2880     if (ACPI_FAILURE(status))
 2881         return (ENOENT);
 2882     res = (ACPI_OBJECT *)prw_buffer.Pointer;
 2883     if (res == NULL)
 2884         return (ENOENT);
 2885     if (!ACPI_PKG_VALID(res, 2))
 2886         goto out;
 2887 
 2888     /*
 2889      * Element 1 of the _PRW object:
 2890      * The lowest power system sleeping state that can be entered while still
 2891      * providing wake functionality.  The sleeping state being entered must
 2892      * be less than (i.e., higher power) or equal to this value.
 2893      */
 2894     if (acpi_PkgInt32(res, 1, &prw->lowest_wake) != 0)
 2895         goto out;
 2896 
 2897     /*
 2898      * Element 0 of the _PRW object:
 2899      */
 2900     switch (res->Package.Elements[0].Type) {
 2901     case ACPI_TYPE_INTEGER:
 2902         /*
 2903          * If the data type of this package element is numeric, then this
 2904          * _PRW package element is the bit index in the GPEx_EN, in the
 2905          * GPE blocks described in the FADT, of the enable bit that is
 2906          * enabled for the wake event.
 2907          */
 2908         prw->gpe_handle = NULL;
 2909         prw->gpe_bit = res->Package.Elements[0].Integer.Value;
 2910         error = 0;
 2911         break;
 2912     case ACPI_TYPE_PACKAGE:
 2913         /*
 2914          * If the data type of this package element is a package, then this
 2915          * _PRW package element is itself a package containing two
 2916          * elements.  The first is an object reference to the GPE Block
 2917          * device that contains the GPE that will be triggered by the wake
 2918          * event.  The second element is numeric and it contains the bit
 2919          * index in the GPEx_EN, in the GPE Block referenced by the
 2920          * first element in the package, of the enable bit that is enabled for
 2921          * the wake event.
 2922          *
 2923          * For example, if this field is a package then it is of the form:
 2924          * Package() {\_SB.PCI0.ISA.GPE, 2}
 2925          */
 2926         res2 = &res->Package.Elements[0];
 2927         if (!ACPI_PKG_VALID(res2, 2))
 2928             goto out;
 2929         prw->gpe_handle = acpi_GetReference(NULL, &res2->Package.Elements[0]);
 2930         if (prw->gpe_handle == NULL)
 2931             goto out;
 2932         if (acpi_PkgInt32(res2, 1, &prw->gpe_bit) != 0)
 2933             goto out;
 2934         error = 0;
 2935         break;
 2936     default:
 2937         goto out;
 2938     }
 2939 
 2940     /* Elements 2 to N of the _PRW object are power resources. */
 2941     power_count = res->Package.Count - 2;
 2942     if (power_count > ACPI_PRW_MAX_POWERRES) {
 2943         printf("ACPI device %s has too many power resources\n", acpi_name(h));
 2944         power_count = 0;
 2945     }
 2946     prw->power_res_count = power_count;
 2947     for (i = 0; i < power_count; i++)
 2948         prw->power_res[i] = res->Package.Elements[i];
 2949 
 2950 out:
 2951     if (prw_buffer.Pointer != NULL)
 2952         AcpiOsFree(prw_buffer.Pointer);
 2953     return (error);
 2954 }
 2955 
 2956 /*
 2957  * ACPI Event Handlers
 2958  */
 2959 
 2960 /* System Event Handlers (registered by EVENTHANDLER_REGISTER) */
 2961 
 2962 static void
 2963 acpi_system_eventhandler_sleep(void *arg, int state)
 2964 {
 2965     struct acpi_softc *sc = (struct acpi_softc *)arg;
 2966     int ret;
 2967 
 2968     ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
 2969 
 2970     /* Check if button action is disabled or unknown. */
 2971     if (state == ACPI_STATE_UNKNOWN)
 2972         return;
 2973 
 2974     /* Request that the system prepare to enter the given suspend state. */
 2975     ret = acpi_ReqSleepState(sc, state);
 2976     if (ret != 0)
 2977         device_printf(sc->acpi_dev,
 2978             "request to enter state S%d failed (err %d)\n", state, ret);
 2979 
 2980     return_VOID;
 2981 }
 2982 
 2983 static void
 2984 acpi_system_eventhandler_wakeup(void *arg, int state)
 2985 {
 2986 
 2987     ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
 2988 
 2989     /* Currently, nothing to do for wakeup. */
 2990 
 2991     return_VOID;
 2992 }
 2993 
 2994 /* 
 2995  * ACPICA Event Handlers (FixedEvent, also called from button notify handler)
 2996  */
 2997 UINT32
 2998 acpi_event_power_button_sleep(void *context)
 2999 {
 3000     struct acpi_softc   *sc = (struct acpi_softc *)context;
 3001 
 3002     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 3003 
 3004     EVENTHANDLER_INVOKE(acpi_sleep_event, sc->acpi_power_button_sx);
 3005 
 3006     return_VALUE (ACPI_INTERRUPT_HANDLED);
 3007 }
 3008 
 3009 UINT32
 3010 acpi_event_power_button_wake(void *context)
 3011 {
 3012     struct acpi_softc   *sc = (struct acpi_softc *)context;
 3013 
 3014     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 3015 
 3016     EVENTHANDLER_INVOKE(acpi_wakeup_event, sc->acpi_power_button_sx);
 3017 
 3018     return_VALUE (ACPI_INTERRUPT_HANDLED);
 3019 }
 3020 
 3021 UINT32
 3022 acpi_event_sleep_button_sleep(void *context)
 3023 {
 3024     struct acpi_softc   *sc = (struct acpi_softc *)context;
 3025 
 3026     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 3027 
 3028     EVENTHANDLER_INVOKE(acpi_sleep_event, sc->acpi_sleep_button_sx);
 3029 
 3030     return_VALUE (ACPI_INTERRUPT_HANDLED);
 3031 }
 3032 
 3033 UINT32
 3034 acpi_event_sleep_button_wake(void *context)
 3035 {
 3036     struct acpi_softc   *sc = (struct acpi_softc *)context;
 3037 
 3038     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 3039 
 3040     EVENTHANDLER_INVOKE(acpi_wakeup_event, sc->acpi_sleep_button_sx);
 3041 
 3042     return_VALUE (ACPI_INTERRUPT_HANDLED);
 3043 }
 3044 
 3045 /*
 3046  * XXX This static buffer is suboptimal.  There is no locking so only
 3047  * use this for single-threaded callers.
 3048  */
 3049 char *
 3050 acpi_name(ACPI_HANDLE handle)
 3051 {
 3052     ACPI_BUFFER buf;
 3053     static char data[256];
 3054 
 3055     buf.Length = sizeof(data);
 3056     buf.Pointer = data;
 3057 
 3058     if (handle && ACPI_SUCCESS(AcpiGetName(handle, ACPI_FULL_PATHNAME, &buf)))
 3059         return (data);
 3060     return ("(unknown)");
 3061 }
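
/*
 * Editor's example (sketch): acpi_name() is mainly useful for diagnostics,
 * e.g. printing the namespace path of a handle in a driver message.
 */
static void
acpi_example_print_path(device_t dev, ACPI_HANDLE h)
{

    device_printf(dev, "ACPI path: %s\n", acpi_name(h));
}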
 3062 
 3063 /*
 3064  * Debugging/bug-avoidance.  Avoid trying to fetch info on various
 3065  * parts of the namespace.
 3066  */
 3067 int
 3068 acpi_avoid(ACPI_HANDLE handle)
 3069 {
 3070     char        *cp, *env, *np;
 3071     int         len;
 3072 
 3073     np = acpi_name(handle);
 3074     if (*np == '\\')
 3075         np++;
 3076     if ((env = getenv("debug.acpi.avoid")) == NULL)
 3077         return (0);
 3078 
 3079     /* Scan the avoid list checking for a match */
 3080     cp = env;
 3081     for (;;) {
 3082         while (*cp != 0 && isspace(*cp))
 3083             cp++;
 3084         if (*cp == 0)
 3085             break;
 3086         len = 0;
 3087         while (cp[len] != 0 && !isspace(cp[len]))
 3088             len++;
 3089         if (!strncmp(cp, np, len)) {
 3090             freeenv(env);
 3091             return(1);
 3092         }
 3093         cp += len;
 3094     }
 3095     freeenv(env);
 3096 
 3097     return (0);
 3098 }
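
/*
 * Editor's sketch (not the actual probe code): namespace-walking code can
 * consult acpi_avoid() so that objects listed in the debug.acpi.avoid
 * tunable are skipped, e.g. debug.acpi.avoid="_SB_.PCI0.LPC_" in
 * loader.conf (the path shown is illustrative).
 */
static ACPI_STATUS
acpi_example_walk_cb(ACPI_HANDLE h, UINT32 level, void *ctx, void **ret)
{

    if (acpi_avoid(h))
        return (AE_OK);         /* skip this object */
    /* ... otherwise inspect the object ... */
    return (AE_OK);
}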
 3099 
 3100 /*
 3101  * Debugging/bug-avoidance.  Disable ACPI subsystem components.
 3102  */
 3103 int
 3104 acpi_disabled(char *subsys)
 3105 {
 3106     char        *cp, *env;
 3107     int         len;
 3108 
 3109     if ((env = getenv("debug.acpi.disabled")) == NULL)
 3110         return (0);
 3111     if (strcmp(env, "all") == 0) {
 3112         freeenv(env);
 3113         return (1);
 3114     }
 3115 
 3116     /* Scan the disable list, checking for a match. */
 3117     cp = env;
 3118     for (;;) {
 3119         while (*cp != '\0' && isspace(*cp))
 3120             cp++;
 3121         if (*cp == '\0')
 3122             break;
 3123         len = 0;
 3124         while (cp[len] != '\0' && !isspace(cp[len]))
 3125             len++;
 3126         if (strncmp(cp, subsys, len) == 0) {
 3127             freeenv(env);
 3128             return (1);
 3129         }
 3130         cp += len;
 3131     }
 3132     freeenv(env);
 3133 
 3134     return (0);
 3135 }
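
/*
 * Editor's sketch (hypothetical driver): subsystem drivers call
 * acpi_disabled() early in probe so that debug.acpi.disabled="example"
 * (or "all") in loader.conf keeps them from attaching.  The subsystem
 * name "example" is made up.
 */
static int
acpi_example_probe(device_t dev)
{

    if (acpi_disabled("example"))
        return (ENXIO);
    /* ... normal device matching would follow ... */
    return (BUS_PROBE_DEFAULT);
}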
 3136 
 3137 /*
 3138  * Control interface.
 3139  *
 3140  * We multiplex ioctls for all participating ACPI devices here.  Individual 
 3141  * drivers wanting to be accessible via /dev/acpi should use the
 3142  * register/deregister interface to make their handlers visible.
 3143  */
 3144 struct acpi_ioctl_hook
 3145 {
 3146     TAILQ_ENTRY(acpi_ioctl_hook) link;
 3147     u_long                       cmd;
 3148     acpi_ioctl_fn                fn;
 3149     void                         *arg;
 3150 };
 3151 
 3152 static TAILQ_HEAD(,acpi_ioctl_hook)     acpi_ioctl_hooks;
 3153 static int                              acpi_ioctl_hooks_initted;
 3154 
 3155 int
 3156 acpi_register_ioctl(u_long cmd, acpi_ioctl_fn fn, void *arg)
 3157 {
 3158     struct acpi_ioctl_hook      *hp;
 3159 
 3160     if ((hp = malloc(sizeof(*hp), M_ACPIDEV, M_NOWAIT)) == NULL)
 3161         return (ENOMEM);
 3162     hp->cmd = cmd;
 3163     hp->fn = fn;
 3164     hp->arg = arg;
 3165 
 3166     ACPI_LOCK(acpi);
 3167     if (acpi_ioctl_hooks_initted == 0) {
 3168         TAILQ_INIT(&acpi_ioctl_hooks);
 3169         acpi_ioctl_hooks_initted = 1;
 3170     }
 3171     TAILQ_INSERT_TAIL(&acpi_ioctl_hooks, hp, link);
 3172     ACPI_UNLOCK(acpi);
 3173 
 3174     return (0);
 3175 }
 3176 
 3177 void
 3178 acpi_deregister_ioctl(u_long cmd, acpi_ioctl_fn fn)
 3179 {
 3180     struct acpi_ioctl_hook      *hp;
 3181 
 3182     ACPI_LOCK(acpi);
 3183     TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link)
 3184         if (hp->cmd == cmd && hp->fn == fn)
 3185             break;
 3186 
 3187     if (hp != NULL) {
 3188         TAILQ_REMOVE(&acpi_ioctl_hooks, hp, link);
 3189         free(hp, M_ACPIDEV);
 3190     }
 3191     ACPI_UNLOCK(acpi);
 3192 }
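
/*
 * Editor's sketch (hypothetical driver, made-up names): a driver makes a
 * private command reachable through /dev/acpi by registering a handler for
 * its ioctl code.  The handler receives the command, the argument buffer,
 * and the cookie passed at registration time, as dispatched by acpiioctl()
 * below.
 */
#define ACPIIO_EXAMPLE_GETUNIT  _IOR('X', 100, int)     /* hypothetical */

static int
acpi_example_ioctl(u_long cmd, caddr_t addr, void *arg)
{
    struct acpi_softc *sc = arg;

    *(int *)addr = device_get_unit(sc->acpi_dev);
    return (0);
}

static void
acpi_example_hook_ioctl(struct acpi_softc *sc)
{

    /* Typically done at attach time ... */
    acpi_register_ioctl(ACPIIO_EXAMPLE_GETUNIT, acpi_example_ioctl, sc);
    /* ... and undone at detach. */
    acpi_deregister_ioctl(ACPIIO_EXAMPLE_GETUNIT, acpi_example_ioctl);
}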
 3193 
 3194 static int
 3195 acpiopen(struct cdev *dev, int flag, int fmt, struct thread *td)
 3196 {
 3197     return (0);
 3198 }
 3199 
 3200 static int
 3201 acpiclose(struct cdev *dev, int flag, int fmt, struct thread *td)
 3202 {
 3203     return (0);
 3204 }
 3205 
 3206 static int
 3207 acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
 3208 {
 3209     struct acpi_softc           *sc;
 3210     struct acpi_ioctl_hook      *hp;
 3211     int                         error, state;
 3212 
 3213     error = 0;
 3214     hp = NULL;
 3215     sc = dev->si_drv1;
 3216 
 3217     /*
 3218      * Scan the list of registered ioctls, looking for handlers.
 3219      */
 3220     ACPI_LOCK(acpi);
 3221     if (acpi_ioctl_hooks_initted)
 3222         TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) {
 3223             if (hp->cmd == cmd)
 3224                 break;
 3225         }
 3226     ACPI_UNLOCK(acpi);
 3227     if (hp)
 3228         return (hp->fn(cmd, addr, hp->arg));
 3229 
 3230     /*
 3231      * Core ioctls are not permitted for non-writable users.  The
 3232      * remaining ioctls currently just fetch information and do not
 3233      * change system behavior.
 3234      */
 3235     if ((flag & FWRITE) == 0)
 3236         return (EPERM);
 3237 
 3238     /* Core system ioctls. */
 3239     switch (cmd) {
 3240     case ACPIIO_REQSLPSTATE:
 3241         state = *(int *)addr;
 3242         if (state != ACPI_STATE_S5)
 3243             return (acpi_ReqSleepState(sc, state));
 3244         device_printf(sc->acpi_dev, "power off via acpi ioctl not supported\n");
 3245         error = EOPNOTSUPP;
 3246         break;
 3247     case ACPIIO_ACKSLPSTATE:
 3248         error = *(int *)addr;
 3249         error = acpi_AckSleepState(sc->acpi_clone, error);
 3250         break;
 3251     case ACPIIO_SETSLPSTATE:    /* DEPRECATED */
 3252         state = *(int *)addr;
 3253         if (state < ACPI_STATE_S0 || state > ACPI_S_STATES_MAX)
 3254             return (EINVAL);
 3255         if (!acpi_sleep_states[state])
 3256             return (EOPNOTSUPP);
 3257         if (ACPI_FAILURE(acpi_SetSleepState(sc, state)))
 3258             error = ENXIO;
 3259         break;
 3260     default:
 3261         error = ENXIO;
 3262         break;
 3263     }
 3264 
 3265     return (error);
 3266 }
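
/*
 * Editor's sketch of the userland side (illustration only, excluded from
 * the build): a consumer such as acpiconf(8) drives the core ioctls above
 * by opening /dev/acpi read-write and issuing ACPIIO_REQSLPSTATE.
 */
#if 0
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <dev/acpica/acpiio.h>

static int
request_sleep(int state)
{
    int fd;

    fd = open("/dev/acpi", O_RDWR);
    if (fd < 0)
        return (-1);
    if (ioctl(fd, ACPIIO_REQSLPSTATE, &state) < 0) {
        close(fd);
        return (-1);
    }
    close(fd);
    return (0);
}
#endif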
 3267 
 3268 static int
 3269 acpi_sname2sstate(const char *sname)
 3270 {
 3271     int sstate;
 3272 
 3273     if (toupper(sname[0]) == 'S') {
 3274         sstate = sname[1] - '0';
 3275         if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5 &&
 3276             sname[2] == '\0')
 3277             return (sstate);
 3278     } else if (strcasecmp(sname, "NONE") == 0)
 3279         return (ACPI_STATE_UNKNOWN);
 3280     return (-1);
 3281 }
 3282 
 3283 static const char *
 3284 acpi_sstate2sname(int sstate)
 3285 {
 3286     static const char *snames[] = { "S0", "S1", "S2", "S3", "S4", "S5" };
 3287 
 3288     if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5)
 3289         return (snames[sstate]);
 3290     else if (sstate == ACPI_STATE_UNKNOWN)
 3291         return ("NONE");
 3292     return (NULL);
 3293 }
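
/*
 * Editor's example (sketch): the two helpers above convert between the
 * sysctl string form ("S1".."S5", or "NONE") and the numeric sleep states.
 */
static void
acpi_example_sstate_conv(void)
{
    int s;

    s = acpi_sname2sstate("S3");                /* ACPI_STATE_S3 (3) */
    printf("state name: %s\n", acpi_sstate2sname(s));  /* prints "S3" */
    if (acpi_sname2sstate("NONE") == ACPI_STATE_UNKNOWN)
        printf("NONE means no state is assigned\n");
}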
 3294 
 3295 static int
 3296 acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
 3297 {
 3298     int error;
 3299     struct sbuf sb;
 3300     UINT8 state;
 3301 
 3302     sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
 3303     for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++)
 3304         if (acpi_sleep_states[state])
 3305             sbuf_printf(&sb, "%s ", acpi_sstate2sname(state));
 3306     sbuf_trim(&sb);
 3307     sbuf_finish(&sb);
 3308     error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
 3309     sbuf_delete(&sb);
 3310     return (error);
 3311 }
 3312 
 3313 static int
 3314 acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
 3315 {
 3316     char sleep_state[10];
 3317     int error, new_state, old_state;
 3318 
 3319     old_state = *(int *)oidp->oid_arg1;
 3320     strlcpy(sleep_state, acpi_sstate2sname(old_state), sizeof(sleep_state));
 3321     error = sysctl_handle_string(oidp, sleep_state, sizeof(sleep_state), req);
 3322     if (error == 0 && req->newptr != NULL) {
 3323         new_state = acpi_sname2sstate(sleep_state);
 3324         if (new_state < ACPI_STATE_S1)
 3325             return (EINVAL);
 3326         if (new_state < ACPI_S_STATE_COUNT && !acpi_sleep_states[new_state])
 3327             return (EOPNOTSUPP);
 3328         if (new_state != old_state)
 3329             *(int *)oidp->oid_arg1 = new_state;
 3330     }
 3331     return (error);
 3332 }
 3333 
 3334 /* Inform devctl(4) when we receive a Notify. */
 3335 void
 3336 acpi_UserNotify(const char *subsystem, ACPI_HANDLE h, uint8_t notify)
 3337 {
 3338     char                notify_buf[16];
 3339     ACPI_BUFFER         handle_buf;
 3340     ACPI_STATUS         status;
 3341 
 3342     if (subsystem == NULL)
 3343         return;
 3344 
 3345     handle_buf.Pointer = NULL;
 3346     handle_buf.Length = ACPI_ALLOCATE_BUFFER;
 3347     status = AcpiNsHandleToPathname(h, &handle_buf);
 3348     if (ACPI_FAILURE(status))
 3349         return;
 3350     snprintf(notify_buf, sizeof(notify_buf), "notify=0x%02x", notify);
 3351     devctl_notify("ACPI", subsystem, handle_buf.Pointer, notify_buf);
 3352     AcpiOsFree(handle_buf.Pointer);
 3353 }
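
/*
 * Editor's sketch: a driver's notify handler can forward events to userland
 * with acpi_UserNotify(); devd(8) then sees a devctl event with system
 * "ACPI", the given subsystem, the object's namespace path, and the notify
 * value.  The subsystem name "EXAMPLE" is made up.
 */
static void
acpi_example_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context)
{

    acpi_UserNotify("EXAMPLE", h, notify);
}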
 3354 
 3355 #ifdef ACPI_DEBUG
 3356 /*
 3357  * Support for parsing debug options from the kernel environment.
 3358  *
 3359  * Bits may be set in the AcpiDbgLayer and AcpiDbgLevel debug registers
 3360  * by specifying the names of the bits in the debug.acpi.layer and
 3361  * debug.acpi.level environment variables.  Bits may be unset by 
 3362  * prefixing the bit name with !.
 3363  */
 3364 struct debugtag
 3365 {
 3366     char        *name;
 3367     UINT32      value;
 3368 };
 3369 
 3370 static struct debugtag  dbg_layer[] = {
 3371     {"ACPI_UTILITIES",          ACPI_UTILITIES},
 3372     {"ACPI_HARDWARE",           ACPI_HARDWARE},
 3373     {"ACPI_EVENTS",             ACPI_EVENTS},
 3374     {"ACPI_TABLES",             ACPI_TABLES},
 3375     {"ACPI_NAMESPACE",          ACPI_NAMESPACE},
 3376     {"ACPI_PARSER",             ACPI_PARSER},
 3377     {"ACPI_DISPATCHER",         ACPI_DISPATCHER},
 3378     {"ACPI_EXECUTER",           ACPI_EXECUTER},
 3379     {"ACPI_RESOURCES",          ACPI_RESOURCES},
 3380     {"ACPI_CA_DEBUGGER",        ACPI_CA_DEBUGGER},
 3381     {"ACPI_OS_SERVICES",        ACPI_OS_SERVICES},
 3382     {"ACPI_CA_DISASSEMBLER",    ACPI_CA_DISASSEMBLER},
 3383     {"ACPI_ALL_COMPONENTS",     ACPI_ALL_COMPONENTS},
 3384 
 3385     {"ACPI_AC_ADAPTER",         ACPI_AC_ADAPTER},
 3386     {"ACPI_BATTERY",            ACPI_BATTERY},
 3387     {"ACPI_BUS",                ACPI_BUS},
 3388     {"ACPI_BUTTON",             ACPI_BUTTON},
 3389     {"ACPI_EC",                 ACPI_EC},
 3390     {"ACPI_FAN",                ACPI_FAN},
 3391     {"ACPI_POWERRES",           ACPI_POWERRES},
 3392     {"ACPI_PROCESSOR",          ACPI_PROCESSOR},
 3393     {"ACPI_THERMAL",            ACPI_THERMAL},
 3394     {"ACPI_TIMER",              ACPI_TIMER},
 3395     {"ACPI_ALL_DRIVERS",        ACPI_ALL_DRIVERS},
 3396     {NULL, 0}
 3397 };
 3398 
 3399 static struct debugtag dbg_level[] = {
 3400     {"ACPI_LV_INIT",            ACPI_LV_INIT},
 3401     {"ACPI_LV_DEBUG_OBJECT",    ACPI_LV_DEBUG_OBJECT},
 3402     {"ACPI_LV_INFO",            ACPI_LV_INFO},
 3403     {"ACPI_LV_REPAIR",          ACPI_LV_REPAIR},
 3404     {"ACPI_LV_ALL_EXCEPTIONS",  ACPI_LV_ALL_EXCEPTIONS},
 3405 
 3406     /* Trace verbosity level 1 [Standard Trace Level] */
 3407     {"ACPI_LV_INIT_NAMES",      ACPI_LV_INIT_NAMES},
 3408     {"ACPI_LV_PARSE",           ACPI_LV_PARSE},
 3409     {"ACPI_LV_LOAD",            ACPI_LV_LOAD},
 3410     {"ACPI_LV_DISPATCH",        ACPI_LV_DISPATCH},
 3411     {"ACPI_LV_EXEC",            ACPI_LV_EXEC},
 3412     {"ACPI_LV_NAMES",           ACPI_LV_NAMES},
 3413     {"ACPI_LV_OPREGION",        ACPI_LV_OPREGION},
 3414     {"ACPI_LV_BFIELD",          ACPI_LV_BFIELD},
 3415     {"ACPI_LV_TABLES",          ACPI_LV_TABLES},
 3416     {"ACPI_LV_VALUES",          ACPI_LV_VALUES},
 3417     {"ACPI_LV_OBJECTS",         ACPI_LV_OBJECTS},
 3418     {"ACPI_LV_RESOURCES",       ACPI_LV_RESOURCES},
 3419     {"ACPI_LV_USER_REQUESTS",   ACPI_LV_USER_REQUESTS},
 3420     {"ACPI_LV_PACKAGE",         ACPI_LV_PACKAGE},
 3421     {"ACPI_LV_VERBOSITY1",      ACPI_LV_VERBOSITY1},
 3422 
 3423     /* Trace verbosity level 2 [Function tracing and memory allocation] */
 3424     {"ACPI_LV_ALLOCATIONS",     ACPI_LV_ALLOCATIONS},
 3425     {"ACPI_LV_FUNCTIONS",       ACPI_LV_FUNCTIONS},
 3426     {"ACPI_LV_OPTIMIZATIONS",   ACPI_LV_OPTIMIZATIONS},
 3427     {"ACPI_LV_VERBOSITY2",      ACPI_LV_VERBOSITY2},
 3428     {"ACPI_LV_ALL",             ACPI_LV_ALL},
 3429 
 3430     /* Trace verbosity level 3 [Threading, I/O, and Interrupts] */
 3431     {"ACPI_LV_MUTEX",           ACPI_LV_MUTEX},
 3432     {"ACPI_LV_THREADS",         ACPI_LV_THREADS},
 3433     {"ACPI_LV_IO",              ACPI_LV_IO},
 3434     {"ACPI_LV_INTERRUPTS",      ACPI_LV_INTERRUPTS},
 3435     {"ACPI_LV_VERBOSITY3",      ACPI_LV_VERBOSITY3},
 3436 
 3437     /* Exceptionally verbose output -- also used in the global "DebugLevel"  */
 3438     {"ACPI_LV_AML_DISASSEMBLE", ACPI_LV_AML_DISASSEMBLE},
 3439     {"ACPI_LV_VERBOSE_INFO",    ACPI_LV_VERBOSE_INFO},
 3440     {"ACPI_LV_FULL_TABLES",     ACPI_LV_FULL_TABLES},
 3441     {"ACPI_LV_EVENTS",          ACPI_LV_EVENTS},
 3442     {"ACPI_LV_VERBOSE",         ACPI_LV_VERBOSE},
 3443     {NULL, 0}
 3444 };    
 3445 
 3446 static void
 3447 acpi_parse_debug(char *cp, struct debugtag *tag, UINT32 *flag)
 3448 {
 3449     char        *ep;
 3450     int         i, l;
 3451     int         set;
 3452 
 3453     while (*cp) {
 3454         if (isspace(*cp)) {
 3455             cp++;
 3456             continue;
 3457         }
 3458         ep = cp;
 3459         while (*ep && !isspace(*ep))
 3460             ep++;
 3461         if (*cp == '!') {
 3462             set = 0;
 3463             cp++;
 3464             if (cp == ep)
 3465                 continue;
 3466         } else {
 3467             set = 1;
 3468         }
 3469         l = ep - cp;
 3470         for (i = 0; tag[i].name != NULL; i++) {
 3471             if (!strncmp(cp, tag[i].name, l)) {
 3472                 if (set)
 3473                     *flag |= tag[i].value;
 3474                 else
 3475                     *flag &= ~tag[i].value;
 3476             }
 3477         }
 3478         cp = ep;
 3479     }
 3480 }
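
/*
 * Editor's example (sketch): given the tables above, a tunable value such
 * as "ACPI_BUS ACPI_EC" sets those two layer bits, while a token prefixed
 * with '!' (for example "!ACPI_LV_INFO") clears the named bit instead.
 */
static void
acpi_example_parse_debug(void)
{
    UINT32 layer = 0;
    char buf[] = "ACPI_BUS ACPI_EC";

    acpi_parse_debug(buf, &dbg_layer[0], &layer);
    /* layer == (ACPI_BUS | ACPI_EC) at this point. */
}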
 3481 
 3482 static void
 3483 acpi_set_debugging(void *junk)
 3484 {
 3485     char        *layer, *level;
 3486 
 3487     if (cold) {
 3488         AcpiDbgLayer = 0;
 3489         AcpiDbgLevel = 0;
 3490     }
 3491 
 3492     layer = getenv("debug.acpi.layer");
 3493     level = getenv("debug.acpi.level");
 3494     if (layer == NULL && level == NULL)
 3495         return;
 3496 
 3497     printf("ACPI set debug");
 3498     if (layer != NULL) {
 3499         if (strcmp("NONE", layer) != 0)
 3500             printf(" layer '%s'", layer);
 3501         acpi_parse_debug(layer, &dbg_layer[0], &AcpiDbgLayer);
 3502         freeenv(layer);
 3503     }
 3504     if (level != NULL) {
 3505         if (strcmp("NONE", level) != 0)
 3506             printf(" level '%s'", level);
 3507         acpi_parse_debug(level, &dbg_level[0], &AcpiDbgLevel);
 3508         freeenv(level);
 3509     }
 3510     printf("\n");
 3511 }
 3512 
 3513 SYSINIT(acpi_debugging, SI_SUB_TUNABLES, SI_ORDER_ANY, acpi_set_debugging,
 3514         NULL);
 3515 
 3516 static int
 3517 acpi_debug_sysctl(SYSCTL_HANDLER_ARGS)
 3518 {
 3519     int          error, *dbg;
 3520     struct       debugtag *tag;
 3521     struct       sbuf sb;
 3522 
 3523     if (sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND) == NULL)
 3524         return (ENOMEM);
 3525     if (strcmp(oidp->oid_arg1, "debug.acpi.layer") == 0) {
 3526         tag = &dbg_layer[0];
 3527         dbg = &AcpiDbgLayer;
 3528     } else {
 3529         tag = &dbg_level[0];
 3530         dbg = &AcpiDbgLevel;
 3531     }
 3532 
 3533     /* Get old values if this is a get request. */
 3534     ACPI_SERIAL_BEGIN(acpi);
 3535     if (*dbg == 0) {
 3536         sbuf_cpy(&sb, "NONE");
 3537     } else if (req->newptr == NULL) {
 3538         for (; tag->name != NULL; tag++) {
 3539             if ((*dbg & tag->value) == tag->value)
 3540                 sbuf_printf(&sb, "%s ", tag->name);
 3541         }
 3542     }
 3543     sbuf_trim(&sb);
 3544     sbuf_finish(&sb);
 3545 
 3546     /* Copy out the old values to the user. */
 3547     error = SYSCTL_OUT(req, sbuf_data(&sb), sbuf_len(&sb));
 3548     sbuf_delete(&sb);
 3549 
 3550     /* If the user is setting a string, parse it. */
 3551     if (error == 0 && req->newptr != NULL) {
 3552         *dbg = 0;
 3553         setenv((char *)oidp->oid_arg1, (char *)req->newptr);
 3554         acpi_set_debugging(NULL);
 3555     }
 3556     ACPI_SERIAL_END(acpi);
 3557 
 3558     return (error);
 3559 }
 3560 
 3561 SYSCTL_PROC(_debug_acpi, OID_AUTO, layer, CTLFLAG_RW | CTLTYPE_STRING,
 3562             "debug.acpi.layer", 0, acpi_debug_sysctl, "A", "");
 3563 SYSCTL_PROC(_debug_acpi, OID_AUTO, level, CTLFLAG_RW | CTLTYPE_STRING,
 3564             "debug.acpi.level", 0, acpi_debug_sysctl, "A", "");
 3565 #endif /* ACPI_DEBUG */
 3566 
 3567 static int
 3568 acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS)
 3569 {
 3570         int     error;
 3571         int     old;
 3572 
 3573         old = acpi_debug_objects;
 3574         error = sysctl_handle_int(oidp, &acpi_debug_objects, 0, req);
 3575         if (error != 0 || req->newptr == NULL)
 3576                 return (error);
 3577         if (old == acpi_debug_objects || (old && acpi_debug_objects))
 3578                 return (0);
 3579 
 3580         ACPI_SERIAL_BEGIN(acpi);
 3581         AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? TRUE : FALSE;
 3582         ACPI_SERIAL_END(acpi);
 3583 
 3584         return (0);
 3585 }
 3586 
 3587 static int
 3588 acpi_parse_interfaces(char *str, struct acpi_interface *iface)
 3589 {
 3590         char *p;
 3591         size_t len;
 3592         int i, j;
 3593 
 3594         p = str;
 3595         while (isspace(*p) || *p == ',')
 3596                 p++;
 3597         len = strlen(p);
 3598         if (len == 0)
 3599                 return (0);
 3600         p = strdup(p, M_TEMP);
 3601         for (i = 0; i < len; i++)
 3602                 if (p[i] == ',')
 3603                         p[i] = '\0';
 3604         i = j = 0;
 3605         while (i < len)
 3606                 if (isspace(p[i]) || p[i] == '\0')
 3607                         i++;
 3608                 else {
 3609                         i += strlen(p + i) + 1;
 3610                         j++;
 3611                 }
 3612         if (j == 0) {
 3613                 free(p, M_TEMP);
 3614                 return (0);
 3615         }
 3616         iface->data = malloc(sizeof(*iface->data) * j, M_TEMP, M_WAITOK);
 3617         iface->num = j;
 3618         i = j = 0;
 3619         while (i < len)
 3620                 if (isspace(p[i]) || p[i] == '\0')
 3621                         i++;
 3622                 else {
 3623                         iface->data[j] = p + i;
 3624                         i += strlen(p + i) + 1;
 3625                         j++;
 3626                 }
 3627 
 3628         return (j);
 3629 }
 3630 
 3631 static void
 3632 acpi_free_interfaces(struct acpi_interface *iface)
 3633 {
 3634 
 3635         free(iface->data[0], M_TEMP);
 3636         free(iface->data, M_TEMP);
 3637 }
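
/*
 * Editor's example (sketch): acpi_parse_interfaces() splits a comma-
 * separated tunable value into individual _OSI strings; spaces inside a
 * name are preserved.  For "Windows 2009,Windows 2012" it returns 2 with
 * list.data[0] = "Windows 2009" and list.data[1] = "Windows 2012".
 */
static void
acpi_example_parse_interfaces(void)
{
    struct acpi_interface list;
    char sample[] = "Windows 2009,Windows 2012";

    if (acpi_parse_interfaces(sample, &list) == 2)
        acpi_free_interfaces(&list);
}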
 3638 
 3639 static void
 3640 acpi_reset_interfaces(device_t dev)
 3641 {
 3642         struct acpi_interface list;
 3643         ACPI_STATUS status;
 3644         int i;
 3645 
 3646         if (acpi_parse_interfaces(acpi_install_interface, &list) > 0) {
 3647                 for (i = 0; i < list.num; i++) {
 3648                         status = AcpiInstallInterface(list.data[i]);
 3649                         if (ACPI_FAILURE(status))
 3650                                 device_printf(dev,
 3651                                     "failed to install _OSI(\"%s\"): %s\n",
 3652                                     list.data[i], AcpiFormatException(status));
 3653                         else if (bootverbose)
 3654                                 device_printf(dev, "installed _OSI(\"%s\")\n",
 3655                                     list.data[i]);
 3656                 }
 3657                 acpi_free_interfaces(&list);
 3658         }
 3659         if (acpi_parse_interfaces(acpi_remove_interface, &list) > 0) {
 3660                 for (i = 0; i < list.num; i++) {
 3661                         status = AcpiRemoveInterface(list.data[i]);
 3662                         if (ACPI_FAILURE(status))
 3663                                 device_printf(dev,
 3664                                     "failed to remove _OSI(\"%s\"): %s\n",
 3665                                     list.data[i], AcpiFormatException(status));
 3666                         else if (bootverbose)
 3667                                 device_printf(dev, "removed _OSI(\"%s\")\n",
 3668                                     list.data[i]);
 3669                 }
 3670                 acpi_free_interfaces(&list);
 3671         }
 3672 }
 3673 
 3674 static int
 3675 acpi_pm_func(u_long cmd, void *arg, ...)
 3676 {
 3677         int     state, acpi_state;
 3678         int     error;
 3679         struct  acpi_softc *sc;
 3680         va_list ap;
 3681 
 3682         error = 0;
 3683         switch (cmd) {
 3684         case POWER_CMD_SUSPEND:
 3685                 sc = (struct acpi_softc *)arg;
 3686                 if (sc == NULL) {
 3687                         error = EINVAL;
 3688                         goto out;
 3689                 }
 3690 
 3691                 va_start(ap, arg);
 3692                 state = va_arg(ap, int);
 3693                 va_end(ap);
 3694 
 3695                 switch (state) {
 3696                 case POWER_SLEEP_STATE_STANDBY:
 3697                         acpi_state = sc->acpi_standby_sx;
 3698                         break;
 3699                 case POWER_SLEEP_STATE_SUSPEND:
 3700                         acpi_state = sc->acpi_suspend_sx;
 3701                         break;
 3702                 case POWER_SLEEP_STATE_HIBERNATE:
 3703                         acpi_state = ACPI_STATE_S4;
 3704                         break;
 3705                 default:
 3706                         error = EINVAL;
 3707                         goto out;
 3708                 }
 3709 
 3710                 if (ACPI_FAILURE(acpi_EnterSleepState(sc, acpi_state)))
 3711                         error = ENXIO;
 3712                 break;
 3713         default:
 3714                 error = EINVAL;
 3715                 goto out;
 3716         }
 3717 
 3718 out:
 3719         return (error);
 3720 }
 3721 
 3722 static void
 3723 acpi_pm_register(void *arg)
 3724 {
 3725     if (!cold || resource_disabled("acpi", 0))
 3726         return;
 3727 
 3728     power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, NULL);
 3729 }
 3730 
 3731 SYSINIT(power, SI_SUB_KLD, SI_ORDER_ANY, acpi_pm_register, 0);
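
/*
 * Editor's sketch: once acpi_pm_func() is registered above, generic kernel
 * code can request a suspend through the power(9) layer without knowing
 * about ACPI; the call below is routed to the registered acpi_pm_func()
 * with POWER_CMD_SUSPEND and the requested sleep kind.
 */
static void
acpi_example_power_suspend(void)
{

    power_pm_suspend(POWER_SLEEP_STATE_SUSPEND);
}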
