FreeBSD/Linux Kernel Cross Reference
sys/dev/acpica/acpi.c

    1 /*-
    2  * Copyright (c) 2000 Takanori Watanabe <takawata@jp.freebsd.org>
    3  * Copyright (c) 2000 Mitsuru IWASAKI <iwasaki@jp.freebsd.org>
    4  * Copyright (c) 2000, 2001 Michael Smith
    5  * Copyright (c) 2000 BSDi
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  */
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD$");
   32 
   33 #include "opt_acpi.h"
   34 #include "opt_device_numa.h"
   35 
   36 #include <sys/param.h>
   37 #include <sys/kernel.h>
   38 #include <sys/proc.h>
   39 #include <sys/fcntl.h>
   40 #include <sys/malloc.h>
   41 #include <sys/module.h>
   42 #include <sys/bus.h>
   43 #include <sys/conf.h>
   44 #include <sys/ioccom.h>
   45 #include <sys/reboot.h>
   46 #include <sys/sysctl.h>
   47 #include <sys/ctype.h>
   48 #include <sys/linker.h>
   49 #include <sys/power.h>
   50 #include <sys/sbuf.h>
   51 #include <sys/sched.h>
   52 #include <sys/smp.h>
   53 #include <sys/timetc.h>
   54 
   55 #if defined(__i386__) || defined(__amd64__)
   56 #include <machine/clock.h>
   57 #include <machine/pci_cfgreg.h>
   58 #endif
   59 #include <machine/resource.h>
   60 #include <machine/bus.h>
   61 #include <sys/rman.h>
   62 #include <isa/isavar.h>
   63 #include <isa/pnpvar.h>
   64 
   65 #include <contrib/dev/acpica/include/acpi.h>
   66 #include <contrib/dev/acpica/include/accommon.h>
   67 #include <contrib/dev/acpica/include/acnamesp.h>
   68 
   69 #include <dev/acpica/acpivar.h>
   70 #include <dev/acpica/acpiio.h>
   71 
   72 #include <vm/vm_param.h>
   73 
   74 static MALLOC_DEFINE(M_ACPIDEV, "acpidev", "ACPI devices");
   75 
   76 /* Hooks for the ACPI CA debugging infrastructure */
   77 #define _COMPONENT      ACPI_BUS
   78 ACPI_MODULE_NAME("ACPI")
   79 
   80 static d_open_t         acpiopen;
   81 static d_close_t        acpiclose;
   82 static d_ioctl_t        acpiioctl;
   83 
   84 static struct cdevsw acpi_cdevsw = {
   85         .d_version =    D_VERSION,
   86         .d_open =       acpiopen,
   87         .d_close =      acpiclose,
   88         .d_ioctl =      acpiioctl,
   89         .d_name =       "acpi",
   90 };
   91 
   92 struct acpi_interface {
   93         ACPI_STRING     *data;
   94         int             num;
   95 };
   96 
   97 /* Global mutex for locking access to the ACPI subsystem. */
   98 struct mtx      acpi_mutex;
   99 struct callout  acpi_sleep_timer;
  100 
  101 /* Bitmap of device quirks. */
  102 int             acpi_quirks;
  103 
  104 /* Supported sleep states. */
  105 static BOOLEAN  acpi_sleep_states[ACPI_S_STATE_COUNT];
  106 
  107 static void     acpi_lookup(void *arg, const char *name, device_t *dev);
  108 static int      acpi_modevent(struct module *mod, int event, void *junk);
  109 static int      acpi_probe(device_t dev);
  110 static int      acpi_attach(device_t dev);
  111 static int      acpi_suspend(device_t dev);
  112 static int      acpi_resume(device_t dev);
  113 static int      acpi_shutdown(device_t dev);
  114 static device_t acpi_add_child(device_t bus, u_int order, const char *name,
  115                         int unit);
  116 static int      acpi_print_child(device_t bus, device_t child);
  117 static void     acpi_probe_nomatch(device_t bus, device_t child);
  118 static void     acpi_driver_added(device_t dev, driver_t *driver);
  119 static int      acpi_read_ivar(device_t dev, device_t child, int index,
  120                         uintptr_t *result);
  121 static int      acpi_write_ivar(device_t dev, device_t child, int index,
  122                         uintptr_t value);
  123 static struct resource_list *acpi_get_rlist(device_t dev, device_t child);
  124 static void     acpi_reserve_resources(device_t dev);
  125 static int      acpi_sysres_alloc(device_t dev);
  126 static int      acpi_set_resource(device_t dev, device_t child, int type,
  127                         int rid, rman_res_t start, rman_res_t count);
  128 static struct resource *acpi_alloc_resource(device_t bus, device_t child,
  129                         int type, int *rid, rman_res_t start, rman_res_t end,
  130                         rman_res_t count, u_int flags);
  131 static int      acpi_adjust_resource(device_t bus, device_t child, int type,
  132                         struct resource *r, rman_res_t start, rman_res_t end);
  133 static int      acpi_release_resource(device_t bus, device_t child, int type,
  134                         int rid, struct resource *r);
  135 static void     acpi_delete_resource(device_t bus, device_t child, int type,
  136                     int rid);
  137 static uint32_t acpi_isa_get_logicalid(device_t dev);
  138 static int      acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count);
  139 static char     *acpi_device_id_probe(device_t bus, device_t dev, char **ids);
  140 static ACPI_STATUS acpi_device_eval_obj(device_t bus, device_t dev,
  141                     ACPI_STRING pathname, ACPI_OBJECT_LIST *parameters,
  142                     ACPI_BUFFER *ret);
  143 static ACPI_STATUS acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level,
  144                     void *context, void **retval);
  145 static ACPI_STATUS acpi_device_scan_children(device_t bus, device_t dev,
  146                     int max_depth, acpi_scan_cb_t user_fn, void *arg);
  147 static int      acpi_set_powerstate(device_t child, int state);
  148 static int      acpi_isa_pnp_probe(device_t bus, device_t child,
  149                     struct isa_pnp_id *ids);
  150 static void     acpi_probe_children(device_t bus);
  151 static void     acpi_probe_order(ACPI_HANDLE handle, int *order);
  152 static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level,
  153                     void *context, void **status);
  154 static void     acpi_sleep_enable(void *arg);
  155 static ACPI_STATUS acpi_sleep_disable(struct acpi_softc *sc);
  156 static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state);
  157 static void     acpi_shutdown_final(void *arg, int howto);
  158 static void     acpi_enable_fixed_events(struct acpi_softc *sc);
  159 static BOOLEAN  acpi_has_hid(ACPI_HANDLE handle);
  160 static void     acpi_resync_clock(struct acpi_softc *sc);
  161 static int      acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate);
  162 static int      acpi_wake_run_prep(ACPI_HANDLE handle, int sstate);
  163 static int      acpi_wake_prep_walk(int sstate);
  164 static int      acpi_wake_sysctl_walk(device_t dev);
  165 static int      acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS);
  166 static void     acpi_system_eventhandler_sleep(void *arg, int state);
  167 static void     acpi_system_eventhandler_wakeup(void *arg, int state);
  168 static int      acpi_sname2sstate(const char *sname);
  169 static const char *acpi_sstate2sname(int sstate);
  170 static int      acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS);
  171 static int      acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS);
  172 static int      acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS);
  173 static int      acpi_pm_func(u_long cmd, void *arg, ...);
  174 static int      acpi_child_location_str_method(device_t acdev, device_t child,
  175                                                char *buf, size_t buflen);
  176 static int      acpi_child_pnpinfo_str_method(device_t acdev, device_t child,
  177                                               char *buf, size_t buflen);
  178 #if defined(__i386__) || defined(__amd64__)
  179 static void     acpi_enable_pcie(void);
  180 #endif
  181 static void     acpi_hint_device_unit(device_t acdev, device_t child,
  182                     const char *name, int *unitp);
  183 static void     acpi_reset_interfaces(device_t dev);
  184 
  185 static device_method_t acpi_methods[] = {
  186     /* Device interface */
  187     DEVMETHOD(device_probe,             acpi_probe),
  188     DEVMETHOD(device_attach,            acpi_attach),
  189     DEVMETHOD(device_shutdown,          acpi_shutdown),
  190     DEVMETHOD(device_detach,            bus_generic_detach),
  191     DEVMETHOD(device_suspend,           acpi_suspend),
  192     DEVMETHOD(device_resume,            acpi_resume),
  193 
  194     /* Bus interface */
  195     DEVMETHOD(bus_add_child,            acpi_add_child),
  196     DEVMETHOD(bus_print_child,          acpi_print_child),
  197     DEVMETHOD(bus_probe_nomatch,        acpi_probe_nomatch),
  198     DEVMETHOD(bus_driver_added,         acpi_driver_added),
  199     DEVMETHOD(bus_read_ivar,            acpi_read_ivar),
  200     DEVMETHOD(bus_write_ivar,           acpi_write_ivar),
  201     DEVMETHOD(bus_get_resource_list,    acpi_get_rlist),
  202     DEVMETHOD(bus_set_resource,         acpi_set_resource),
  203     DEVMETHOD(bus_get_resource,         bus_generic_rl_get_resource),
  204     DEVMETHOD(bus_alloc_resource,       acpi_alloc_resource),
  205     DEVMETHOD(bus_adjust_resource,      acpi_adjust_resource),
  206     DEVMETHOD(bus_release_resource,     acpi_release_resource),
  207     DEVMETHOD(bus_delete_resource,      acpi_delete_resource),
  208     DEVMETHOD(bus_child_pnpinfo_str,    acpi_child_pnpinfo_str_method),
  209     DEVMETHOD(bus_child_location_str,   acpi_child_location_str_method),
  210     DEVMETHOD(bus_activate_resource,    bus_generic_activate_resource),
  211     DEVMETHOD(bus_deactivate_resource,  bus_generic_deactivate_resource),
  212     DEVMETHOD(bus_setup_intr,           bus_generic_setup_intr),
  213     DEVMETHOD(bus_teardown_intr,        bus_generic_teardown_intr),
  214     DEVMETHOD(bus_hint_device_unit,     acpi_hint_device_unit),
  215     DEVMETHOD(bus_get_cpus,             acpi_get_cpus),
  216     DEVMETHOD(bus_get_domain,           acpi_get_domain),
  217 
  218     /* ACPI bus */
  219     DEVMETHOD(acpi_id_probe,            acpi_device_id_probe),
  220     DEVMETHOD(acpi_evaluate_object,     acpi_device_eval_obj),
  221     DEVMETHOD(acpi_pwr_for_sleep,       acpi_device_pwr_for_sleep),
  222     DEVMETHOD(acpi_scan_children,       acpi_device_scan_children),
  223 
  224     /* ISA emulation */
  225     DEVMETHOD(isa_pnp_probe,            acpi_isa_pnp_probe),
  226 
  227     DEVMETHOD_END
  228 };
  229 
  230 static driver_t acpi_driver = {
  231     "acpi",
  232     acpi_methods,
  233     sizeof(struct acpi_softc),
  234 };
  235 
  236 static devclass_t acpi_devclass;
  237 DRIVER_MODULE(acpi, nexus, acpi_driver, acpi_devclass, acpi_modevent, 0);
  238 MODULE_VERSION(acpi, 1);
  239 
  240 ACPI_SERIAL_DECL(acpi, "ACPI root bus");
  241 
  242 /* Local pools for managing system resources for ACPI child devices. */
  243 static struct rman acpi_rman_io, acpi_rman_mem;
  244 
  245 #define ACPI_MINIMUM_AWAKETIME  5
  246 
  247 /* Holds the description of the acpi0 device. */
  248 static char acpi_desc[ACPI_OEM_ID_SIZE + ACPI_OEM_TABLE_ID_SIZE + 2];
  249 
  250 SYSCTL_NODE(_debug, OID_AUTO, acpi, CTLFLAG_RD, NULL, "ACPI debugging");
  251 static char acpi_ca_version[12];
  252 SYSCTL_STRING(_debug_acpi, OID_AUTO, acpi_ca_version, CTLFLAG_RD,
  253               acpi_ca_version, 0, "Version of Intel ACPI-CA");
  254 
  255 /*
  256  * Allow overriding _OSI methods.
  257  */
  258 static char acpi_install_interface[256];
  259 TUNABLE_STR("hw.acpi.install_interface", acpi_install_interface,
  260     sizeof(acpi_install_interface));
  261 static char acpi_remove_interface[256];
  262 TUNABLE_STR("hw.acpi.remove_interface", acpi_remove_interface,
  263     sizeof(acpi_remove_interface));
  264 
  265 /* Allow users to dump Debug objects without ACPI debugger. */
  266 static int acpi_debug_objects;
  267 TUNABLE_INT("debug.acpi.enable_debug_objects", &acpi_debug_objects);
  268 SYSCTL_PROC(_debug_acpi, OID_AUTO, enable_debug_objects,
  269     CTLFLAG_RW | CTLTYPE_INT, NULL, 0, acpi_debug_objects_sysctl, "I",
  270     "Enable Debug objects");
  271 
  272 /* Allow the interpreter to ignore common mistakes in BIOS. */
  273 static int acpi_interpreter_slack = 1;
  274 TUNABLE_INT("debug.acpi.interpreter_slack", &acpi_interpreter_slack);
  275 SYSCTL_INT(_debug_acpi, OID_AUTO, interpreter_slack, CTLFLAG_RDTUN,
  276     &acpi_interpreter_slack, 1, "Turn on interpreter slack mode.");
  277 
  278 /* Ignore register widths set by FADT and use default widths instead. */
  279 static int acpi_ignore_reg_width = 1;
  280 TUNABLE_INT("debug.acpi.default_register_width", &acpi_ignore_reg_width);
  281 SYSCTL_INT(_debug_acpi, OID_AUTO, default_register_width, CTLFLAG_RDTUN,
  282     &acpi_ignore_reg_width, 1, "Ignore register widths set by FADT");
  283 
  284 #ifdef __amd64__
  285 /* Reset system clock while resuming.  XXX Remove once tested. */
  286 static int acpi_reset_clock = 1;
  287 TUNABLE_INT("debug.acpi.reset_clock", &acpi_reset_clock);
  288 SYSCTL_INT(_debug_acpi, OID_AUTO, reset_clock, CTLFLAG_RW,
  289     &acpi_reset_clock, 1, "Reset system clock while resuming.");
  290 #endif
  291 
  292 /* Allow users to override quirks. */
  293 TUNABLE_INT("debug.acpi.quirks", &acpi_quirks);
  294 
  295 int acpi_susp_bounce;
  296 SYSCTL_INT(_debug_acpi, OID_AUTO, suspend_bounce, CTLFLAG_RW,
  297     &acpi_susp_bounce, 0, "Don't actually suspend, just test devices.");
  298 
  299 /*
  300  * ACPI can only be loaded as a module by the loader; activating it after
  301  * system bootstrap time is not useful, and can be fatal to the system.
  302  * It also cannot be unloaded, since the entire system bus hierarchy hangs
  303  * off it.
  304  */
  305 static int
  306 acpi_modevent(struct module *mod, int event, void *junk)
  307 {
  308     switch (event) {
  309     case MOD_LOAD:
  310         if (!cold) {
  311             printf("The ACPI driver cannot be loaded after boot.\n");
  312             return (EPERM);
  313         }
  314         break;
  315     case MOD_UNLOAD:
  316         if (!cold && power_pm_get_type() == POWER_PM_TYPE_ACPI)
  317             return (EBUSY);
  318         break;
  319     default:
  320         break;
  321     }
  322     return (0);
  323 }
  324 
  325 /*
  326  * Perform early initialization.
  327  */
  328 ACPI_STATUS
  329 acpi_Startup(void)
  330 {
  331     static int started = 0;
  332     ACPI_STATUS status;
  333     int val;
  334 
  335     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
  336 
  337     /* Only run the startup code once.  The MADT driver also calls this. */
  338     if (started)
  339         return_VALUE (AE_OK);
  340     started = 1;
  341 
  342     /*
  343      * Initialize the ACPICA subsystem.
  344      */
  345     if (ACPI_FAILURE(status = AcpiInitializeSubsystem())) {
  346         printf("ACPI: Could not initialize Subsystem: %s\n",
  347             AcpiFormatException(status));
  348         return_VALUE (status);
  349     }
  350 
  351     /*
  352      * Pre-allocate space for RSDT/XSDT and DSDT tables and allow resizing
  353      * if more tables exist.
  354      */
  355     if (ACPI_FAILURE(status = AcpiInitializeTables(NULL, 2, TRUE))) {
  356         printf("ACPI: Table initialisation failed: %s\n",
  357             AcpiFormatException(status));
  358         return_VALUE (status);
  359     }
  360 
  361     /* Set up any quirks we have for this system. */
  362     if (acpi_quirks == ACPI_Q_OK)
  363         acpi_table_quirks(&acpi_quirks);
  364 
  365     /* If the user manually set the disabled hint to 0, force-enable ACPI. */
  366     if (resource_int_value("acpi", 0, "disabled", &val) == 0 && val == 0)
  367         acpi_quirks &= ~ACPI_Q_BROKEN;
  368     if (acpi_quirks & ACPI_Q_BROKEN) {
  369         printf("ACPI disabled by blacklist.  Contact your BIOS vendor.\n");
  370         status = AE_SUPPORT;
  371     }
  372 
  373     return_VALUE (status);
  374 }
  375 
  376 /*
  377  * Detect ACPI and perform early initialisation.
  378  */
  379 int
  380 acpi_identify(void)
  381 {
  382     ACPI_TABLE_RSDP     *rsdp;
  383     ACPI_TABLE_HEADER   *rsdt;
  384     ACPI_PHYSICAL_ADDRESS paddr;
  385     struct sbuf         sb;
  386 
  387     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
  388 
  389     if (!cold)
  390         return (ENXIO);
  391 
  392     /* Check that we haven't been disabled with a hint. */
  393     if (resource_disabled("acpi", 0))
  394         return (ENXIO);
  395 
  396     /* Check for other PM systems. */
  397     if (power_pm_get_type() != POWER_PM_TYPE_NONE &&
  398         power_pm_get_type() != POWER_PM_TYPE_ACPI) {
  399         printf("ACPI identify failed, other PM system enabled.\n");
  400         return (ENXIO);
  401     }
  402 
  403     /* Initialize root tables. */
  404     if (ACPI_FAILURE(acpi_Startup())) {
  405         printf("ACPI: Try disabling either ACPI or apic support.\n");
  406         return (ENXIO);
  407     }
  408 
  409     if ((paddr = AcpiOsGetRootPointer()) == 0 ||
  410         (rsdp = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_RSDP))) == NULL)
  411         return (ENXIO);
  412     if (rsdp->Revision > 1 && rsdp->XsdtPhysicalAddress != 0)
  413         paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->XsdtPhysicalAddress;
  414     else
  415         paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->RsdtPhysicalAddress;
  416     AcpiOsUnmapMemory(rsdp, sizeof(ACPI_TABLE_RSDP));
  417 
  418     if ((rsdt = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_HEADER))) == NULL)
  419         return (ENXIO);
  420     sbuf_new(&sb, acpi_desc, sizeof(acpi_desc), SBUF_FIXEDLEN);
  421     sbuf_bcat(&sb, rsdt->OemId, ACPI_OEM_ID_SIZE);
  422     sbuf_trim(&sb);
  423     sbuf_putc(&sb, ' ');
  424     sbuf_bcat(&sb, rsdt->OemTableId, ACPI_OEM_TABLE_ID_SIZE);
  425     sbuf_trim(&sb);
  426     sbuf_finish(&sb);
  427     sbuf_delete(&sb);
  428     AcpiOsUnmapMemory(rsdt, sizeof(ACPI_TABLE_HEADER));
  429 
  430     snprintf(acpi_ca_version, sizeof(acpi_ca_version), "%x", ACPI_CA_VERSION);
  431 
  432     return (0);
  433 }
  434 
  435 /*
  436  * Fetch some descriptive data from ACPI to put in our attach message.
  437  */
  438 static int
  439 acpi_probe(device_t dev)
  440 {
  441 
  442     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
  443 
  444     device_set_desc(dev, acpi_desc);
  445 
  446     return_VALUE (BUS_PROBE_NOWILDCARD);
  447 }
  448 
  449 static int
  450 acpi_attach(device_t dev)
  451 {
  452     struct acpi_softc   *sc;
  453     ACPI_STATUS         status;
  454     int                 error, state;
  455     UINT32              flags;
  456     UINT8               TypeA, TypeB;
  457     char                *env;
  458 
  459     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
  460 
  461     sc = device_get_softc(dev);
  462     sc->acpi_dev = dev;
  463     callout_init(&sc->susp_force_to, 1);
  464 
  465     error = ENXIO;
  466 
  467     /* Initialize resource manager. */
  468     acpi_rman_io.rm_type = RMAN_ARRAY;
  469     acpi_rman_io.rm_start = 0;
  470     acpi_rman_io.rm_end = 0xffff;
  471     acpi_rman_io.rm_descr = "ACPI I/O ports";
  472     if (rman_init(&acpi_rman_io) != 0)
  473         panic("acpi rman_init IO ports failed");
  474     acpi_rman_mem.rm_type = RMAN_ARRAY;
  475     acpi_rman_mem.rm_descr = "ACPI I/O memory addresses";
  476     if (rman_init(&acpi_rman_mem) != 0)
  477         panic("acpi rman_init memory failed");
  478 
  479     /* Initialise the ACPI mutex */
  480     mtx_init(&acpi_mutex, "ACPI global lock", NULL, MTX_DEF);
  481 
  482     /*
  483      * Set the globals from our tunables.  This is needed because ACPI-CA
  484      * uses UINT8 for some values and we have no tunable_byte.
  485      */
  486     AcpiGbl_EnableInterpreterSlack = acpi_interpreter_slack ? TRUE : FALSE;
  487     AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? TRUE : FALSE;
  488     AcpiGbl_UseDefaultRegisterWidths = acpi_ignore_reg_width ? TRUE : FALSE;
  489 
  490 #ifndef ACPI_DEBUG
  491     /*
  492      * Disable all debugging layers and levels.
  493      */
  494     AcpiDbgLayer = 0;
  495     AcpiDbgLevel = 0;
  496 #endif
  497 
  498     /* Override OS interfaces if the user requested. */
  499     acpi_reset_interfaces(dev);
  500 
  501     /* Load ACPI name space. */
  502     status = AcpiLoadTables();
  503     if (ACPI_FAILURE(status)) {
  504         device_printf(dev, "Could not load Namespace: %s\n",
  505                       AcpiFormatException(status));
  506         goto out;
  507     }
  508 
  509 #if defined(__i386__) || defined(__amd64__)
  510     /* Handle MCFG table if present. */
  511     acpi_enable_pcie();
  512 #endif
  513 
  514     /*
  515      * Note that some systems (specifically, those with namespace evaluation
  516      * issues that require the avoidance of parts of the namespace) must
  517      * avoid running _INI and _STA on everything, as well as dodging the final
  518      * object init pass.
  519      *
   520      * For these devices, we set ACPI_NO_DEVICE_INIT and ACPI_NO_OBJECT_INIT.
  521      *
  522      * XXX We should arrange for the object init pass after we have attached
  523      *     all our child devices, but on many systems it works here.
  524      */
  525     flags = 0;
  526     if (testenv("debug.acpi.avoid"))
  527         flags = ACPI_NO_DEVICE_INIT | ACPI_NO_OBJECT_INIT;
  528 
  529     /* Bring the hardware and basic handlers online. */
  530     if (ACPI_FAILURE(status = AcpiEnableSubsystem(flags))) {
  531         device_printf(dev, "Could not enable ACPI: %s\n",
  532                       AcpiFormatException(status));
  533         goto out;
  534     }
  535 
  536     /*
  537      * Call the ECDT probe function to provide EC functionality before
  538      * the namespace has been evaluated.
  539      *
  540      * XXX This happens before the sysresource devices have been probed and
  541      * attached so its resources come from nexus0.  In practice, this isn't
  542      * a problem but should be addressed eventually.
  543      */
  544     acpi_ec_ecdt_probe(dev);
  545 
  546     /* Bring device objects and regions online. */
  547     if (ACPI_FAILURE(status = AcpiInitializeObjects(flags))) {
  548         device_printf(dev, "Could not initialize ACPI objects: %s\n",
  549                       AcpiFormatException(status));
  550         goto out;
  551     }
  552 
  553     /*
  554      * Setup our sysctl tree.
  555      *
  556      * XXX: This doesn't check to make sure that none of these fail.
  557      */
  558     sysctl_ctx_init(&sc->acpi_sysctl_ctx);
  559     sc->acpi_sysctl_tree = SYSCTL_ADD_NODE(&sc->acpi_sysctl_ctx,
  560                                SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
  561                                device_get_name(dev), CTLFLAG_RD, 0, "");
  562     SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  563         OID_AUTO, "supported_sleep_state", CTLTYPE_STRING | CTLFLAG_RD,
  564         0, 0, acpi_supported_sleep_state_sysctl, "A",
  565         "List supported ACPI sleep states.");
  566     SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  567         OID_AUTO, "power_button_state", CTLTYPE_STRING | CTLFLAG_RW,
  568         &sc->acpi_power_button_sx, 0, acpi_sleep_state_sysctl, "A",
  569         "Power button ACPI sleep state.");
  570     SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  571         OID_AUTO, "sleep_button_state", CTLTYPE_STRING | CTLFLAG_RW,
  572         &sc->acpi_sleep_button_sx, 0, acpi_sleep_state_sysctl, "A",
  573         "Sleep button ACPI sleep state.");
  574     SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  575         OID_AUTO, "lid_switch_state", CTLTYPE_STRING | CTLFLAG_RW,
  576         &sc->acpi_lid_switch_sx, 0, acpi_sleep_state_sysctl, "A",
   577         "Lid ACPI sleep state. Set to S3 to suspend your laptop when the lid is closed.");
  578     SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  579         OID_AUTO, "standby_state", CTLTYPE_STRING | CTLFLAG_RW,
  580         &sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A", "");
  581     SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  582         OID_AUTO, "suspend_state", CTLTYPE_STRING | CTLFLAG_RW,
  583         &sc->acpi_suspend_sx, 0, acpi_sleep_state_sysctl, "A", "");
  584     SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  585         OID_AUTO, "sleep_delay", CTLFLAG_RW, &sc->acpi_sleep_delay, 0,
  586         "sleep delay in seconds");
  587     SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  588         OID_AUTO, "s4bios", CTLFLAG_RW, &sc->acpi_s4bios, 0, "S4BIOS mode");
  589     SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  590         OID_AUTO, "verbose", CTLFLAG_RW, &sc->acpi_verbose, 0, "verbose mode");
  591     SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  592         OID_AUTO, "disable_on_reboot", CTLFLAG_RW,
  593         &sc->acpi_do_disable, 0, "Disable ACPI when rebooting/halting system");
  594     SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  595         OID_AUTO, "handle_reboot", CTLFLAG_RW,
  596         &sc->acpi_handle_reboot, 0, "Use ACPI Reset Register to reboot");
  597 
  598     /*
  599      * Default to 1 second before sleeping to give some machines time to
  600      * stabilize.
  601      */
  602     sc->acpi_sleep_delay = 1;
  603     if (bootverbose)
  604         sc->acpi_verbose = 1;
  605     if ((env = kern_getenv("hw.acpi.verbose")) != NULL) {
  606         if (strcmp(env, "") != 0)
  607             sc->acpi_verbose = 1;
  608         freeenv(env);
  609     }
  610 
  611     /* Only enable reboot by default if the FADT says it is available. */
  612     if (AcpiGbl_FADT.Flags & ACPI_FADT_RESET_REGISTER)
  613         sc->acpi_handle_reboot = 1;
  614 
  615 #if !ACPI_REDUCED_HARDWARE
  616     /* Only enable S4BIOS by default if the FACS says it is available. */
  617     if (AcpiGbl_FACS != NULL && AcpiGbl_FACS->Flags & ACPI_FACS_S4_BIOS_PRESENT)
  618         sc->acpi_s4bios = 1;
  619 #endif
  620 
  621     /* Probe all supported sleep states. */
  622     acpi_sleep_states[ACPI_STATE_S0] = TRUE;
  623     for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++)
  624         if (ACPI_SUCCESS(AcpiEvaluateObject(ACPI_ROOT_OBJECT,
  625             __DECONST(char *, AcpiGbl_SleepStateNames[state]), NULL, NULL)) &&
  626             ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB)))
  627             acpi_sleep_states[state] = TRUE;
  628 
  629     /*
  630      * Dispatch the default sleep state to devices.  The lid switch is set
  631      * to UNKNOWN by default to avoid surprising users.
  632      */
  633     sc->acpi_power_button_sx = acpi_sleep_states[ACPI_STATE_S5] ?
  634         ACPI_STATE_S5 : ACPI_STATE_UNKNOWN;
  635     sc->acpi_lid_switch_sx = ACPI_STATE_UNKNOWN;
  636     sc->acpi_standby_sx = acpi_sleep_states[ACPI_STATE_S1] ?
  637         ACPI_STATE_S1 : ACPI_STATE_UNKNOWN;
  638     sc->acpi_suspend_sx = acpi_sleep_states[ACPI_STATE_S3] ?
  639         ACPI_STATE_S3 : ACPI_STATE_UNKNOWN;
  640 
  641     /* Pick the first valid sleep state for the sleep button default. */
  642     sc->acpi_sleep_button_sx = ACPI_STATE_UNKNOWN;
  643     for (state = ACPI_STATE_S1; state <= ACPI_STATE_S4; state++)
  644         if (acpi_sleep_states[state]) {
  645             sc->acpi_sleep_button_sx = state;
  646             break;
  647         }
  648 
  649     acpi_enable_fixed_events(sc);
  650 
  651     /*
  652      * Scan the namespace and attach/initialise children.
  653      */
  654 
  655     /* Register our shutdown handler. */
  656     EVENTHANDLER_REGISTER(shutdown_final, acpi_shutdown_final, sc,
  657         SHUTDOWN_PRI_LAST);
  658 
  659     /*
  660      * Register our acpi event handlers.
   661      * XXX should be configurable, e.g. via a userland policy manager.
  662      */
  663     EVENTHANDLER_REGISTER(acpi_sleep_event, acpi_system_eventhandler_sleep,
  664         sc, ACPI_EVENT_PRI_LAST);
  665     EVENTHANDLER_REGISTER(acpi_wakeup_event, acpi_system_eventhandler_wakeup,
  666         sc, ACPI_EVENT_PRI_LAST);
  667 
  668     /* Flag our initial states. */
  669     sc->acpi_enabled = TRUE;
  670     sc->acpi_sstate = ACPI_STATE_S0;
  671     sc->acpi_sleep_disabled = TRUE;
  672 
  673     /* Create the control device */
  674     sc->acpi_dev_t = make_dev(&acpi_cdevsw, 0, UID_ROOT, GID_WHEEL, 0644,
  675                               "acpi");
  676     sc->acpi_dev_t->si_drv1 = sc;
  677 
  678     if ((error = acpi_machdep_init(dev)))
  679         goto out;
  680 
  681     /* Register ACPI again to pass the correct argument of pm_func. */
  682     power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc);
  683 
  684     if (!acpi_disabled("bus")) {
  685         EVENTHANDLER_REGISTER(dev_lookup, acpi_lookup, NULL, 1000);
  686         acpi_probe_children(dev);
  687     }
  688 
  689     /* Update all GPEs and enable runtime GPEs. */
  690     status = AcpiUpdateAllGpes();
  691     if (ACPI_FAILURE(status))
  692         device_printf(dev, "Could not update all GPEs: %s\n",
  693             AcpiFormatException(status));
  694 
  695     /* Allow sleep request after a while. */
  696     callout_init_mtx(&acpi_sleep_timer, &acpi_mutex, 0);
  697     callout_reset(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME,
  698         acpi_sleep_enable, sc);
  699 
  700     error = 0;
  701 
  702  out:
  703     return_VALUE (error);
  704 }
  705 
  706 static void
  707 acpi_set_power_children(device_t dev, int state)
  708 {
  709         device_t child;
  710         device_t *devlist;
  711         int dstate, i, numdevs;
  712 
  713         if (device_get_children(dev, &devlist, &numdevs) != 0)
  714                 return;
  715 
  716         /*
  717          * Retrieve and set D-state for the sleep state if _SxD is present.
  718          * Skip children who aren't attached since they are handled separately.
  719          */
  720         for (i = 0; i < numdevs; i++) {
  721                 child = devlist[i];
  722                 dstate = state;
  723                 if (device_is_attached(child) &&
  724                     acpi_device_pwr_for_sleep(dev, child, &dstate) == 0)
  725                         acpi_set_powerstate(child, dstate);
  726         }
  727         free(devlist, M_TEMP);
  728 }
  729 
  730 static int
  731 acpi_suspend(device_t dev)
  732 {
  733     int error;
  734 
  735     GIANT_REQUIRED;
  736 
  737     error = bus_generic_suspend(dev);
  738     if (error == 0)
  739         acpi_set_power_children(dev, ACPI_STATE_D3);
  740 
  741     return (error);
  742 }
  743 
  744 static int
  745 acpi_resume(device_t dev)
  746 {
  747 
  748     GIANT_REQUIRED;
  749 
  750     acpi_set_power_children(dev, ACPI_STATE_D0);
  751 
  752     return (bus_generic_resume(dev));
  753 }
  754 
  755 static int
  756 acpi_shutdown(device_t dev)
  757 {
  758 
  759     GIANT_REQUIRED;
  760 
  761     /* Allow children to shutdown first. */
  762     bus_generic_shutdown(dev);
  763 
  764     /*
  765      * Enable any GPEs that are able to power-on the system (i.e., RTC).
  766      * Also, disable any that are not valid for this state (most).
  767      */
  768     acpi_wake_prep_walk(ACPI_STATE_S5);
  769 
  770     return (0);
  771 }
  772 
  773 /*
  774  * Handle a new device being added
  775  */
  776 static device_t
  777 acpi_add_child(device_t bus, u_int order, const char *name, int unit)
  778 {
  779     struct acpi_device  *ad;
  780     device_t            child;
  781 
  782     if ((ad = malloc(sizeof(*ad), M_ACPIDEV, M_NOWAIT | M_ZERO)) == NULL)
  783         return (NULL);
  784 
  785     resource_list_init(&ad->ad_rl);
  786 
  787     child = device_add_child_ordered(bus, order, name, unit);
  788     if (child != NULL)
  789         device_set_ivars(child, ad);
  790     else
  791         free(ad, M_ACPIDEV);
  792     return (child);
  793 }
  794 
  795 static int
  796 acpi_print_child(device_t bus, device_t child)
  797 {
  798     struct acpi_device   *adev = device_get_ivars(child);
  799     struct resource_list *rl = &adev->ad_rl;
  800     int retval = 0;
  801 
  802     retval += bus_print_child_header(bus, child);
  803     retval += resource_list_print_type(rl, "port",  SYS_RES_IOPORT, "%#jx");
  804     retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#jx");
  805     retval += resource_list_print_type(rl, "irq",   SYS_RES_IRQ,    "%jd");
  806     retval += resource_list_print_type(rl, "drq",   SYS_RES_DRQ,    "%jd");
  807     if (device_get_flags(child))
  808         retval += printf(" flags %#x", device_get_flags(child));
  809     retval += bus_print_child_domain(bus, child);
  810     retval += bus_print_child_footer(bus, child);
  811 
  812     return (retval);
  813 }
  814 
  815 /*
  816  * If this device is an ACPI child but no one claimed it, attempt
  817  * to power it off.  We'll power it back up when a driver is added.
  818  *
  819  * XXX Disabled for now since many necessary devices (like fdc and
  820  * ATA) don't claim the devices we created for them but still expect
  821  * them to be powered up.
  822  */
  823 static void
  824 acpi_probe_nomatch(device_t bus, device_t child)
  825 {
  826 #ifdef ACPI_ENABLE_POWERDOWN_NODRIVER
  827     acpi_set_powerstate(child, ACPI_STATE_D3);
  828 #endif
  829 }
  830 
  831 /*
  832  * If a new driver has a chance to probe a child, first power it up.
  833  *
  834  * XXX Disabled for now (see acpi_probe_nomatch for details).
  835  */
  836 static void
  837 acpi_driver_added(device_t dev, driver_t *driver)
  838 {
  839     device_t child, *devlist;
  840     int i, numdevs;
  841 
  842     DEVICE_IDENTIFY(driver, dev);
  843     if (device_get_children(dev, &devlist, &numdevs))
  844             return;
  845     for (i = 0; i < numdevs; i++) {
  846         child = devlist[i];
  847         if (device_get_state(child) == DS_NOTPRESENT) {
  848 #ifdef ACPI_ENABLE_POWERDOWN_NODRIVER
  849             acpi_set_powerstate(child, ACPI_STATE_D0);
  850             if (device_probe_and_attach(child) != 0)
  851                 acpi_set_powerstate(child, ACPI_STATE_D3);
  852 #else
  853             device_probe_and_attach(child);
  854 #endif
  855         }
  856     }
  857     free(devlist, M_TEMP);
  858 }
  859 
  860 /* Location hint for devctl(8) */
  861 static int
  862 acpi_child_location_str_method(device_t cbdev, device_t child, char *buf,
  863     size_t buflen)
  864 {
  865     struct acpi_device *dinfo = device_get_ivars(child);
  866     char buf2[32];
  867     int pxm;
  868 
  869     if (dinfo->ad_handle) {
  870         snprintf(buf, buflen, "handle=%s", acpi_name(dinfo->ad_handle));
  871         if (ACPI_SUCCESS(acpi_GetInteger(dinfo->ad_handle, "_PXM", &pxm))) {
  872                 snprintf(buf2, 32, " _PXM=%d", pxm);
  873                 strlcat(buf, buf2, buflen);
  874         }
  875     } else {
  876         snprintf(buf, buflen, "unknown");
  877     }
  878     return (0);
  879 }
  880 
  881 /* PnP information for devctl(8) */
  882 static int
  883 acpi_child_pnpinfo_str_method(device_t cbdev, device_t child, char *buf,
  884     size_t buflen)
  885 {
  886     struct acpi_device *dinfo = device_get_ivars(child);
  887     ACPI_DEVICE_INFO *adinfo;
  888 
  889     if (ACPI_FAILURE(AcpiGetObjectInfo(dinfo->ad_handle, &adinfo))) {
  890         snprintf(buf, buflen, "unknown");
  891         return (0);
  892     }
  893 
  894     snprintf(buf, buflen, "_HID=%s _UID=%lu",
  895         (adinfo->Valid & ACPI_VALID_HID) ?
  896         adinfo->HardwareId.String : "none",
  897         (adinfo->Valid & ACPI_VALID_UID) ?
  898         strtoul(adinfo->UniqueId.String, NULL, 10) : 0UL);
  899     AcpiOsFree(adinfo);
  900 
  901     return (0);
  902 }
  903 
  904 /*
  905  * Handle per-device ivars
  906  */
  907 static int
  908 acpi_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
  909 {
  910     struct acpi_device  *ad;
  911 
  912     if ((ad = device_get_ivars(child)) == NULL) {
  913         device_printf(child, "device has no ivars\n");
  914         return (ENOENT);
  915     }
  916 
  917     /* ACPI and ISA compatibility ivars */
  918     switch(index) {
  919     case ACPI_IVAR_HANDLE:
  920         *(ACPI_HANDLE *)result = ad->ad_handle;
  921         break;
  922     case ACPI_IVAR_PRIVATE:
  923         *(void **)result = ad->ad_private;
  924         break;
  925     case ACPI_IVAR_FLAGS:
  926         *(int *)result = ad->ad_flags;
  927         break;
  928     case ISA_IVAR_VENDORID:
  929     case ISA_IVAR_SERIAL:
  930     case ISA_IVAR_COMPATID:
  931         *(int *)result = -1;
  932         break;
  933     case ISA_IVAR_LOGICALID:
  934         *(int *)result = acpi_isa_get_logicalid(child);
  935         break;
  936     default:
  937         return (ENOENT);
  938     }
  939 
  940     return (0);
  941 }
  942 
  943 static int
  944 acpi_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
  945 {
  946     struct acpi_device  *ad;
  947 
  948     if ((ad = device_get_ivars(child)) == NULL) {
  949         device_printf(child, "device has no ivars\n");
  950         return (ENOENT);
  951     }
  952 
  953     switch(index) {
  954     case ACPI_IVAR_HANDLE:
  955         ad->ad_handle = (ACPI_HANDLE)value;
  956         break;
  957     case ACPI_IVAR_PRIVATE:
  958         ad->ad_private = (void *)value;
  959         break;
  960     case ACPI_IVAR_FLAGS:
  961         ad->ad_flags = (int)value;
  962         break;
  963     default:
  964         panic("bad ivar write request (%d)", index);
  965         return (ENOENT);
  966     }
  967 
  968     return (0);
  969 }
  970 
  971 /*
  972  * Handle child resource allocation/removal
  973  */
  974 static struct resource_list *
  975 acpi_get_rlist(device_t dev, device_t child)
  976 {
  977     struct acpi_device          *ad;
  978 
  979     ad = device_get_ivars(child);
  980     return (&ad->ad_rl);
  981 }
  982 
  983 static int
  984 acpi_match_resource_hint(device_t dev, int type, long value)
  985 {
  986     struct acpi_device *ad = device_get_ivars(dev);
  987     struct resource_list *rl = &ad->ad_rl;
  988     struct resource_list_entry *rle;
  989 
  990     STAILQ_FOREACH(rle, rl, link) {
  991         if (rle->type != type)
  992             continue;
  993         if (rle->start <= value && rle->end >= value)
  994             return (1);
  995     }
  996     return (0);
  997 }
  998 
  999 /*
 1000  * Wire device unit numbers based on resource matches in hints.
 1001  */
 1002 static void
 1003 acpi_hint_device_unit(device_t acdev, device_t child, const char *name,
 1004     int *unitp)
 1005 {
 1006     const char *s;
 1007     long value;
 1008     int line, matches, unit;
 1009 
 1010     /*
 1011      * Iterate over all the hints for the devices with the specified
  1012      * name to see if one's resources are a subset of this device's.
 1013      */
 1014     line = 0;
 1015     for (;;) {
 1016         if (resource_find_dev(&line, name, &unit, "at", NULL) != 0)
 1017             break;
 1018 
 1019         /* Must have an "at" for acpi or isa. */
 1020         resource_string_value(name, unit, "at", &s);
 1021         if (!(strcmp(s, "acpi0") == 0 || strcmp(s, "acpi") == 0 ||
 1022             strcmp(s, "isa0") == 0 || strcmp(s, "isa") == 0))
 1023             continue;
 1024 
 1025         /*
 1026          * Check for matching resources.  We must have at least one match.
 1027          * Since I/O and memory resources cannot be shared, if we get a
 1028          * match on either of those, ignore any mismatches in IRQs or DRQs.
 1029          *
 1030          * XXX: We may want to revisit this to be more lenient and wire
 1031          * as long as it gets one match.
 1032          */
 1033         matches = 0;
 1034         if (resource_long_value(name, unit, "port", &value) == 0) {
 1035             /*
 1036              * Floppy drive controllers are notorious for having a
 1037              * wide variety of resources not all of which include the
 1038              * first port that is specified by the hint (typically
 1039              * 0x3f0) (see the comment above fdc_isa_alloc_resources()
 1040              * in fdc_isa.c).  However, they do all seem to include
 1041              * port + 2 (e.g. 0x3f2) so for a floppy device, look for
 1042              * 'value + 2' in the port resources instead of the hint
 1043              * value.
 1044              */
 1045             if (strcmp(name, "fdc") == 0)
 1046                 value += 2;
 1047             if (acpi_match_resource_hint(child, SYS_RES_IOPORT, value))
 1048                 matches++;
 1049             else
 1050                 continue;
 1051         }
 1052         if (resource_long_value(name, unit, "maddr", &value) == 0) {
 1053             if (acpi_match_resource_hint(child, SYS_RES_MEMORY, value))
 1054                 matches++;
 1055             else
 1056                 continue;
 1057         }
 1058         if (matches > 0)
 1059             goto matched;
 1060         if (resource_long_value(name, unit, "irq", &value) == 0) {
 1061             if (acpi_match_resource_hint(child, SYS_RES_IRQ, value))
 1062                 matches++;
 1063             else
 1064                 continue;
 1065         }
 1066         if (resource_long_value(name, unit, "drq", &value) == 0) {
 1067             if (acpi_match_resource_hint(child, SYS_RES_DRQ, value))
 1068                 matches++;
 1069             else
 1070                 continue;
 1071         }
 1072 
 1073     matched:
 1074         if (matches > 0) {
 1075             /* We have a winner! */
 1076             *unitp = unit;
 1077             break;
 1078         }
 1079     }
 1080 }
 1081 
 1082 /*
 1083  * Fetch the NUMA domain for a device by mapping the value returned by
 1084  * _PXM to a NUMA domain.  If the device does not have a _PXM method,
 1085  * -2 is returned.  If any other error occurs, -1 is returned.
 1086  */
 1087 static int
 1088 acpi_parse_pxm(device_t dev)
 1089 {
 1090 #ifdef DEVICE_NUMA
 1091         ACPI_HANDLE handle;
 1092         ACPI_STATUS status;
 1093         int pxm;
 1094 
 1095         handle = acpi_get_handle(dev);
 1096         if (handle == NULL)
 1097                 return (-2);
 1098         status = acpi_GetInteger(handle, "_PXM", &pxm);
 1099         if (ACPI_SUCCESS(status))
 1100                 return (acpi_map_pxm_to_vm_domainid(pxm));
 1101         if (status == AE_NOT_FOUND)
 1102                 return (-2);
 1103 #endif
 1104         return (-1);
 1105 }
 1106 
 1107 int
 1108 acpi_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize,
 1109     cpuset_t *cpuset)
 1110 {
 1111         int d, error;
 1112 
 1113         d = acpi_parse_pxm(child);
 1114         if (d < 0)
 1115                 return (bus_generic_get_cpus(dev, child, op, setsize, cpuset));
 1116 
 1117         switch (op) {
 1118         case LOCAL_CPUS:
 1119                 if (setsize != sizeof(cpuset_t))
 1120                         return (EINVAL);
 1121                 *cpuset = cpuset_domain[d];
 1122                 return (0);
 1123         case INTR_CPUS:
 1124                 error = bus_generic_get_cpus(dev, child, op, setsize, cpuset);
 1125                 if (error != 0)
 1126                         return (error);
 1127                 if (setsize != sizeof(cpuset_t))
 1128                         return (EINVAL);
 1129                 CPU_AND(cpuset, &cpuset_domain[d]);
 1130                 return (0);
 1131         default:
 1132                 return (bus_generic_get_cpus(dev, child, op, setsize, cpuset));
 1133         }
 1134 }
 1135 
 1136 /*
 1137  * Fetch the NUMA domain for the given device 'dev'.
 1138  *
 1139  * If a device has a _PXM method, map that to a NUMA domain.
 1140  * Otherwise, pass the request up to the parent.
 1141  * If there's no matching domain or the domain cannot be
 1142  * determined, return ENOENT.
 1143  */
 1144 int
 1145 acpi_get_domain(device_t dev, device_t child, int *domain)
 1146 {
 1147         int d;
 1148 
 1149         d = acpi_parse_pxm(child);
 1150         if (d >= 0) {
 1151                 *domain = d;
 1152                 return (0);
 1153         }
 1154         if (d == -1)
 1155                 return (ENOENT);
 1156 
 1157         /* No _PXM node; go up a level */
 1158         return (bus_generic_get_domain(dev, child, domain));
 1159 }
 1160 
 1161 /*
 1162  * Pre-allocate/manage all memory and IO resources.  Since rman can't handle
 1163  * duplicates, we merge any in the sysresource attach routine.
 1164  */
 1165 static int
 1166 acpi_sysres_alloc(device_t dev)
 1167 {
 1168     struct resource *res;
 1169     struct resource_list *rl;
 1170     struct resource_list_entry *rle;
 1171     struct rman *rm;
 1172     char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL };
 1173     device_t *children;
 1174     int child_count, i;
 1175 
 1176     /*
 1177      * Probe/attach any sysresource devices.  This would be unnecessary if we
 1178      * had multi-pass probe/attach.
 1179      */
 1180     if (device_get_children(dev, &children, &child_count) != 0)
 1181         return (ENXIO);
 1182     for (i = 0; i < child_count; i++) {
 1183         if (ACPI_ID_PROBE(dev, children[i], sysres_ids) != NULL)
 1184             device_probe_and_attach(children[i]);
 1185     }
 1186     free(children, M_TEMP);
 1187 
 1188     rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev);
 1189     STAILQ_FOREACH(rle, rl, link) {
 1190         if (rle->res != NULL) {
 1191             device_printf(dev, "duplicate resource for %jx\n", rle->start);
 1192             continue;
 1193         }
 1194 
 1195         /* Only memory and IO resources are valid here. */
 1196         switch (rle->type) {
 1197         case SYS_RES_IOPORT:
 1198             rm = &acpi_rman_io;
 1199             break;
 1200         case SYS_RES_MEMORY:
 1201             rm = &acpi_rman_mem;
 1202             break;
 1203         default:
 1204             continue;
 1205         }
 1206 
 1207         /* Pre-allocate resource and add to our rman pool. */
 1208         res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, rle->type,
 1209             &rle->rid, rle->start, rle->start + rle->count - 1, rle->count, 0);
 1210         if (res != NULL) {
 1211             rman_manage_region(rm, rman_get_start(res), rman_get_end(res));
 1212             rle->res = res;
 1213         } else if (bootverbose)
 1214             device_printf(dev, "reservation of %jx, %jx (%d) failed\n",
 1215                 rle->start, rle->count, rle->type);
 1216     }
 1217     return (0);
 1218 }
 1219 
 1220 static char *pcilink_ids[] = { "PNP0C0F", NULL };
 1221 static char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL };
 1222 
 1223 /*
 1224  * Reserve declared resources for devices found during attach once system
 1225  * resources have been allocated.
 1226  */
 1227 static void
 1228 acpi_reserve_resources(device_t dev)
 1229 {
 1230     struct resource_list_entry *rle;
 1231     struct resource_list *rl;
 1232     struct acpi_device *ad;
 1233     struct acpi_softc *sc;
 1234     device_t *children;
 1235     int child_count, i;
 1236 
 1237     sc = device_get_softc(dev);
 1238     if (device_get_children(dev, &children, &child_count) != 0)
 1239         return;
 1240     for (i = 0; i < child_count; i++) {
 1241         ad = device_get_ivars(children[i]);
 1242         rl = &ad->ad_rl;
 1243 
 1244         /* Don't reserve system resources. */
 1245         if (ACPI_ID_PROBE(dev, children[i], sysres_ids) != NULL)
 1246             continue;
 1247 
 1248         STAILQ_FOREACH(rle, rl, link) {
 1249             /*
 1250              * Don't reserve IRQ resources.  There are many sticky things
 1251              * to get right otherwise (e.g. IRQs for psm, atkbd, and HPET
 1252              * when using legacy routing).
 1253              */
 1254             if (rle->type == SYS_RES_IRQ)
 1255                 continue;
 1256 
 1257             /*
 1258              * Don't reserve the resource if it is already allocated.
 1259              * The acpi_ec(4) driver can allocate its resources early
 1260              * if ECDT is present.
 1261              */
 1262             if (rle->res != NULL)
 1263                 continue;
 1264 
 1265             /*
 1266              * Try to reserve the resource from our parent.  If this
 1267              * fails because the resource is a system resource, just
 1268              * let it be.  The resource range is already reserved so
 1269              * that other devices will not use it.  If the driver
 1270              * needs to allocate the resource, then
 1271              * acpi_alloc_resource() will sub-alloc from the system
 1272              * resource.
 1273              */
 1274             resource_list_reserve(rl, dev, children[i], rle->type, &rle->rid,
 1275                 rle->start, rle->end, rle->count, 0);
 1276         }
 1277     }
 1278     free(children, M_TEMP);
 1279     sc->acpi_resources_reserved = 1;
 1280 }
 1281 
 1282 static int
 1283 acpi_set_resource(device_t dev, device_t child, int type, int rid,
 1284     rman_res_t start, rman_res_t count)
 1285 {
 1286     struct acpi_softc *sc = device_get_softc(dev);
 1287     struct acpi_device *ad = device_get_ivars(child);
 1288     struct resource_list *rl = &ad->ad_rl;
 1289     ACPI_DEVICE_INFO *devinfo;
 1290     rman_res_t end;
 1291     
 1292     /* Ignore IRQ resources for PCI link devices. */
 1293     if (type == SYS_RES_IRQ && ACPI_ID_PROBE(dev, child, pcilink_ids) != NULL)
 1294         return (0);
 1295 
 1296     /*
 1297      * Ignore most resources for PCI root bridges.  Some BIOSes
 1298      * incorrectly enumerate the memory ranges they decode as plain
 1299      * memory resources instead of as ResourceProducer ranges.  Other
 1300      * BIOSes incorrectly list system resource entries for I/O ranges
 1301      * under the PCI bridge.  Do allow the one known-correct case on
 1302      * x86 of a PCI bridge claiming the I/O ports used for PCI config
 1303      * access.
 1304      */
 1305     if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) {
 1306         if (ACPI_SUCCESS(AcpiGetObjectInfo(ad->ad_handle, &devinfo))) {
 1307             if ((devinfo->Flags & ACPI_PCI_ROOT_BRIDGE) != 0) {
 1308 #if defined(__i386__) || defined(__amd64__)
 1309                 if (!(type == SYS_RES_IOPORT && start == CONF1_ADDR_PORT))
 1310 #endif
 1311                 {
 1312                     AcpiOsFree(devinfo);
 1313                     return (0);
 1314                 }
 1315             }
 1316             AcpiOsFree(devinfo);
 1317         }
 1318     }
 1319 
 1320     /* If the resource is already allocated, fail. */
 1321     if (resource_list_busy(rl, type, rid))
 1322         return (EBUSY);
 1323 
 1324     /* If the resource is already reserved, release it. */
 1325     if (resource_list_reserved(rl, type, rid))
 1326         resource_list_unreserve(rl, dev, child, type, rid);
 1327 
 1328     /* Add the resource. */
 1329     end = (start + count - 1);
 1330     resource_list_add(rl, type, rid, start, end, count);
 1331 
 1332     /* Don't reserve resources until the system resources are allocated. */
 1333     if (!sc->acpi_resources_reserved)
 1334         return (0);
 1335 
 1336     /* Don't reserve system resources. */
 1337     if (ACPI_ID_PROBE(dev, child, sysres_ids) != NULL)
 1338         return (0);
 1339 
 1340     /*
 1341      * Don't reserve IRQ resources.  There are many sticky things to
 1342      * get right otherwise (e.g. IRQs for psm, atkbd, and HPET when
 1343      * using legacy routing).
 1344      */
 1345     if (type == SYS_RES_IRQ)
 1346         return (0);
 1347 
 1348     /*
 1349      * Don't reserve resources for CPU devices.  Some of these
 1350      * resources need to be allocated as shareable, but reservations
 1351      * are always non-shareable.
 1352      */
 1353     if (device_get_devclass(child) == devclass_find("cpu"))
 1354         return (0);
 1355 
 1356     /*
 1357      * Reserve the resource.
 1358      *
 1359      * XXX: Ignores failure for now.  Failure here is probably a
 1360      * BIOS/firmware bug?
 1361      */
 1362     resource_list_reserve(rl, dev, child, type, &rid, start, end, count, 0);
 1363     return (0);
 1364 }
 1365 
 1366 static struct resource *
 1367 acpi_alloc_resource(device_t bus, device_t child, int type, int *rid,
 1368     rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
 1369 {
 1370     ACPI_RESOURCE ares;
 1371     struct acpi_device *ad;
 1372     struct resource_list_entry *rle;
 1373     struct resource_list *rl;
 1374     struct resource *res;
 1375     int isdefault = RMAN_IS_DEFAULT_RANGE(start, end);
 1376 
 1377     /*
 1378      * First attempt at allocating the resource.  For direct children,
 1379      * use resource_list_alloc() to handle reserved resources.  For
 1380      * other devices, pass the request up to our parent.
 1381      */
 1382     if (bus == device_get_parent(child)) {
 1383         ad = device_get_ivars(child);
 1384         rl = &ad->ad_rl;
 1385 
 1386         /*
 1387          * Simulate the behavior of the ISA bus for direct children
 1388          * devices.  That is, if a non-default range is specified for
 1389          * a resource that doesn't exist, use bus_set_resource() to
 1390          * add the resource before allocating it.  Note that these
 1391          * resources will not be reserved.
 1392          */
 1393         if (!isdefault && resource_list_find(rl, type, *rid) == NULL)
 1394                 resource_list_add(rl, type, *rid, start, end, count);
 1395         res = resource_list_alloc(rl, bus, child, type, rid, start, end, count,
 1396             flags);
 1397         if (res != NULL && type == SYS_RES_IRQ) {
 1398             /*
 1399              * Since bus_config_intr() takes immediate effect, we cannot
 1400              * configure the interrupt associated with a device when we
 1401              * parse the resources but have to defer it until a driver
 1402              * actually allocates the interrupt via bus_alloc_resource().
 1403              *
 1404              * XXX: Should we handle the lookup failing?
 1405              */
 1406             if (ACPI_SUCCESS(acpi_lookup_irq_resource(child, *rid, res, &ares)))
 1407                 acpi_config_intr(child, &ares);
 1408         }
 1409 
 1410         /*
 1411          * If this is an allocation of the "default" range for a given
 1412          * RID, fetch the exact bounds for this resource from the
 1413          * resource list entry to try to allocate the range from the
 1414          * system resource regions.
 1415          */
 1416         if (res == NULL && isdefault) {
 1417             rle = resource_list_find(rl, type, *rid);
 1418             if (rle != NULL) {
 1419                 start = rle->start;
 1420                 end = rle->end;
 1421                 count = rle->count;
 1422             }
 1423         }
 1424     } else
 1425         res = BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid,
 1426             start, end, count, flags);
 1427 
 1428     /*
 1429      * If the first attempt failed and this is an allocation of a
 1430      * specific range, try to satisfy the request via a suballocation
 1431      * from our system resource regions.
 1432      */
 1433     if (res == NULL && start + count - 1 == end)
 1434         res = acpi_alloc_sysres(child, type, rid, start, end, count, flags);
 1435     return (res);
 1436 }
 1437 
 1438 /*
 1439  * Attempt to allocate a specific resource range from the system
 1440  * resource ranges.  Note that we only handle memory and I/O port
 1441  * system resources.
 1442  */
 1443 struct resource *
 1444 acpi_alloc_sysres(device_t child, int type, int *rid, rman_res_t start,
 1445     rman_res_t end, rman_res_t count, u_int flags)
 1446 {
 1447     struct rman *rm;
 1448     struct resource *res;
 1449 
 1450     switch (type) {
 1451     case SYS_RES_IOPORT:
 1452         rm = &acpi_rman_io;
 1453         break;
 1454     case SYS_RES_MEMORY:
 1455         rm = &acpi_rman_mem;
 1456         break;
 1457     default:
 1458         return (NULL);
 1459     }
 1460 
 1461     KASSERT(start + count - 1 == end, ("wildcard resource range"));
 1462     res = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE,
 1463         child);
 1464     if (res == NULL)
 1465         return (NULL);
 1466 
 1467     rman_set_rid(res, *rid);
 1468 
 1469     /* If requested, activate the resource using the parent's method. */
 1470     if (flags & RF_ACTIVE)
 1471         if (bus_activate_resource(child, type, *rid, res) != 0) {
 1472             rman_release_resource(res);
 1473             return (NULL);
 1474         }
 1475 
 1476     return (res);
 1477 }
 1478 
 1479 static int
 1480 acpi_is_resource_managed(int type, struct resource *r)
 1481 {
 1482 
 1483     /* We only handle memory and IO resources through rman. */
 1484     switch (type) {
 1485     case SYS_RES_IOPORT:
 1486         return (rman_is_region_manager(r, &acpi_rman_io));
 1487     case SYS_RES_MEMORY:
 1488         return (rman_is_region_manager(r, &acpi_rman_mem));
 1489     }
 1490     return (0);
 1491 }
 1492 
 1493 static int
 1494 acpi_adjust_resource(device_t bus, device_t child, int type, struct resource *r,
 1495     rman_res_t start, rman_res_t end)
 1496 {
 1497 
 1498     if (acpi_is_resource_managed(type, r))
 1499         return (rman_adjust_resource(r, start, end));
 1500     return (bus_generic_adjust_resource(bus, child, type, r, start, end));
 1501 }
 1502 
 1503 static int
 1504 acpi_release_resource(device_t bus, device_t child, int type, int rid,
 1505     struct resource *r)
 1506 {
 1507     int ret;
 1508 
 1509     /*
 1510      * If this resource belongs to one of our internal managers,
 1511      * deactivate it and release it to the local pool.
 1512      */
 1513     if (acpi_is_resource_managed(type, r)) {
 1514         if (rman_get_flags(r) & RF_ACTIVE) {
 1515             ret = bus_deactivate_resource(child, type, rid, r);
 1516             if (ret != 0)
 1517                 return (ret);
 1518         }
 1519         return (rman_release_resource(r));
 1520     }
 1521 
 1522     return (bus_generic_rl_release_resource(bus, child, type, rid, r));
 1523 }
 1524 
 1525 static void
 1526 acpi_delete_resource(device_t bus, device_t child, int type, int rid)
 1527 {
 1528     struct resource_list *rl;
 1529 
 1530     rl = acpi_get_rlist(bus, child);
 1531     if (resource_list_busy(rl, type, rid)) {
 1532         device_printf(bus, "delete_resource: Resource still owned by child"
 1533             " (type=%d, rid=%d)\n", type, rid);
 1534         return;
 1535     }
 1536     resource_list_unreserve(rl, bus, child, type, rid);
 1537     resource_list_delete(rl, type, rid);
 1538 }
 1539 
 1540 /* Allocate an IO port or memory resource, given its GAS. */
 1541 int
 1542 acpi_bus_alloc_gas(device_t dev, int *type, int *rid, ACPI_GENERIC_ADDRESS *gas,
 1543     struct resource **res, u_int flags)
 1544 {
 1545     int error, res_type;
 1546 
 1547     error = ENOMEM;
 1548     if (type == NULL || rid == NULL || gas == NULL || res == NULL)
 1549         return (EINVAL);
 1550 
 1551     /* We only support memory and IO spaces. */
 1552     switch (gas->SpaceId) {
 1553     case ACPI_ADR_SPACE_SYSTEM_MEMORY:
 1554         res_type = SYS_RES_MEMORY;
 1555         break;
 1556     case ACPI_ADR_SPACE_SYSTEM_IO:
 1557         res_type = SYS_RES_IOPORT;
 1558         break;
 1559     default:
 1560         return (EOPNOTSUPP);
 1561     }
 1562 
 1563     /*
 1564      * If the register width is less than 8, assume the BIOS author means
 1565      * it is a bit field and just allocate a byte.
 1566      */
 1567     if (gas->BitWidth && gas->BitWidth < 8)
 1568         gas->BitWidth = 8;
 1569 
 1570     /* Validate the address after we're sure we support the space. */
 1571     if (gas->Address == 0 || gas->BitWidth == 0)
 1572         return (EINVAL);
 1573 
 1574     bus_set_resource(dev, res_type, *rid, gas->Address,
 1575         gas->BitWidth / 8);
 1576     *res = bus_alloc_resource_any(dev, res_type, rid, RF_ACTIVE | flags);
 1577     if (*res != NULL) {
 1578         *type = res_type;
 1579         error = 0;
 1580     } else
 1581         bus_delete_resource(dev, res_type, *rid);
 1582 
 1583     return (error);
 1584 }
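/*
 * Example (a minimal sketch, not taken from any in-tree driver): mapping a
 * register described by a GAS.  The use of the FADT reset register here is
 * purely illustrative.
 *
 *      ACPI_GENERIC_ADDRESS *gas = &AcpiGbl_FADT.ResetRegister;
 *      struct resource *res;
 *      int rid = 0, rtype;
 *
 *      if (acpi_bus_alloc_gas(dev, &rtype, &rid, gas, &res, 0) == 0) {
 *              ... access the register via bus_read_1()/bus_write_1() ...
 *              bus_release_resource(dev, rtype, rid, res);
 *              bus_delete_resource(dev, rtype, rid);
 *      }
 */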
 1585 
 1586 /* Probe _HID and _CID for compatible ISA PNP ids. */
 1587 static uint32_t
 1588 acpi_isa_get_logicalid(device_t dev)
 1589 {
 1590     ACPI_DEVICE_INFO    *devinfo;
 1591     ACPI_HANDLE         h;
 1592     uint32_t            pnpid;
 1593 
 1594     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 1595 
 1596     /* Fetch and validate the HID. */
 1597     if ((h = acpi_get_handle(dev)) == NULL ||
 1598         ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
 1599         return_VALUE (0);
 1600 
 1601     pnpid = (devinfo->Valid & ACPI_VALID_HID) != 0 &&
 1602         devinfo->HardwareId.Length >= ACPI_EISAID_STRING_SIZE ?
 1603         PNP_EISAID(devinfo->HardwareId.String) : 0;
 1604     AcpiOsFree(devinfo);
 1605 
 1606     return_VALUE (pnpid);
 1607 }
 1608 
 1609 static int
 1610 acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count)
 1611 {
 1612     ACPI_DEVICE_INFO    *devinfo;
 1613     ACPI_PNP_DEVICE_ID  *ids;
 1614     ACPI_HANDLE         h;
 1615     uint32_t            *pnpid;
 1616     int                 i, valid;
 1617 
 1618     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 1619 
 1620     pnpid = cids;
 1621 
 1622     /* Fetch and validate the CID */
 1623     if ((h = acpi_get_handle(dev)) == NULL ||
 1624         ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
 1625         return_VALUE (0);
 1626 
 1627     if ((devinfo->Valid & ACPI_VALID_CID) == 0) {
 1628         AcpiOsFree(devinfo);
 1629         return_VALUE (0);
 1630     }
 1631 
 1632     if (devinfo->CompatibleIdList.Count < count)
 1633         count = devinfo->CompatibleIdList.Count;
 1634     ids = devinfo->CompatibleIdList.Ids;
 1635     for (i = 0, valid = 0; i < count; i++)
 1636         if (ids[i].Length >= ACPI_EISAID_STRING_SIZE &&
 1637             strncmp(ids[i].String, "PNP", 3) == 0) {
 1638             *pnpid++ = PNP_EISAID(ids[i].String);
 1639             valid++;
 1640         }
 1641     AcpiOsFree(devinfo);
 1642 
 1643     return_VALUE (valid);
 1644 }
 1645 
 1646 static char *
 1647 acpi_device_id_probe(device_t bus, device_t dev, char **ids) 
 1648 {
 1649     ACPI_HANDLE h;
 1650     ACPI_OBJECT_TYPE t;
 1651     int i;
 1652 
 1653     h = acpi_get_handle(dev);
 1654     if (ids == NULL || h == NULL)
 1655         return (NULL);
 1656     t = acpi_get_type(dev);
 1657     if (t != ACPI_TYPE_DEVICE && t != ACPI_TYPE_PROCESSOR)
 1658         return (NULL);
 1659 
 1660     /* Try to match one of the array of IDs with a HID or CID. */
 1661     for (i = 0; ids[i] != NULL; i++) {
 1662         if (acpi_MatchHid(h, ids[i]))
 1663             return (ids[i]);
 1664     }
 1665     return (NULL);
 1666 }
 1667 
 1668 static ACPI_STATUS
 1669 acpi_device_eval_obj(device_t bus, device_t dev, ACPI_STRING pathname,
 1670     ACPI_OBJECT_LIST *parameters, ACPI_BUFFER *ret)
 1671 {
 1672     ACPI_HANDLE h;
 1673 
 1674     if (dev == NULL)
 1675         h = ACPI_ROOT_OBJECT;
 1676     else if ((h = acpi_get_handle(dev)) == NULL)
 1677         return (AE_BAD_PARAMETER);
 1678     return (AcpiEvaluateObject(h, pathname, parameters, ret));
 1679 }
 1680 
 1681 int
 1682 acpi_device_pwr_for_sleep(device_t bus, device_t dev, int *dstate)
 1683 {
 1684     struct acpi_softc *sc;
 1685     ACPI_HANDLE handle;
 1686     ACPI_STATUS status;
 1687     char sxd[8];
 1688 
 1689     handle = acpi_get_handle(dev);
 1690 
 1691     /*
 1692      * XXX If we find these devices, don't try to power them down.
 1693      * The serial and IrDA ports on my T23 hang the system when
 1694      * set to D3, and it appears that such legacy devices may
 1695      * need special handling in their drivers.
 1696      */
 1697     if (dstate == NULL || handle == NULL ||
 1698         acpi_MatchHid(handle, "PNP0500") ||
 1699         acpi_MatchHid(handle, "PNP0501") ||
 1700         acpi_MatchHid(handle, "PNP0502") ||
 1701         acpi_MatchHid(handle, "PNP0510") ||
 1702         acpi_MatchHid(handle, "PNP0511"))
 1703         return (ENXIO);
 1704 
 1705     /*
 1706      * Override next state with the value from _SxD, if present.
 1707      * Note that the (illegal) _S0D is evaluated because some systems expect this.
 1708      */
 1709     sc = device_get_softc(bus);
 1710     snprintf(sxd, sizeof(sxd), "_S%dD", sc->acpi_sstate);
 1711     status = acpi_GetInteger(handle, sxd, dstate);
 1712     if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
 1713             device_printf(dev, "failed to get %s on %s: %s\n", sxd,
 1714                 acpi_name(handle), AcpiFormatException(status));
 1715             return (ENXIO);
 1716     }
 1717 
 1718     return (0);
 1719 }
 1720 
 1721 /* Callback arg for our implementation of walking the namespace. */
 1722 struct acpi_device_scan_ctx {
 1723     acpi_scan_cb_t      user_fn;
 1724     void                *arg;
 1725     ACPI_HANDLE         parent;
 1726 };
 1727 
 1728 static ACPI_STATUS
 1729 acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, void *arg, void **retval)
 1730 {
 1731     struct acpi_device_scan_ctx *ctx;
 1732     device_t dev, old_dev;
 1733     ACPI_STATUS status;
 1734     ACPI_OBJECT_TYPE type;
 1735 
 1736     /*
 1737      * Skip this device if we think we'll have trouble with it or it is
 1738      * the parent where the scan began.
 1739      */
 1740     ctx = (struct acpi_device_scan_ctx *)arg;
 1741     if (acpi_avoid(h) || h == ctx->parent)
 1742         return (AE_OK);
 1743 
 1744     /* If this is not a valid device type (e.g., a method), skip it. */
 1745     if (ACPI_FAILURE(AcpiGetType(h, &type)))
 1746         return (AE_OK);
 1747     if (type != ACPI_TYPE_DEVICE && type != ACPI_TYPE_PROCESSOR &&
 1748         type != ACPI_TYPE_THERMAL && type != ACPI_TYPE_POWER)
 1749         return (AE_OK);
 1750 
 1751     /*
 1752      * Call the user function with the current device.  If it is unchanged
 1753      * afterwards, return.  Otherwise, we update the handle to the new dev.
 1754      */
 1755     old_dev = acpi_get_device(h);
 1756     dev = old_dev;
 1757     status = ctx->user_fn(h, &dev, level, ctx->arg);
 1758     if (ACPI_FAILURE(status) || old_dev == dev)
 1759         return (status);
 1760 
 1761     /* Remove the old child and its connection to the handle. */
 1762     if (old_dev != NULL) {
 1763         device_delete_child(device_get_parent(old_dev), old_dev);
 1764         AcpiDetachData(h, acpi_fake_objhandler);
 1765     }
 1766 
 1767     /* Recreate the handle association if the user created a device. */
 1768     if (dev != NULL)
 1769         AcpiAttachData(h, acpi_fake_objhandler, dev);
 1770 
 1771     return (AE_OK);
 1772 }
 1773 
 1774 static ACPI_STATUS
 1775 acpi_device_scan_children(device_t bus, device_t dev, int max_depth,
 1776     acpi_scan_cb_t user_fn, void *arg)
 1777 {
 1778     ACPI_HANDLE h;
 1779     struct acpi_device_scan_ctx ctx;
 1780 
 1781     if (acpi_disabled("children"))
 1782         return (AE_OK);
 1783 
 1784     if (dev == NULL)
 1785         h = ACPI_ROOT_OBJECT;
 1786     else if ((h = acpi_get_handle(dev)) == NULL)
 1787         return (AE_BAD_PARAMETER);
 1788     ctx.user_fn = user_fn;
 1789     ctx.arg = arg;
 1790     ctx.parent = h;
 1791     return (AcpiWalkNamespace(ACPI_TYPE_ANY, h, max_depth,
 1792         acpi_device_scan_cb, NULL, &ctx, NULL));
 1793 }
 1794 
 1795 /*
 1796  * Even though ACPI devices are not PCI, we use the PCI approach for setting
 1797  * device power states since the PCI D-state model is close enough to ACPI's.
 1798  */
 1799 static int
 1800 acpi_set_powerstate(device_t child, int state)
 1801 {
 1802     ACPI_HANDLE h;
 1803     ACPI_STATUS status;
 1804 
 1805     h = acpi_get_handle(child);
 1806     if (state < ACPI_STATE_D0 || state > ACPI_D_STATES_MAX)
 1807         return (EINVAL);
 1808     if (h == NULL)
 1809         return (0);
 1810 
 1811     /* Ignore errors if the power methods aren't present. */
 1812     status = acpi_pwr_switch_consumer(h, state);
 1813     if (ACPI_SUCCESS(status)) {
 1814         if (bootverbose)
 1815             device_printf(child, "set ACPI power state D%d on %s\n",
 1816                 state, acpi_name(h));
 1817     } else if (status != AE_NOT_FOUND)
 1818         device_printf(child,
 1819             "failed to set ACPI power state D%d on %s: %s\n", state,
 1820             acpi_name(h), AcpiFormatException(status));
 1821 
 1822     return (0);
 1823 }
 1824 
 1825 static int
 1826 acpi_isa_pnp_probe(device_t bus, device_t child, struct isa_pnp_id *ids)
 1827 {
 1828     int                 result, cid_count, i;
 1829     uint32_t            lid, cids[8];
 1830 
 1831     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 1832 
 1833     /*
 1834      * ISA-style drivers attached to ACPI may persist and
 1835      * probe manually if we return ENOENT.  We never want
 1836      * that to happen, so don't ever return it.
 1837      */
 1838     result = ENXIO;
 1839 
 1840     /* Scan the supplied IDs for a match */
 1841     lid = acpi_isa_get_logicalid(child);
 1842     cid_count = acpi_isa_get_compatid(child, cids, 8);
 1843     while (ids && ids->ip_id) {
 1844         if (lid == ids->ip_id) {
 1845             result = 0;
 1846             goto out;
 1847         }
 1848         for (i = 0; i < cid_count; i++) {
 1849             if (cids[i] == ids->ip_id) {
 1850                 result = 0;
 1851                 goto out;
 1852             }
 1853         }
 1854         ids++;
 1855     }
 1856 
 1857  out:
 1858     if (result == 0 && ids->ip_desc)
 1859         device_set_desc(child, ids->ip_desc);
 1860 
 1861     return_VALUE (result);
 1862 }
 1863 
 1864 #if defined(__i386__) || defined(__amd64__)
 1865 /*
 1866  * Look for an MCFG table.  If it is present, use the settings for
 1867  * domain (segment) 0 to set up PCI config space access via the memory
 1868  * map.
 1869  */
 1870 static void
 1871 acpi_enable_pcie(void)
 1872 {
 1873         ACPI_TABLE_HEADER *hdr;
 1874         ACPI_MCFG_ALLOCATION *alloc, *end;
 1875         ACPI_STATUS status;
 1876 
 1877         status = AcpiGetTable(ACPI_SIG_MCFG, 1, &hdr);
 1878         if (ACPI_FAILURE(status))
 1879                 return;
 1880 
 1881         end = (ACPI_MCFG_ALLOCATION *)((char *)hdr + hdr->Length);
 1882         alloc = (ACPI_MCFG_ALLOCATION *)((ACPI_TABLE_MCFG *)hdr + 1);
 1883         while (alloc < end) {
 1884                 if (alloc->PciSegment == 0) {
 1885                         pcie_cfgregopen(alloc->Address, alloc->StartBusNumber,
 1886                             alloc->EndBusNumber);
 1887                         return;
 1888                 }
 1889                 alloc++;
 1890         }
 1891 }
 1892 #endif
 1893 
 1894 /*
 1895  * Scan all of the ACPI namespace and attach child devices.
 1896  *
 1897  * We should only expect to find devices in the \_PR, \_TZ, \_SI, and
 1898  * \_SB scopes, and \_PR and \_TZ became obsolete in the ACPI 2.0 spec.
 1899  * However, in violation of the spec, some systems place their PCI link
 1900  * devices in \, so we have to walk the whole namespace.  We check the
 1901  * type of namespace nodes, so this should be ok.
 1902  */
 1903 static void
 1904 acpi_probe_children(device_t bus)
 1905 {
 1906 
 1907     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 1908 
 1909     /*
 1910      * Scan the namespace and insert placeholders for all the devices that
 1911      * we find.  We also probe/attach any early devices.
 1912      *
 1913      * Note that we use AcpiWalkNamespace rather than AcpiGetDevices because
 1914      * we want to create nodes for all devices, not just those that are
 1915      * currently present. (This assumes that we don't want to create/remove
 1916      * devices as they appear, which might be smarter.)
 1917      */
 1918     ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "namespace scan\n"));
 1919     AcpiWalkNamespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, 100, acpi_probe_child,
 1920         NULL, bus, NULL);
 1921 
 1922     /* Pre-allocate resources for our rman from any sysresource devices. */
 1923     acpi_sysres_alloc(bus);
 1924 
 1925     /* Reserve resources already allocated to children. */
 1926     acpi_reserve_resources(bus);
 1927 
 1928     /* Create any static children by calling device identify methods. */
 1929     ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "device identify routines\n"));
 1930     bus_generic_probe(bus);
 1931 
 1932     /* Probe/attach all children, created statically and from the namespace. */
 1933     ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "acpi bus_generic_attach\n"));
 1934     bus_generic_attach(bus);
 1935 
 1936     /* Attach wake sysctls. */
 1937     acpi_wake_sysctl_walk(bus);
 1938 
 1939     ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "done attaching children\n"));
 1940     return_VOID;
 1941 }
 1942 
 1943 /*
 1944  * Determine the probe order for a given device.
 1945  */
 1946 static void
 1947 acpi_probe_order(ACPI_HANDLE handle, int *order)
 1948 {
 1949         ACPI_OBJECT_TYPE type;
 1950 
 1951         /*
 1952          * 0. CPUs
 1953          * 1. I/O port and memory system resource holders
 1954          * 2. Clocks and timers (to handle early accesses)
 1955          * 3. Embedded controllers (to handle early accesses)
 1956          * 4. PCI Link Devices
 1957          */
 1958         AcpiGetType(handle, &type);
 1959         if (type == ACPI_TYPE_PROCESSOR)
 1960                 *order = 0;
 1961         else if (acpi_MatchHid(handle, "PNP0C01") ||
 1962             acpi_MatchHid(handle, "PNP0C02"))
 1963                 *order = 1;
 1964         else if (acpi_MatchHid(handle, "PNP0100") ||
 1965             acpi_MatchHid(handle, "PNP0103") ||
 1966             acpi_MatchHid(handle, "PNP0B00"))
 1967                 *order = 2;
 1968         else if (acpi_MatchHid(handle, "PNP0C09"))
 1969                 *order = 3;
 1970         else if (acpi_MatchHid(handle, "PNP0C0F"))
 1971                 *order = 4;
 1972 }
 1973 
 1974 /*
 1975  * Evaluate a child device and determine whether we might attach a device to
 1976  * it.
 1977  */
 1978 static ACPI_STATUS
 1979 acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status)
 1980 {
 1981     struct acpi_prw_data prw;
 1982     ACPI_OBJECT_TYPE type;
 1983     ACPI_HANDLE h;
 1984     device_t bus, child;
 1985     char *handle_str;
 1986     int order;
 1987 
 1988     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 1989 
 1990     if (acpi_disabled("children"))
 1991         return_ACPI_STATUS (AE_OK);
 1992 
 1993     /* Skip this device if we think we'll have trouble with it. */
 1994     if (acpi_avoid(handle))
 1995         return_ACPI_STATUS (AE_OK);
 1996 
 1997     bus = (device_t)context;
 1998     if (ACPI_SUCCESS(AcpiGetType(handle, &type))) {
 1999         handle_str = acpi_name(handle);
 2000         switch (type) {
 2001         case ACPI_TYPE_DEVICE:
 2002             /*
 2003              * Since we scan from \, be sure to skip system scope objects.
 2004              * \_SB_ and \_TZ_ are defined in ACPICA as devices to work around
 2005              * BIOS bugs.  For example, \_SB_ is to allow \_SB_._INI to be run
 2006              * during initialization and \_TZ_ is to support Notify() on it.
 2007              */
 2008             if (strcmp(handle_str, "\\_SB_") == 0 ||
 2009                 strcmp(handle_str, "\\_TZ_") == 0)
 2010                 break;
 2011             if (acpi_parse_prw(handle, &prw) == 0)
 2012                 AcpiSetupGpeForWake(handle, prw.gpe_handle, prw.gpe_bit);
 2013 
 2014             /*
 2015              * Ignore devices that do not have a _HID or _CID.  They should
 2016              * be discovered by other buses (e.g. the PCI bus driver).
 2017              */
 2018             if (!acpi_has_hid(handle))
 2019                 break;
 2020             /* FALLTHROUGH */
 2021         case ACPI_TYPE_PROCESSOR:
 2022         case ACPI_TYPE_THERMAL:
 2023         case ACPI_TYPE_POWER:
 2024             /* 
 2025              * Create a placeholder device for this node.  Sort the
 2026              * placeholder so that the probe/attach passes will run
 2027              * breadth-first.  Orders less than ACPI_DEV_BASE_ORDER
 2028              * are reserved for special objects (i.e., system
 2029              * resources).
 2030              */
 2031             ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "scanning '%s'\n", handle_str));
 2032             order = level * 10 + ACPI_DEV_BASE_ORDER;
 2033             acpi_probe_order(handle, &order);
 2034             child = BUS_ADD_CHILD(bus, order, NULL, -1);
 2035             if (child == NULL)
 2036                 break;
 2037 
 2038             /* Associate the handle with the device_t and vice versa. */
 2039             acpi_set_handle(child, handle);
 2040             AcpiAttachData(handle, acpi_fake_objhandler, child);
 2041 
 2042             /*
 2043              * Check that the device is present.  If it's not present,
 2044              * leave it disabled (so that we have a device_t attached to
 2045              * the handle, but we don't probe it).
 2046              *
 2047              * XXX PCI link devices sometimes report "present" but not
 2048              * "functional" (i.e. if disabled).  Go ahead and probe them
 2049              * anyway since we may enable them later.
 2050              */
 2051             if (type == ACPI_TYPE_DEVICE && !acpi_DeviceIsPresent(child)) {
 2052                 /* Never disable PCI link devices. */
 2053                 if (acpi_MatchHid(handle, "PNP0C0F"))
 2054                     break;
 2055                 /*
 2056                  * Docking stations should remain enabled since the system
 2057                  * may be undocked at boot.
 2058                  */
 2059                 if (ACPI_SUCCESS(AcpiGetHandle(handle, "_DCK", &h)))
 2060                     break;
 2061 
 2062                 device_disable(child);
 2063                 break;
 2064             }
 2065 
 2066             /*
 2067              * Get the device's resource settings and attach them.
 2068              * Note that if the device has _PRS but no _CRS, we need
 2069              * to decide when it's appropriate to try to configure the
 2070              * device.  Ignore the return value here; it's OK for the
 2071              * device not to have any resources.
 2072              */
 2073             acpi_parse_resources(child, handle, &acpi_res_parse_set, NULL);
 2074             break;
 2075         }
 2076     }
 2077 
 2078     return_ACPI_STATUS (AE_OK);
 2079 }
 2080 
 2081 /*
 2082  * AcpiAttachData() requires an object handler but never uses it.  This is a
 2083  * placeholder object handler so we can store a device_t in an ACPI_HANDLE.
 2084  */
 2085 void
 2086 acpi_fake_objhandler(ACPI_HANDLE h, void *data)
 2087 {
 2088 }
 2089 
 2090 static void
 2091 acpi_shutdown_final(void *arg, int howto)
 2092 {
 2093     struct acpi_softc *sc = (struct acpi_softc *)arg;
 2094     register_t intr;
 2095     ACPI_STATUS status;
 2096 
 2097     /*
 2098      * XXX Shutdown code should only run on the BSP (cpuid 0).
 2099      * Some chipsets do not power off the system correctly if called from
 2100      * an AP.
 2101      */
 2102     if ((howto & RB_POWEROFF) != 0) {
 2103         status = AcpiEnterSleepStatePrep(ACPI_STATE_S5);
 2104         if (ACPI_FAILURE(status)) {
 2105             device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n",
 2106                 AcpiFormatException(status));
 2107             return;
 2108         }
 2109         device_printf(sc->acpi_dev, "Powering system off\n");
 2110         intr = intr_disable();
 2111         status = AcpiEnterSleepState(ACPI_STATE_S5);
 2112         if (ACPI_FAILURE(status)) {
 2113             intr_restore(intr);
 2114             device_printf(sc->acpi_dev, "power-off failed - %s\n",
 2115                 AcpiFormatException(status));
 2116         } else {
 2117             DELAY(1000000);
 2118             intr_restore(intr);
 2119             device_printf(sc->acpi_dev, "power-off failed - timeout\n");
 2120         }
 2121     } else if ((howto & RB_HALT) == 0 && sc->acpi_handle_reboot) {
 2122         /* Reboot using the reset register. */
 2123         status = AcpiReset();
 2124         if (ACPI_SUCCESS(status)) {
 2125             DELAY(1000000);
 2126             device_printf(sc->acpi_dev, "reset failed - timeout\n");
 2127         } else if (status != AE_NOT_EXIST)
 2128             device_printf(sc->acpi_dev, "reset failed - %s\n",
 2129                 AcpiFormatException(status));
 2130     } else if (sc->acpi_do_disable && panicstr == NULL) {
 2131         /*
 2132          * Only disable ACPI if the user requested it.  On some systems, writing
 2133          * the disable value to SMI_CMD hangs the system.
 2134          */
 2135         device_printf(sc->acpi_dev, "Shutting down\n");
 2136         AcpiTerminate();
 2137     }
 2138 }
 2139 
 2140 static void
 2141 acpi_enable_fixed_events(struct acpi_softc *sc)
 2142 {
 2143     static int  first_time = 1;
 2144 
 2145     /* Enable and clear fixed events and install handlers. */
 2146     if ((AcpiGbl_FADT.Flags & ACPI_FADT_POWER_BUTTON) == 0) {
 2147         AcpiClearEvent(ACPI_EVENT_POWER_BUTTON);
 2148         AcpiInstallFixedEventHandler(ACPI_EVENT_POWER_BUTTON,
 2149                                      acpi_event_power_button_sleep, sc);
 2150         if (first_time)
 2151             device_printf(sc->acpi_dev, "Power Button (fixed)\n");
 2152     }
 2153     if ((AcpiGbl_FADT.Flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
 2154         AcpiClearEvent(ACPI_EVENT_SLEEP_BUTTON);
 2155         AcpiInstallFixedEventHandler(ACPI_EVENT_SLEEP_BUTTON,
 2156                                      acpi_event_sleep_button_sleep, sc);
 2157         if (first_time)
 2158             device_printf(sc->acpi_dev, "Sleep Button (fixed)\n");
 2159     }
 2160 
 2161     first_time = 0;
 2162 }
 2163 
 2164 /*
 2165  * Returns true if the device is actually present and should
 2166  * be attached to.  This requires the present, enabled, UI-visible 
 2167  * and diagnostics-passed bits to be set.
 2168  */
 2169 BOOLEAN
 2170 acpi_DeviceIsPresent(device_t dev)
 2171 {
 2172     ACPI_DEVICE_INFO    *devinfo;
 2173     ACPI_HANDLE         h;
 2174     BOOLEAN             present;
 2175 
 2176     if ((h = acpi_get_handle(dev)) == NULL ||
 2177         ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
 2178         return (FALSE);
 2179 
 2180     /*
 2181      * Onboard serial ports on certain AMD motherboards have an invalid _STA
 2182      * method that always returns 0.  Force them to always be treated as
 2183      * present.  This may solely be a quirk of a preproduction BIOS.
 2184      */
 2185     if (acpi_MatchHid(h, "AMDI0020") || acpi_MatchHid(h, "AMDI0010"))
 2186         return (TRUE);
 2187 
 2188     /* If no _STA method, must be present */
 2189     present = (devinfo->Valid & ACPI_VALID_STA) == 0 ||
 2190         ACPI_DEVICE_PRESENT(devinfo->CurrentStatus) ? TRUE : FALSE;
 2191 
 2192     AcpiOsFree(devinfo);
 2193     return (present);
 2194 }
 2195 
 2196 /*
 2197  * Returns true if the battery is actually present and inserted.
 2198  */
 2199 BOOLEAN
 2200 acpi_BatteryIsPresent(device_t dev)
 2201 {
 2202     ACPI_DEVICE_INFO    *devinfo;
 2203     ACPI_HANDLE         h;
 2204     BOOLEAN             present;
 2205 
 2206     if ((h = acpi_get_handle(dev)) == NULL ||
 2207         ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
 2208         return (FALSE);
 2209 
 2210     /* If no _STA method, must be present */
 2211     present = (devinfo->Valid & ACPI_VALID_STA) == 0 ||
 2212         ACPI_BATTERY_PRESENT(devinfo->CurrentStatus) ? TRUE : FALSE;
 2213 
 2214     AcpiOsFree(devinfo);
 2215     return (present);
 2216 }
 2217 
 2218 /*
 2219  * Returns true if a device has at least one valid device ID.
 2220  */
 2221 static BOOLEAN
 2222 acpi_has_hid(ACPI_HANDLE h)
 2223 {
 2224     ACPI_DEVICE_INFO    *devinfo;
 2225     BOOLEAN             ret;
 2226 
 2227     if (h == NULL ||
 2228         ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
 2229         return (FALSE);
 2230 
 2231     ret = FALSE;
 2232     if ((devinfo->Valid & ACPI_VALID_HID) != 0)
 2233         ret = TRUE;
 2234     else if ((devinfo->Valid & ACPI_VALID_CID) != 0)
 2235         if (devinfo->CompatibleIdList.Count > 0)
 2236             ret = TRUE;
 2237 
 2238     AcpiOsFree(devinfo);
 2239     return (ret);
 2240 }
 2241 
 2242 /*
 2243  * Match a HID string against a handle
 2244  */
 2245 BOOLEAN
 2246 acpi_MatchHid(ACPI_HANDLE h, const char *hid) 
 2247 {
 2248     ACPI_DEVICE_INFO    *devinfo;
 2249     BOOLEAN             ret;
 2250     int                 i;
 2251 
 2252     if (hid == NULL || h == NULL ||
 2253         ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
 2254         return (FALSE);
 2255 
 2256     ret = FALSE;
 2257     if ((devinfo->Valid & ACPI_VALID_HID) != 0 &&
 2258         strcmp(hid, devinfo->HardwareId.String) == 0)
 2259             ret = TRUE;
 2260     else if ((devinfo->Valid & ACPI_VALID_CID) != 0)
 2261         for (i = 0; i < devinfo->CompatibleIdList.Count; i++) {
 2262             if (strcmp(hid, devinfo->CompatibleIdList.Ids[i].String) == 0) {
 2263                 ret = TRUE;
 2264                 break;
 2265             }
 2266         }
 2267 
 2268     AcpiOsFree(devinfo);
 2269     return (ret);
 2270 }
 2271 
 2272 /*
 2273  * Return the handle of a named object within our scope, i.e. that of (parent)
 2274  * or one of its parents.
 2275  */
 2276 ACPI_STATUS
 2277 acpi_GetHandleInScope(ACPI_HANDLE parent, char *path, ACPI_HANDLE *result)
 2278 {
 2279     ACPI_HANDLE         r;
 2280     ACPI_STATUS         status;
 2281 
 2282     /* Walk back up the tree to the root */
 2283     for (;;) {
 2284         status = AcpiGetHandle(parent, path, &r);
 2285         if (ACPI_SUCCESS(status)) {
 2286             *result = r;
 2287             return (AE_OK);
 2288         }
 2289         /* XXX Return error here? */
 2290         if (status != AE_NOT_FOUND)
 2291             return (AE_OK);
 2292         if (ACPI_FAILURE(AcpiGetParent(parent, &r)))
 2293             return (AE_NOT_FOUND);
 2294         parent = r;
 2295     }
 2296 }
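/*
 * Example (a minimal sketch): looking up an object visible from a device's
 * scope or any of its ancestors.  The _BBN lookup is illustrative only.
 *
 *      ACPI_HANDLE bbn;
 *
 *      if (ACPI_SUCCESS(acpi_GetHandleInScope(handle, "_BBN", &bbn)))
 *              ... bbn names the closest enclosing _BBN object ...
 */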
 2297 
 2298 /*
 2299  * Allocate a buffer with a preset data size.
 2300  */
 2301 ACPI_BUFFER *
 2302 acpi_AllocBuffer(int size)
 2303 {
 2304     ACPI_BUFFER *buf;
 2305 
 2306     if ((buf = malloc(size + sizeof(*buf), M_ACPIDEV, M_NOWAIT)) == NULL)
 2307         return (NULL);
 2308     buf->Length = size;
 2309     buf->Pointer = (void *)(buf + 1);
 2310     return (buf);
 2311 }
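/*
 * Example (a minimal sketch): using a preset-size buffer for an evaluation.
 * The buffer comes from malloc(M_ACPIDEV), so it is released with free(),
 * not AcpiOsFree(); the "_CRS" evaluation is illustrative only.
 *
 *      ACPI_BUFFER *buf;
 *
 *      if ((buf = acpi_AllocBuffer(256)) != NULL) {
 *              status = AcpiEvaluateObject(handle, "_CRS", NULL, buf);
 *              ...
 *              free(buf, M_ACPIDEV);
 *      }
 */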
 2312 
 2313 ACPI_STATUS
 2314 acpi_SetInteger(ACPI_HANDLE handle, char *path, UINT32 number)
 2315 {
 2316     ACPI_OBJECT arg1;
 2317     ACPI_OBJECT_LIST args;
 2318 
 2319     arg1.Type = ACPI_TYPE_INTEGER;
 2320     arg1.Integer.Value = number;
 2321     args.Count = 1;
 2322     args.Pointer = &arg1;
 2323 
 2324     return (AcpiEvaluateObject(handle, path, &args, NULL));
 2325 }
 2326 
 2327 /*
 2328  * Evaluate a path that should return an integer.
 2329  */
 2330 ACPI_STATUS
 2331 acpi_GetInteger(ACPI_HANDLE handle, char *path, UINT32 *number)
 2332 {
 2333     ACPI_STATUS status;
 2334     ACPI_BUFFER buf;
 2335     ACPI_OBJECT param;
 2336 
 2337     if (handle == NULL)
 2338         handle = ACPI_ROOT_OBJECT;
 2339 
 2340     /*
 2341      * Assume that what we've been pointed at is an Integer object, or
 2342      * a method that will return an Integer.
 2343      */
 2344     buf.Pointer = &param;
 2345     buf.Length = sizeof(param);
 2346     status = AcpiEvaluateObject(handle, path, NULL, &buf);
 2347     if (ACPI_SUCCESS(status)) {
 2348         if (param.Type == ACPI_TYPE_INTEGER)
 2349             *number = param.Integer.Value;
 2350         else
 2351             status = AE_TYPE;
 2352     }
 2353 
 2354     /* 
 2355      * In some applications, a method that's expected to return an Integer
 2356      * may instead return a Buffer (probably to simplify some internal
 2357      * arithmetic).  We'll try to fetch whatever it is, and if it's a Buffer,
 2358      * convert it into an Integer as best we can.
 2359      *
 2360      * This is a hack.
 2361      */
 2362     if (status == AE_BUFFER_OVERFLOW) {
 2363         if ((buf.Pointer = AcpiOsAllocate(buf.Length)) == NULL) {
 2364             status = AE_NO_MEMORY;
 2365         } else {
 2366             status = AcpiEvaluateObject(handle, path, NULL, &buf);
 2367             if (ACPI_SUCCESS(status))
 2368                 status = acpi_ConvertBufferToInteger(&buf, number);
 2369             AcpiOsFree(buf.Pointer);
 2370         }
 2371     }
 2372     return (status);
 2373 }
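/*
 * Example (a minimal sketch): reading and writing integer objects.  The
 * vendor method "VEND" is hypothetical; _STA is evaluated only to show the
 * calling convention.
 *
 *      UINT32 sta;
 *
 *      if (ACPI_SUCCESS(acpi_GetInteger(handle, "_STA", &sta)) &&
 *          ACPI_DEVICE_PRESENT(sta))
 *              (void)acpi_SetInteger(handle, "VEND", 1);
 */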
 2374 
 2375 ACPI_STATUS
 2376 acpi_ConvertBufferToInteger(ACPI_BUFFER *bufp, UINT32 *number)
 2377 {
 2378     ACPI_OBJECT *p;
 2379     UINT8       *val;
 2380     int         i;
 2381 
 2382     p = (ACPI_OBJECT *)bufp->Pointer;
 2383     if (p->Type == ACPI_TYPE_INTEGER) {
 2384         *number = p->Integer.Value;
 2385         return (AE_OK);
 2386     }
 2387     if (p->Type != ACPI_TYPE_BUFFER)
 2388         return (AE_TYPE);
 2389     if (p->Buffer.Length > sizeof(int))
 2390         return (AE_BAD_DATA);
 2391 
 2392     *number = 0;
 2393     val = p->Buffer.Pointer;
 2394     for (i = 0; i < p->Buffer.Length; i++)
 2395         *number += val[i] << (i * 8);
 2396     return (AE_OK);
 2397 }
 2398 
 2399 /*
 2400  * Iterate over the elements of a package object, calling the supplied
 2401  * function for each element.
 2402  *
 2403  * XXX possible enhancement might be to abort traversal on error.
 2404  */
 2405 ACPI_STATUS
 2406 acpi_ForeachPackageObject(ACPI_OBJECT *pkg,
 2407         void (*func)(ACPI_OBJECT *comp, void *arg), void *arg)
 2408 {
 2409     ACPI_OBJECT *comp;
 2410     int         i;
 2411 
 2412     if (pkg == NULL || pkg->Type != ACPI_TYPE_PACKAGE)
 2413         return (AE_BAD_PARAMETER);
 2414 
 2415     /* Iterate over components */
 2416     i = 0;
 2417     comp = pkg->Package.Elements;
 2418     for (; i < pkg->Package.Count; i++, comp++)
 2419         func(comp, arg);
 2420 
 2421     return (AE_OK);
 2422 }
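/*
 * Example (a minimal sketch): counting the Integer elements of a package.
 * The "pkg" object and the count_ints() helper are hypothetical.
 *
 *      static void
 *      count_ints(ACPI_OBJECT *comp, void *arg)
 *      {
 *              if (comp->Type == ACPI_TYPE_INTEGER)
 *                      (*(int *)arg)++;
 *      }
 *
 *      int nints = 0;
 *      acpi_ForeachPackageObject(pkg, count_ints, &nints);
 */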
 2423 
 2424 /*
 2425  * Find the (index)th resource object in a set.
 2426  */
 2427 ACPI_STATUS
 2428 acpi_FindIndexedResource(ACPI_BUFFER *buf, int index, ACPI_RESOURCE **resp)
 2429 {
 2430     ACPI_RESOURCE       *rp;
 2431     int                 i;
 2432 
 2433     rp = (ACPI_RESOURCE *)buf->Pointer;
 2434     i = index;
 2435     while (i-- > 0) {
 2436         /* Range check */
 2437         if (rp > (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length))
 2438             return (AE_BAD_PARAMETER);
 2439 
 2440         /* Check for terminator */
 2441         if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0)
 2442             return (AE_NOT_FOUND);
 2443         rp = ACPI_NEXT_RESOURCE(rp);
 2444     }
 2445     if (resp != NULL)
 2446         *resp = rp;
 2447 
 2448     return (AE_OK);
 2449 }
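/*
 * Example (a minimal sketch): indexing into a resource buffer.  The "crs"
 * buffer is assumed to have been filled by AcpiGetCurrentResources().
 *
 *      ACPI_RESOURCE *rp;
 *
 *      if (ACPI_SUCCESS(acpi_FindIndexedResource(&crs, 2, &rp)))
 *              ... rp points at the third resource descriptor ...
 */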
 2450 
 2451 /*
 2452  * Append an ACPI_RESOURCE to an ACPI_BUFFER.
 2453  *
 2454  * Given a pointer to an ACPI_RESOURCE structure, expand the ACPI_BUFFER
 2455  * provided to contain it.  If the ACPI_BUFFER is empty, allocate a sensible
 2456  * backing block.  If the ACPI_RESOURCE is NULL, return an empty set of
 2457  * resources.
 2458  */
 2459 #define ACPI_INITIAL_RESOURCE_BUFFER_SIZE       512
 2460 
 2461 ACPI_STATUS
 2462 acpi_AppendBufferResource(ACPI_BUFFER *buf, ACPI_RESOURCE *res)
 2463 {
 2464     ACPI_RESOURCE       *rp;
 2465     void                *newp;
 2466 
 2467     /* Initialize the buffer if necessary. */
 2468     if (buf->Pointer == NULL) {
 2469         buf->Length = ACPI_INITIAL_RESOURCE_BUFFER_SIZE;
 2470         if ((buf->Pointer = AcpiOsAllocate(buf->Length)) == NULL)
 2471             return (AE_NO_MEMORY);
 2472         rp = (ACPI_RESOURCE *)buf->Pointer;
 2473         rp->Type = ACPI_RESOURCE_TYPE_END_TAG;
 2474         rp->Length = ACPI_RS_SIZE_MIN;
 2475     }
 2476     if (res == NULL)
 2477         return (AE_OK);
 2478 
 2479     /*
 2480      * Scan the current buffer looking for the terminator.
 2481      * This will either find the terminator or hit the end
 2482      * of the buffer and return an error.
 2483      */
 2484     rp = (ACPI_RESOURCE *)buf->Pointer;
 2485     for (;;) {
 2486         /* Range check, don't go outside the buffer */
 2487         if (rp >= (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length))
 2488             return (AE_BAD_PARAMETER);
 2489         if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0)
 2490             break;
 2491         rp = ACPI_NEXT_RESOURCE(rp);
 2492     }
 2493 
 2494     /*
 2495      * Check the size of the buffer and expand if required.
 2496      *
 2497      * Required size is:
 2498      *  size of existing resources before terminator + 
 2499      *  size of new resource and header +
 2500      *  size of terminator.
 2501      *
 2502      * Note that this loop should really only run once, unless
 2503      * for some reason we are stuffing a *really* huge resource.
 2504      */
 2505     while ((((u_int8_t *)rp - (u_int8_t *)buf->Pointer) + 
 2506             res->Length + ACPI_RS_SIZE_NO_DATA +
 2507             ACPI_RS_SIZE_MIN) >= buf->Length) {
 2508         if ((newp = AcpiOsAllocate(buf->Length * 2)) == NULL)
 2509             return (AE_NO_MEMORY);
 2510         bcopy(buf->Pointer, newp, buf->Length);
 2511         rp = (ACPI_RESOURCE *)((u_int8_t *)newp +
 2512                                ((u_int8_t *)rp - (u_int8_t *)buf->Pointer));
 2513         AcpiOsFree(buf->Pointer);
 2514         buf->Pointer = newp;
 2515         buf->Length += buf->Length;
 2516     }
 2517 
 2518     /* Insert the new resource. */
 2519     bcopy(res, rp, res->Length + ACPI_RS_SIZE_NO_DATA);
 2520 
 2521     /* And add the terminator. */
 2522     rp = ACPI_NEXT_RESOURCE(rp);
 2523     rp->Type = ACPI_RESOURCE_TYPE_END_TAG;
 2524     rp->Length = ACPI_RS_SIZE_MIN;
 2525 
 2526     return (AE_OK);
 2527 }
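/*
 * Example (a minimal sketch): building a resource set from scratch.  An
 * empty ACPI_BUFFER makes the first call allocate and terminate the
 * backing block; "ares" is a hypothetical, already-filled descriptor.
 *
 *      ACPI_BUFFER buf = { 0, NULL };
 *
 *      acpi_AppendBufferResource(&buf, &ares);
 *      ... hand buf to a consumer such as AcpiSetCurrentResources() ...
 *      AcpiOsFree(buf.Pointer);
 */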
 2528 
 2529 ACPI_STATUS
 2530 acpi_EvaluateOSC(ACPI_HANDLE handle, uint8_t *uuid, int revision, int count,
 2531     uint32_t *caps_in, uint32_t *caps_out, bool query)
 2532 {
 2533         ACPI_OBJECT arg[4], *ret;
 2534         ACPI_OBJECT_LIST arglist;
 2535         ACPI_BUFFER buf;
 2536         ACPI_STATUS status;
 2537 
 2538         arglist.Pointer = arg;
 2539         arglist.Count = 4;
 2540         arg[0].Type = ACPI_TYPE_BUFFER;
 2541         arg[0].Buffer.Length = ACPI_UUID_LENGTH;
 2542         arg[0].Buffer.Pointer = uuid;
 2543         arg[1].Type = ACPI_TYPE_INTEGER;
 2544         arg[1].Integer.Value = revision;
 2545         arg[2].Type = ACPI_TYPE_INTEGER;
 2546         arg[2].Integer.Value = count;
 2547         arg[3].Type = ACPI_TYPE_BUFFER;
 2548         arg[3].Buffer.Length = count * sizeof(*caps_in);
 2549         arg[3].Buffer.Pointer = (uint8_t *)caps_in;
 2550         caps_in[0] = query ? 1 : 0;
 2551         buf.Pointer = NULL;
 2552         buf.Length = ACPI_ALLOCATE_BUFFER;
 2553         status = AcpiEvaluateObjectTyped(handle, "_OSC", &arglist, &buf,
 2554             ACPI_TYPE_BUFFER);
 2555         if (ACPI_FAILURE(status))
 2556                 return (status);
 2557         if (caps_out != NULL) {
 2558                 ret = buf.Pointer;
 2559                 if (ret->Buffer.Length != count * sizeof(*caps_out)) {
 2560                         AcpiOsFree(buf.Pointer);
 2561                         return (AE_BUFFER_OVERFLOW);
 2562                 }
 2563                 bcopy(ret->Buffer.Pointer, caps_out, ret->Buffer.Length);
 2564         }
 2565         AcpiOsFree(buf.Pointer);
 2566         return (status);
 2567 }
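/*
 * Example (a minimal sketch): an _OSC capability query using the common
 * two-DWORD layout (first DWORD for errors, second for capabilities).
 * The "uuid" array and the layout itself are assumptions of the caller.
 *
 *      uint32_t caps_in[2] = { 0, 0 };
 *      uint32_t caps_out[2];
 *
 *      status = acpi_EvaluateOSC(handle, uuid, 1, 2, caps_in, caps_out,
 *          true);
 *      if (ACPI_SUCCESS(status))
 *              ... caps_out[1] holds the granted capabilities ...
 */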
 2568 
 2569 /*
 2570  * Set interrupt model.
 2571  */
 2572 ACPI_STATUS
 2573 acpi_SetIntrModel(int model)
 2574 {
 2575 
 2576     return (acpi_SetInteger(ACPI_ROOT_OBJECT, "_PIC", model));
 2577 }
 2578 
 2579 /*
 2580  * Walk subtables of a table and call a callback routine for each
 2581  * subtable.  The caller should provide the first subtable and a
 2582  * pointer to the end of the table.  This can be used to walk tables
 2583  * such as MADT and SRAT that use subtable entries.
 2584  */
 2585 void
 2586 acpi_walk_subtables(void *first, void *end, acpi_subtable_handler *handler,
 2587     void *arg)
 2588 {
 2589     ACPI_SUBTABLE_HEADER *entry;
 2590 
 2591     for (entry = first; (void *)entry < end; ) {
 2592         /* Avoid an infinite loop if we hit a bogus entry. */
 2593         if (entry->Length < sizeof(ACPI_SUBTABLE_HEADER))
 2594             return;
 2595 
 2596         handler(entry, arg);
 2597         entry = ACPI_ADD_PTR(ACPI_SUBTABLE_HEADER, entry, entry->Length);
 2598     }
 2599 }
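/*
 * Example (a minimal sketch): walking the subtables of a MADT.  The
 * "madt" pointer and madt_handler() callback are hypothetical.
 *
 *      ACPI_TABLE_MADT *madt = ...;
 *
 *      acpi_walk_subtables(madt + 1,
 *          (char *)madt + madt->Header.Length, madt_handler, NULL);
 */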
 2600 
 2601 /*
 2602  * DEPRECATED.  This interface has serious deficiencies and will be
 2603  * removed.
 2604  *
 2605  * Immediately enter the sleep state.  In the old model, acpiconf(8) ran
 2606  * rc.suspend and rc.resume so we don't have to notify devd(8) to do this.
 2607  */
 2608 ACPI_STATUS
 2609 acpi_SetSleepState(struct acpi_softc *sc, int state)
 2610 {
 2611     static int once;
 2612 
 2613     if (!once) {
 2614         device_printf(sc->acpi_dev,
 2615 "warning: acpi_SetSleepState() deprecated, need to update your software\n");
 2616         once = 1;
 2617     }
 2618     return (acpi_EnterSleepState(sc, state));
 2619 }
 2620 
 2621 #if defined(__amd64__) || defined(__i386__)
 2622 static void
 2623 acpi_sleep_force_task(void *context)
 2624 {
 2625     struct acpi_softc *sc = (struct acpi_softc *)context;
 2626 
 2627     if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate)))
 2628         device_printf(sc->acpi_dev, "force sleep state S%d failed\n",
 2629             sc->acpi_next_sstate);
 2630 }
 2631 
 2632 static void
 2633 acpi_sleep_force(void *arg)
 2634 {
 2635     struct acpi_softc *sc = (struct acpi_softc *)arg;
 2636 
 2637     device_printf(sc->acpi_dev,
 2638         "suspend request timed out, forcing sleep now\n");
 2639     /*
 2640      * XXX Suspending from callout causes freezes in DEVICE_SUSPEND().
 2641      * Suspend from acpi_task thread instead.
 2642      */
 2643     if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
 2644         acpi_sleep_force_task, sc)))
 2645         device_printf(sc->acpi_dev, "AcpiOsExecute() for sleeping failed\n");
 2646 }
 2647 #endif
 2648 
 2649 /*
 2650  * Request that the system enter the given suspend state.  All /dev/apm
 2651  * devices and devd(8) will be notified.  Userland then has a chance to
 2652  * save state and acknowledge the request.  The system sleeps once all
 2653  * acks are in.
 2654  */
 2655 int
 2656 acpi_ReqSleepState(struct acpi_softc *sc, int state)
 2657 {
 2658 #if defined(__amd64__) || defined(__i386__)
 2659     struct apm_clone_data *clone;
 2660     ACPI_STATUS status;
 2661 
 2662     if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX)
 2663         return (EINVAL);
 2664     if (!acpi_sleep_states[state])
 2665         return (EOPNOTSUPP);
 2666 
 2667     /*
 2668      * If a reboot/shutdown/suspend request is already in progress or
 2669      * suspend is blocked due to an upcoming shutdown, just return.
 2670      */
 2671     if (rebooting || sc->acpi_next_sstate != 0 || suspend_blocked) {
 2672         return (0);
 2673     }
 2674 
 2675     /* Wait until sleep is enabled. */
 2676     while (sc->acpi_sleep_disabled) {
 2677         AcpiOsSleep(1000);
 2678     }
 2679 
 2680     ACPI_LOCK(acpi);
 2681 
 2682     sc->acpi_next_sstate = state;
 2683 
 2684     /* S5 (soft-off) should be entered directly with no waiting. */
 2685     if (state == ACPI_STATE_S5) {
 2686         ACPI_UNLOCK(acpi);
 2687         status = acpi_EnterSleepState(sc, state);
 2688         return (ACPI_SUCCESS(status) ? 0 : ENXIO);
 2689     }
 2690 
 2691     /* Record the pending state and notify all apm devices. */
 2692     STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) {
 2693         clone->notify_status = APM_EV_NONE;
 2694         if ((clone->flags & ACPI_EVF_DEVD) == 0) {
 2695             selwakeuppri(&clone->sel_read, PZERO);
 2696             KNOTE_LOCKED(&clone->sel_read.si_note, 0);
 2697         }
 2698     }
 2699 
 2700     /* If devd(8) is not running, immediately enter the sleep state. */
 2701     if (!devctl_process_running()) {
 2702         ACPI_UNLOCK(acpi);
 2703         status = acpi_EnterSleepState(sc, state);
 2704         return (ACPI_SUCCESS(status) ? 0 : ENXIO);
 2705     }
 2706 
 2707     /*
 2708      * Set a timeout to fire if userland doesn't ack the suspend request
 2709      * in time.  This way we still eventually go to sleep if we were
 2710      * overheating or running low on battery, even if userland is hung.
 2711      * We cancel this timeout once all userland acks are in or the
 2712      * suspend request is aborted.
 2713      */
 2714     callout_reset(&sc->susp_force_to, 10 * hz, acpi_sleep_force, sc);
 2715     ACPI_UNLOCK(acpi);
 2716 
 2717     /* Now notify devd(8) also. */
 2718     acpi_UserNotify("Suspend", ACPI_ROOT_OBJECT, state);
 2719 
 2720     return (0);
 2721 #else
 2722     /* This platform does not support acpi suspend/resume. */
 2723     return (EOPNOTSUPP);
 2724 #endif
 2725 }
 2726 
 2727 /*
 2728  * Acknowledge (or reject) a pending sleep state.  The caller has
 2729  * prepared for suspend and is now ready for it to proceed.  If the
 2730  * error argument is non-zero, it indicates suspend should be cancelled
 2731  * and gives an errno value describing why.  Once all votes are in,
 2732  * we suspend the system.
 2733  */
 2734 int
 2735 acpi_AckSleepState(struct apm_clone_data *clone, int error)
 2736 {
 2737 #if defined(__amd64__) || defined(__i386__)
 2738     struct acpi_softc *sc;
 2739     int ret, sleeping;
 2740 
 2741     /* If no pending sleep state, return an error. */
 2742     ACPI_LOCK(acpi);
 2743     sc = clone->acpi_sc;
 2744     if (sc->acpi_next_sstate == 0) {
 2745         ACPI_UNLOCK(acpi);
 2746         return (ENXIO);
 2747     }
 2748 
 2749     /* Caller wants to abort suspend process. */
 2750     if (error) {
 2751         sc->acpi_next_sstate = 0;
 2752         callout_stop(&sc->susp_force_to);
 2753         device_printf(sc->acpi_dev,
 2754             "listener on %s cancelled the pending suspend\n",
 2755             devtoname(clone->cdev));
 2756         ACPI_UNLOCK(acpi);
 2757         return (0);
 2758     }
 2759 
 2760     /*
 2761      * Mark this device as acking the suspend request.  Then, walk through
 2762      * all devices, seeing if they agree yet.  We only count devices that
 2763      * are writable since read-only devices couldn't ack the request.
 2764      */
 2765     sleeping = TRUE;
 2766     clone->notify_status = APM_EV_ACKED;
 2767     STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) {
 2768         if ((clone->flags & ACPI_EVF_WRITE) != 0 &&
 2769             clone->notify_status != APM_EV_ACKED) {
 2770             sleeping = FALSE;
 2771             break;
 2772         }
 2773     }
 2774 
 2775     /* If all devices have voted "yes", we will suspend now. */
 2776     if (sleeping)
 2777         callout_stop(&sc->susp_force_to);
 2778     ACPI_UNLOCK(acpi);
 2779     ret = 0;
 2780     if (sleeping) {
 2781         if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate)))
 2782                 ret = ENODEV;
 2783     }
 2784     return (ret);
 2785 #else
 2786     /* This platform does not support acpi suspend/resume. */
 2787     return (EOPNOTSUPP);
 2788 #endif
 2789 }
 2790 
 2791 static void
 2792 acpi_sleep_enable(void *arg)
 2793 {
 2794     struct acpi_softc   *sc = (struct acpi_softc *)arg;
 2795 
 2796     ACPI_LOCK_ASSERT(acpi);
 2797 
 2798     /* Reschedule if the system is not fully up and running. */
 2799     if (!AcpiGbl_SystemAwakeAndRunning) {
 2800         callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME);
 2801         return;
 2802     }
 2803 
 2804     sc->acpi_sleep_disabled = FALSE;
 2805 }
 2806 
 2807 static ACPI_STATUS
 2808 acpi_sleep_disable(struct acpi_softc *sc)
 2809 {
 2810     ACPI_STATUS         status;
 2811 
 2812     /* Fail if the system is not fully up and running. */
 2813     if (!AcpiGbl_SystemAwakeAndRunning)
 2814         return (AE_ERROR);
 2815 
 2816     ACPI_LOCK(acpi);
 2817     status = sc->acpi_sleep_disabled ? AE_ERROR : AE_OK;
 2818     sc->acpi_sleep_disabled = TRUE;
 2819     ACPI_UNLOCK(acpi);
 2820 
 2821     return (status);
 2822 }
 2823 
 2824 enum acpi_sleep_state {
 2825     ACPI_SS_NONE,
 2826     ACPI_SS_GPE_SET,
 2827     ACPI_SS_DEV_SUSPEND,
 2828     ACPI_SS_SLP_PREP,
 2829     ACPI_SS_SLEPT,
 2830 };
 2831 
 2832 /*
 2833  * Enter the desired system sleep state.
 2834  *
 2835  * Currently we support S1-S5, but S4 is only S4BIOS.
 2836  */
 2837 static ACPI_STATUS
 2838 acpi_EnterSleepState(struct acpi_softc *sc, int state)
 2839 {
 2840     register_t intr;
 2841     ACPI_STATUS status;
 2842     ACPI_EVENT_STATUS power_button_status;
 2843     enum acpi_sleep_state slp_state;
 2844     int sleep_result;
 2845 
 2846     ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
 2847 
 2848     if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX)
 2849         return_ACPI_STATUS (AE_BAD_PARAMETER);
 2850     if (!acpi_sleep_states[state]) {
 2851         device_printf(sc->acpi_dev, "Sleep state S%d not supported by BIOS\n",
 2852             state);
 2853         return (AE_SUPPORT);
 2854     }
 2855 
 2856     /* Re-entry once we're suspending is not allowed. */
 2857     status = acpi_sleep_disable(sc);
 2858     if (ACPI_FAILURE(status)) {
 2859         device_printf(sc->acpi_dev,
 2860             "suspend request ignored (not ready yet)\n");
 2861         return (status);
 2862     }
 2863 
 2864     if (state == ACPI_STATE_S5) {
 2865         /*
 2866          * Shut down cleanly and power off.  This will call us back through the
 2867          * shutdown handlers.
 2868          */
 2869         shutdown_nice(RB_POWEROFF);
 2870         return_ACPI_STATUS (AE_OK);
 2871     }
 2872 
 2873     EVENTHANDLER_INVOKE(power_suspend_early);
 2874     stop_all_proc();
 2875     EVENTHANDLER_INVOKE(power_suspend);
 2876 
 2877 #ifdef EARLY_AP_STARTUP
 2878     MPASS(mp_ncpus == 1 || smp_started);
 2879     thread_lock(curthread);
 2880     sched_bind(curthread, 0);
 2881     thread_unlock(curthread);
 2882 #else
 2883     if (smp_started) {
 2884         thread_lock(curthread);
 2885         sched_bind(curthread, 0);
 2886         thread_unlock(curthread);
 2887     }
 2888 #endif
 2889 
 2890     /*
 2891      * Be sure to hold Giant across DEVICE_SUSPEND/RESUME since non-MPSAFE
 2892      * drivers need this.
 2893      */
 2894     mtx_lock(&Giant);
 2895 
 2896     slp_state = ACPI_SS_NONE;
 2897 
 2898     sc->acpi_sstate = state;
 2899 
 2900     /* Enable any GPEs as appropriate and requested by the user. */
 2901     acpi_wake_prep_walk(state);
 2902     slp_state = ACPI_SS_GPE_SET;
 2903 
 2904     /*
 2905      * Inform all devices that we are going to sleep.  If at least one
 2906      * device fails, DEVICE_SUSPEND() automatically resumes the tree.
 2907      *
 2908      * XXX A two-pass approach with a 'veto' pass followed by the real
 2909      * suspend pass would be better, but the current bus interface does
 2910      * not provide for this.
 2911      */
 2912     if (DEVICE_SUSPEND(root_bus) != 0) {
 2913         device_printf(sc->acpi_dev, "device_suspend failed\n");
 2914         goto backout;
 2915     }
 2916     slp_state = ACPI_SS_DEV_SUSPEND;
 2917 
 2918     status = AcpiEnterSleepStatePrep(state);
 2919     if (ACPI_FAILURE(status)) {
 2920         device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n",
 2921                       AcpiFormatException(status));
 2922         goto backout;
 2923     }
 2924     slp_state = ACPI_SS_SLP_PREP;
 2925 
 2926     if (sc->acpi_sleep_delay > 0)
 2927         DELAY(sc->acpi_sleep_delay * 1000000);
 2928 
 2929     suspendclock();
 2930     intr = intr_disable();
 2931     if (state != ACPI_STATE_S1) {
 2932         sleep_result = acpi_sleep_machdep(sc, state);
 2933         acpi_wakeup_machdep(sc, state, sleep_result, 0);
 2934 
 2935         /*
 2936          * XXX According to the ACPI specification, the SCI_EN bit should be
 2937          * restored by the platform firmware (BIOS) to its pre-sleep state.
 2938          * Unfortunately, some BIOSes fail to do so, which leads to unexpected
 2939          * and serious consequences during wakeup, such as the system getting
 2940          * stuck in SMI handlers.
 2941          * This hack is picked up from Linux, which claims that it follows
 2942          * Windows behavior.
 2943          */
 2944         if (sleep_result == 1 && state != ACPI_STATE_S4)
 2945             AcpiWriteBitRegister(ACPI_BITREG_SCI_ENABLE, ACPI_ENABLE_EVENT);
 2946 
 2947         if (sleep_result == 1 && state == ACPI_STATE_S3) {
 2948             /*
 2949              * Prevent misinterpretation of a wakeup by the power button
 2950              * as a request for power off.
 2951              * Ideally we should post an appropriate wakeup event,
 2952              * perhaps using acpi_event_power_button_wake or the like.
 2953              *
 2954              * Clearing the power button status after wakeup is mandated
 2955              * by the ACPI specification in the section "Fixed Power Button".
 2956              *
 2957              * XXX As of ACPICA 20121114, AcpiGetEventStatus provides the
 2958              * status as 0/1, corresponding to inactive/active, despite
 2959              * its type being ACPI_EVENT_STATUS.  In other words, we
 2960              * should not test for ACPI_EVENT_FLAG_SET for the time being.
 2961              */
 2962             if (ACPI_SUCCESS(AcpiGetEventStatus(ACPI_EVENT_POWER_BUTTON,
 2963                 &power_button_status)) && power_button_status != 0) {
 2964                 AcpiClearEvent(ACPI_EVENT_POWER_BUTTON);
 2965                 device_printf(sc->acpi_dev,
 2966                     "cleared fixed power button status\n");
 2967             }
 2968         }
 2969 
 2970         intr_restore(intr);
 2971 
 2972         /* Call acpi_wakeup_machdep() again with interrupts enabled. */
 2973         acpi_wakeup_machdep(sc, state, sleep_result, 1);
 2974 
 2975         AcpiLeaveSleepStatePrep(state);
 2976 
 2977         if (sleep_result == -1)
 2978                 goto backout;
 2979 
 2980         /* Re-enable ACPI hardware on wakeup from sleep state 4. */
 2981         if (state == ACPI_STATE_S4)
 2982             AcpiEnable();
 2983     } else {
 2984         status = AcpiEnterSleepState(state);
 2985         intr_restore(intr);
 2986         AcpiLeaveSleepStatePrep(state);
 2987         if (ACPI_FAILURE(status)) {
 2988             device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n",
 2989                           AcpiFormatException(status));
 2990             goto backout;
 2991         }
 2992     }
 2993     slp_state = ACPI_SS_SLEPT;
 2994 
 2995     /*
 2996      * Back out state according to how far along we got in the suspend
 2997      * process.  This handles both the error and success cases.
 2998      */
 2999 backout:
 3000     if (slp_state >= ACPI_SS_SLP_PREP)
 3001         resumeclock();
 3002     if (slp_state >= ACPI_SS_GPE_SET) {
 3003         acpi_wake_prep_walk(state);
 3004         sc->acpi_sstate = ACPI_STATE_S0;
 3005     }
 3006     if (slp_state >= ACPI_SS_DEV_SUSPEND)
 3007         DEVICE_RESUME(root_bus);
 3008     if (slp_state >= ACPI_SS_SLP_PREP)
 3009         AcpiLeaveSleepState(state);
 3010     if (slp_state >= ACPI_SS_SLEPT) {
 3011 #if defined(__i386__) || defined(__amd64__)
 3012         /* NB: we are still using ACPI timecounter at this point. */
 3013         resume_TSC();
 3014 #endif
 3015         acpi_resync_clock(sc);
 3016         acpi_enable_fixed_events(sc);
 3017     }
 3018     sc->acpi_next_sstate = 0;
 3019 
 3020     mtx_unlock(&Giant);
 3021 
 3022 #ifdef EARLY_AP_STARTUP
 3023     thread_lock(curthread);
 3024     sched_unbind(curthread);
 3025     thread_unlock(curthread);
 3026 #else
 3027     if (smp_started) {
 3028         thread_lock(curthread);
 3029         sched_unbind(curthread);
 3030         thread_unlock(curthread);
 3031     }
 3032 #endif
 3033 
 3034     resume_all_proc();
 3035 
 3036     EVENTHANDLER_INVOKE(power_resume);
 3037 
 3038     /* Allow another sleep request after a while. */
 3039     callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME);
 3040 
 3041     /* Run /etc/rc.resume after we are back. */
 3042     if (devctl_process_running())
 3043         acpi_UserNotify("Resume", ACPI_ROOT_OBJECT, state);
 3044 
 3045     return_ACPI_STATUS (status);
 3046 }
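
/*
 * A minimal sketch of the staged-backout idiom used in the function above:
 * progress is recorded after each stage succeeds, and the single backout
 * path undoes only the stages that were reached, serving both the error and
 * the success (wakeup) cases.  The step/undo functions and the enum are
 * hypothetical and exist only for illustration; they are not part of this
 * file.
 */
#if 0
static int
example_staged_enter(void)
{
	enum { PROG_NONE, PROG_A, PROG_B } prog = PROG_NONE;
	int error;

	error = step_a();			/* hypothetical first stage */
	if (error != 0)
		goto backout;
	prog = PROG_A;

	error = step_b();			/* hypothetical second stage */
	if (error != 0)
		goto backout;
	prog = PROG_B;

backout:
	/* Undo in reverse order, but only what was actually done. */
	if (prog >= PROG_B)
		undo_b();
	if (prog >= PROG_A)
		undo_a();
	return (error);
}
#endif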
 3047 
 3048 static void
 3049 acpi_resync_clock(struct acpi_softc *sc)
 3050 {
 3051 #ifdef __amd64__
 3052     if (!acpi_reset_clock)
 3053         return;
 3054 
 3055     /*
 3056      * Warm up timecounter again and reset system clock.
 3057      */
 3058     (void)timecounter->tc_get_timecount(timecounter);
 3059     (void)timecounter->tc_get_timecount(timecounter);
 3060     inittodr(time_second + sc->acpi_sleep_delay);
 3061 #endif
 3062 }
 3063 
 3064 /* Enable or disable the device's wake GPE. */
 3065 int
 3066 acpi_wake_set_enable(device_t dev, int enable)
 3067 {
 3068     struct acpi_prw_data prw;
 3069     ACPI_STATUS status;
 3070     int flags;
 3071 
 3072     /* Make sure the device supports waking the system and get the GPE. */
 3073     if (acpi_parse_prw(acpi_get_handle(dev), &prw) != 0)
 3074         return (ENXIO);
 3075 
 3076     flags = acpi_get_flags(dev);
 3077     if (enable) {
 3078         status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit,
 3079             ACPI_GPE_ENABLE);
 3080         if (ACPI_FAILURE(status)) {
 3081             device_printf(dev, "enable wake failed\n");
 3082             return (ENXIO);
 3083         }
 3084         acpi_set_flags(dev, flags | ACPI_FLAG_WAKE_ENABLED);
 3085     } else {
 3086         status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit,
 3087             ACPI_GPE_DISABLE);
 3088         if (ACPI_FAILURE(status)) {
 3089             device_printf(dev, "disable wake failed\n");
 3090             return (ENXIO);
 3091         }
 3092         acpi_set_flags(dev, flags & ~ACPI_FLAG_WAKE_ENABLED);
 3093     }
 3094 
 3095     return (0);
 3096 }
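
/*
 * A minimal sketch of how a hypothetical ACPI child driver might use
 * acpi_wake_set_enable() from its suspend method to arm its wake GPE before
 * the system sleeps.  The driver method and its use of bus_generic_suspend()
 * are illustrative assumptions; only acpi_wake_set_enable() is defined here.
 */
#if 0
static int
example_suspend(device_t dev)
{
	int error;

	/* Arm the wake GPE so this device may wake the system. */
	error = acpi_wake_set_enable(dev, 1);
	if (error != 0)
		device_printf(dev, "could not enable wake GPE: %d\n", error);

	return (bus_generic_suspend(dev));
}
#endif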
 3097 
 3098 static int
 3099 acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate)
 3100 {
 3101     struct acpi_prw_data prw;
 3102     device_t dev;
 3103 
 3104     /* Check that this is a wake-capable device and get its GPE. */
 3105     if (acpi_parse_prw(handle, &prw) != 0)
 3106         return (ENXIO);
 3107     dev = acpi_get_device(handle);
 3108 
 3109     /*
 3110      * The destination sleep state must be less than (i.e., higher power)
 3111      * or equal to the value specified by _PRW.  If this GPE cannot be
 3112      * enabled for the next sleep state, then disable it.  If it can and
 3113      * the user requested it be enabled, turn on any required power resources
 3114      * and set _PSW.
 3115      */
 3116     if (sstate > prw.lowest_wake) {
 3117         AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_DISABLE);
 3118         if (bootverbose)
 3119             device_printf(dev, "wake_prep disabled wake for %s (S%d)\n",
 3120                 acpi_name(handle), sstate);
 3121     } else if (dev && (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) != 0) {
 3122         acpi_pwr_wake_enable(handle, 1);
 3123         acpi_SetInteger(handle, "_PSW", 1);
 3124         if (bootverbose)
 3125             device_printf(dev, "wake_prep enabled for %s (S%d)\n",
 3126                 acpi_name(handle), sstate);
 3127     }
 3128 
 3129     return (0);
 3130 }
 3131 
 3132 static int
 3133 acpi_wake_run_prep(ACPI_HANDLE handle, int sstate)
 3134 {
 3135     struct acpi_prw_data prw;
 3136     device_t dev;
 3137 
 3138     /*
 3139      * Check that this is a wake-capable device and get its GPE.  Return
 3140      * now if the user didn't enable this device for wake.
 3141      */
 3142     if (acpi_parse_prw(handle, &prw) != 0)
 3143         return (ENXIO);
 3144     dev = acpi_get_device(handle);
 3145     if (dev == NULL || (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) == 0)
 3146         return (0);
 3147 
 3148     /*
 3149      * If this GPE couldn't be enabled for the previous sleep state, it was
 3150      * disabled before going to sleep, so re-enable it.  If it was enabled,
 3151      * clear _PSW and turn off any power resources it used.
 3152      */
 3153     if (sstate > prw.lowest_wake) {
 3154         AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_ENABLE);
 3155         if (bootverbose)
 3156             device_printf(dev, "run_prep re-enabled %s\n", acpi_name(handle));
 3157     } else {
 3158         acpi_SetInteger(handle, "_PSW", 0);
 3159         acpi_pwr_wake_enable(handle, 0);
 3160         if (bootverbose)
 3161             device_printf(dev, "run_prep cleaned up for %s\n",
 3162                 acpi_name(handle));
 3163     }
 3164 
 3165     return (0);
 3166 }
 3167 
 3168 static ACPI_STATUS
 3169 acpi_wake_prep(ACPI_HANDLE handle, UINT32 level, void *context, void **status)
 3170 {
 3171     int sstate;
 3172 
 3173     /* If suspending, run the sleep prep function, otherwise wake. */
 3174     sstate = *(int *)context;
 3175     if (AcpiGbl_SystemAwakeAndRunning)
 3176         acpi_wake_sleep_prep(handle, sstate);
 3177     else
 3178         acpi_wake_run_prep(handle, sstate);
 3179     return (AE_OK);
 3180 }
 3181 
 3182 /* Walk the tree rooted at acpi0 to prep devices for suspend/resume. */
 3183 static int
 3184 acpi_wake_prep_walk(int sstate)
 3185 {
 3186     ACPI_HANDLE sb_handle;
 3187 
 3188     if (ACPI_SUCCESS(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle)))
 3189         AcpiWalkNamespace(ACPI_TYPE_DEVICE, sb_handle, 100,
 3190             acpi_wake_prep, NULL, &sstate, NULL);
 3191     return (0);
 3192 }
 3193 
 3194 /* Walk the tree rooted at acpi0 to attach per-device wake sysctls. */
 3195 static int
 3196 acpi_wake_sysctl_walk(device_t dev)
 3197 {
 3198     int error, i, numdevs;
 3199     device_t *devlist;
 3200     device_t child;
 3201     ACPI_STATUS status;
 3202 
 3203     error = device_get_children(dev, &devlist, &numdevs);
 3204     if (error != 0 || numdevs == 0) {
 3205         if (numdevs == 0)
 3206             free(devlist, M_TEMP);
 3207         return (error);
 3208     }
 3209     for (i = 0; i < numdevs; i++) {
 3210         child = devlist[i];
 3211         acpi_wake_sysctl_walk(child);
 3212         if (!device_is_attached(child))
 3213             continue;
 3214         status = AcpiEvaluateObject(acpi_get_handle(child), "_PRW", NULL, NULL);
 3215         if (ACPI_SUCCESS(status)) {
 3216             SYSCTL_ADD_PROC(device_get_sysctl_ctx(child),
 3217                 SYSCTL_CHILDREN(device_get_sysctl_tree(child)), OID_AUTO,
 3218                 "wake", CTLTYPE_INT | CTLFLAG_RW, child, 0,
 3219                 acpi_wake_set_sysctl, "I", "Device set to wake the system");
 3220         }
 3221     }
 3222     free(devlist, M_TEMP);
 3223 
 3224     return (0);
 3225 }
 3226 
 3227 /* Enable or disable wake from userland. */
 3228 static int
 3229 acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS)
 3230 {
 3231     int enable, error;
 3232     device_t dev;
 3233 
 3234     dev = (device_t)arg1;
 3235     enable = (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) ? 1 : 0;
 3236 
 3237     error = sysctl_handle_int(oidp, &enable, 0, req);
 3238     if (error != 0 || req->newptr == NULL)
 3239         return (error);
 3240     if (enable != 0 && enable != 1)
 3241         return (EINVAL);
 3242 
 3243     return (acpi_wake_set_enable(dev, enable));
 3244 }
 3245 
 3246 /* Parse a device's _PRW into a structure. */
 3247 int
 3248 acpi_parse_prw(ACPI_HANDLE h, struct acpi_prw_data *prw)
 3249 {
 3250     ACPI_STATUS                 status;
 3251     ACPI_BUFFER                 prw_buffer;
 3252     ACPI_OBJECT                 *res, *res2;
 3253     int                         error, i, power_count;
 3254 
 3255     if (h == NULL || prw == NULL)
 3256         return (EINVAL);
 3257 
 3258     /*
 3259      * The _PRW object (7.2.9) is only required for devices that have the
 3260      * ability to wake the system from a sleeping state.
 3261      */
 3262     error = EINVAL;
 3263     prw_buffer.Pointer = NULL;
 3264     prw_buffer.Length = ACPI_ALLOCATE_BUFFER;
 3265     status = AcpiEvaluateObject(h, "_PRW", NULL, &prw_buffer);
 3266     if (ACPI_FAILURE(status))
 3267         return (ENOENT);
 3268     res = (ACPI_OBJECT *)prw_buffer.Pointer;
 3269     if (res == NULL)
 3270         return (ENOENT);
 3271     if (!ACPI_PKG_VALID(res, 2))
 3272         goto out;
 3273 
 3274     /*
 3275      * Element 1 of the _PRW object:
 3276      * The lowest power system sleeping state that can be entered while still
 3277      * providing wake functionality.  The sleeping state being entered must
 3278      * be less than (i.e., higher power) or equal to this value.
 3279      */
 3280     if (acpi_PkgInt32(res, 1, &prw->lowest_wake) != 0)
 3281         goto out;
 3282 
 3283     /*
 3284      * Element 0 of the _PRW object:
 3285      */
 3286     switch (res->Package.Elements[0].Type) {
 3287     case ACPI_TYPE_INTEGER:
 3288         /*
 3289          * If the data type of this package element is numeric, then this
 3290          * _PRW package element is the bit index in the GPEx_EN, in the
 3291          * GPE blocks described in the FADT, of the enable bit that is
 3292          * enabled for the wake event.
 3293          */
 3294         prw->gpe_handle = NULL;
 3295         prw->gpe_bit = res->Package.Elements[0].Integer.Value;
 3296         error = 0;
 3297         break;
 3298     case ACPI_TYPE_PACKAGE:
 3299         /*
 3300          * If the data type of this package element is a package, then this
 3301          * _PRW package element is itself a package containing two
 3302          * elements.  The first is an object reference to the GPE Block
 3303          * device that contains the GPE that will be triggered by the wake
 3304          * event.  The second element is numeric and it contains the bit
 3305          * index in the GPEx_EN, in the GPE Block referenced by the
 3306          * first element in the package, of the enable bit that is enabled for
 3307          * the wake event.
 3308          *
 3309          * For example, if this field is a package then it is of the form:
 3310          * Package() {\_SB.PCI0.ISA.GPE, 2}
 3311          */
 3312         res2 = &res->Package.Elements[0];
 3313         if (!ACPI_PKG_VALID(res2, 2))
 3314             goto out;
 3315         prw->gpe_handle = acpi_GetReference(NULL, &res2->Package.Elements[0]);
 3316         if (prw->gpe_handle == NULL)
 3317             goto out;
 3318         if (acpi_PkgInt32(res2, 1, &prw->gpe_bit) != 0)
 3319             goto out;
 3320         error = 0;
 3321         break;
 3322     default:
 3323         goto out;
 3324     }
 3325 
 3326     /* Elements 2 to N of the _PRW object are power resources. */
 3327     power_count = res->Package.Count - 2;
 3328     if (power_count > ACPI_PRW_MAX_POWERRES) {
 3329         printf("ACPI device %s has too many power resources\n", acpi_name(h));
 3330         power_count = 0;
 3331     }
 3332     prw->power_res_count = power_count;
 3333     for (i = 0; i < power_count; i++)
 3334         prw->power_res[i] = res->Package.Elements[i];
 3335 
 3336 out:
 3337     if (prw_buffer.Pointer != NULL)
 3338         AcpiOsFree(prw_buffer.Pointer);
 3339     return (error);
 3340 }
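
/*
 * A minimal sketch of a caller consuming the parsed _PRW data.  For a device
 * whose _PRW is, say, Package() { 0x0D, 3, ... }, this would report GPE bit
 * 0x0D (gpe_handle is NULL for the Integer form of element 0) and S3 as the
 * lowest sleep state that supports wake.  The reporting function itself is
 * hypothetical; struct acpi_prw_data is the structure filled in above.
 */
#if 0
static void
example_report_prw(ACPI_HANDLE h)
{
	struct acpi_prw_data prw;

	if (acpi_parse_prw(h, &prw) != 0)
		return;
	printf("%s: GPE bit %d, lowest wake state S%d, %d power resources\n",
	    acpi_name(h), (int)prw.gpe_bit, (int)prw.lowest_wake,
	    prw.power_res_count);
}
#endif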
 3341 
 3342 /*
 3343  * ACPI Event Handlers
 3344  */
 3345 
 3346 /* System Event Handlers (registered by EVENTHANDLER_REGISTER) */
 3347 
 3348 static void
 3349 acpi_system_eventhandler_sleep(void *arg, int state)
 3350 {
 3351     struct acpi_softc *sc = (struct acpi_softc *)arg;
 3352     int ret;
 3353 
 3354     ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
 3355 
 3356     /* Check if button action is disabled or unknown. */
 3357     if (state == ACPI_STATE_UNKNOWN)
 3358         return;
 3359 
 3360     /* Request that the system prepare to enter the given suspend state. */
 3361     ret = acpi_ReqSleepState(sc, state);
 3362     if (ret != 0)
 3363         device_printf(sc->acpi_dev,
 3364             "request to enter state S%d failed (err %d)\n", state, ret);
 3365 
 3366     return_VOID;
 3367 }
 3368 
 3369 static void
 3370 acpi_system_eventhandler_wakeup(void *arg, int state)
 3371 {
 3372 
 3373     ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
 3374 
 3375     /* Currently, nothing to do for wakeup. */
 3376 
 3377     return_VOID;
 3378 }
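
/*
 * A minimal sketch of how another kernel subsystem might hook the same
 * events: acpi_sleep_event and acpi_wakeup_event are invoked with the target
 * sleep state, so a handler takes (arg, state).  The handler body, the
 * registration helper, and the use of EVENTHANDLER_PRI_ANY are illustrative
 * assumptions.
 */
#if 0
static void
example_sleep_handler(void *arg, int state)
{
	printf("example: system is heading for S%d\n", state);
}

static void
example_hook_events(void)
{
	/* Typically done once, e.g. from a driver's attach routine. */
	EVENTHANDLER_REGISTER(acpi_sleep_event, example_sleep_handler, NULL,
	    EVENTHANDLER_PRI_ANY);
}
#endif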
 3379 
 3380 /* 
 3381  * ACPICA Event Handlers (FixedEvent, also called from button notify handler)
 3382  */
 3383 static void
 3384 acpi_invoke_sleep_eventhandler(void *context)
 3385 {
 3386 
 3387     EVENTHANDLER_INVOKE(acpi_sleep_event, *(int *)context);
 3388 }
 3389 
 3390 static void
 3391 acpi_invoke_wake_eventhandler(void *context)
 3392 {
 3393 
 3394     EVENTHANDLER_INVOKE(acpi_wakeup_event, *(int *)context);
 3395 }
 3396 
 3397 UINT32
 3398 acpi_event_power_button_sleep(void *context)
 3399 {
 3400     struct acpi_softc   *sc = (struct acpi_softc *)context;
 3401 
 3402     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 3403 
 3404     if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
 3405         acpi_invoke_sleep_eventhandler, &sc->acpi_power_button_sx)))
 3406         return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
 3407     return_VALUE (ACPI_INTERRUPT_HANDLED);
 3408 }
 3409 
 3410 UINT32
 3411 acpi_event_power_button_wake(void *context)
 3412 {
 3413     struct acpi_softc   *sc = (struct acpi_softc *)context;
 3414 
 3415     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 3416 
 3417     if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
 3418         acpi_invoke_wake_eventhandler, &sc->acpi_power_button_sx)))
 3419         return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
 3420     return_VALUE (ACPI_INTERRUPT_HANDLED);
 3421 }
 3422 
 3423 UINT32
 3424 acpi_event_sleep_button_sleep(void *context)
 3425 {
 3426     struct acpi_softc   *sc = (struct acpi_softc *)context;
 3427 
 3428     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 3429 
 3430     if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
 3431         acpi_invoke_sleep_eventhandler, &sc->acpi_sleep_button_sx)))
 3432         return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
 3433     return_VALUE (ACPI_INTERRUPT_HANDLED);
 3434 }
 3435 
 3436 UINT32
 3437 acpi_event_sleep_button_wake(void *context)
 3438 {
 3439     struct acpi_softc   *sc = (struct acpi_softc *)context;
 3440 
 3441     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 3442 
 3443     if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
 3444         acpi_invoke_wake_eventhandler, &sc->acpi_sleep_button_sx)))
 3445         return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
 3446     return_VALUE (ACPI_INTERRUPT_HANDLED);
 3447 }
 3448 
 3449 /*
 3450  * XXX This static buffer is suboptimal.  There is no locking, so only
 3451  * use it for single-threaded callers.
 3452  */
 3453 char *
 3454 acpi_name(ACPI_HANDLE handle)
 3455 {
 3456     ACPI_BUFFER buf;
 3457     static char data[256];
 3458 
 3459     buf.Length = sizeof(data);
 3460     buf.Pointer = data;
 3461 
 3462     if (handle && ACPI_SUCCESS(AcpiGetName(handle, ACPI_FULL_PATHNAME, &buf)))
 3463         return (data);
 3464     return ("(unknown)");
 3465 }
 3466 
 3467 /*
 3468  * Debugging/bug-avoidance.  Avoid trying to fetch info on various
 3469  * parts of the namespace.
 3470  */
 3471 int
 3472 acpi_avoid(ACPI_HANDLE handle)
 3473 {
 3474     char        *cp, *env, *np;
 3475     int         len;
 3476 
 3477     np = acpi_name(handle);
 3478     if (*np == '\\')
 3479         np++;
 3480     if ((env = kern_getenv("debug.acpi.avoid")) == NULL)
 3481         return (0);
 3482 
 3483     /* Scan the avoid list, checking for a match. */
 3484     cp = env;
 3485     for (;;) {
 3486         while (*cp != 0 && isspace(*cp))
 3487             cp++;
 3488         if (*cp == 0)
 3489             break;
 3490         len = 0;
 3491         while (cp[len] != 0 && !isspace(cp[len]))
 3492             len++;
 3493         if (!strncmp(cp, np, len)) {
 3494             freeenv(env);
 3495             return(1);
 3496         }
 3497         cp += len;
 3498     }
 3499     freeenv(env);
 3500 
 3501     return (0);
 3502 }
 3503 
 3504 /*
 3505  * Debugging/bug-avoidance.  Disable ACPI subsystem components.
 3506  */
 3507 int
 3508 acpi_disabled(char *subsys)
 3509 {
 3510     char        *cp, *env;
 3511     int         len;
 3512 
 3513     if ((env = kern_getenv("debug.acpi.disabled")) == NULL)
 3514         return (0);
 3515     if (strcmp(env, "all") == 0) {
 3516         freeenv(env);
 3517         return (1);
 3518     }
 3519 
 3520     /* Scan the disable list, checking for a match. */
 3521     cp = env;
 3522     for (;;) {
 3523         while (*cp != '\0' && isspace(*cp))
 3524             cp++;
 3525         if (*cp == '\0')
 3526             break;
 3527         len = 0;
 3528         while (cp[len] != '\0' && !isspace(cp[len]))
 3529             len++;
 3530         if (strncmp(cp, subsys, len) == 0) {
 3531             freeenv(env);
 3532             return (1);
 3533         }
 3534         cp += len;
 3535     }
 3536     freeenv(env);
 3537 
 3538     return (0);
 3539 }
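
/*
 * A minimal sketch of how an ACPI subsystem driver's probe routine might
 * honor debug.acpi.disabled.  The "example" subsystem name and the probe
 * body are illustrative assumptions; acpi_disabled() is the function above.
 */
#if 0
static int
example_probe(device_t dev)
{
	if (acpi_disabled("example"))
		return (ENXIO);
	/* ... normal probe logic would follow here ... */
	return (0);
}
#endif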
 3540 
 3541 static void
 3542 acpi_lookup(void *arg, const char *name, device_t *dev)
 3543 {
 3544     ACPI_HANDLE handle;
 3545 
 3546     if (*dev != NULL)
 3547         return;
 3548 
 3549     /*
 3550      * Allow any handle name that is specified as an absolute path and
 3551      * starts with '\'.  We could restrict this to \_SB and friends,
 3552      * but see acpi_probe_children() for notes on why we scan the entire
 3553      * namespace for devices.
 3554      *
 3555      * XXX: The pathname argument to AcpiGetHandle() should be fixed to
 3556      * be const.
 3557      */
 3558     if (name[0] != '\\')
 3559         return;
 3560     if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, __DECONST(char *, name),
 3561         &handle)))
 3562         return;
 3563     *dev = acpi_get_device(handle);
 3564 }
 3565 
 3566 /*
 3567  * Control interface.
 3568  *
 3569  * We multiplex ioctls for all participating ACPI devices here.  Individual 
 3570  * drivers wanting to be accessible via /dev/acpi should use the
 3571  * register/deregister interface to make their handlers visible.
 3572  */
 3573 struct acpi_ioctl_hook
 3574 {
 3575     TAILQ_ENTRY(acpi_ioctl_hook) link;
 3576     u_long                       cmd;
 3577     acpi_ioctl_fn                fn;
 3578     void                         *arg;
 3579 };
 3580 
 3581 static TAILQ_HEAD(,acpi_ioctl_hook)     acpi_ioctl_hooks;
 3582 static int                              acpi_ioctl_hooks_initted;
 3583 
 3584 int
 3585 acpi_register_ioctl(u_long cmd, acpi_ioctl_fn fn, void *arg)
 3586 {
 3587     struct acpi_ioctl_hook      *hp;
 3588 
 3589     if ((hp = malloc(sizeof(*hp), M_ACPIDEV, M_NOWAIT)) == NULL)
 3590         return (ENOMEM);
 3591     hp->cmd = cmd;
 3592     hp->fn = fn;
 3593     hp->arg = arg;
 3594 
 3595     ACPI_LOCK(acpi);
 3596     if (acpi_ioctl_hooks_initted == 0) {
 3597         TAILQ_INIT(&acpi_ioctl_hooks);
 3598         acpi_ioctl_hooks_initted = 1;
 3599     }
 3600     TAILQ_INSERT_TAIL(&acpi_ioctl_hooks, hp, link);
 3601     ACPI_UNLOCK(acpi);
 3602 
 3603     return (0);
 3604 }
 3605 
 3606 void
 3607 acpi_deregister_ioctl(u_long cmd, acpi_ioctl_fn fn)
 3608 {
 3609     struct acpi_ioctl_hook      *hp;
 3610 
 3611     ACPI_LOCK(acpi);
 3612     TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link)
 3613         if (hp->cmd == cmd && hp->fn == fn)
 3614             break;
 3615 
 3616     if (hp != NULL) {
 3617         TAILQ_REMOVE(&acpi_ioctl_hooks, hp, link);
 3618         free(hp, M_ACPIDEV);
 3619     }
 3620     ACPI_UNLOCK(acpi);
 3621 }
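
/*
 * A minimal sketch of a driver exposing one ioctl through /dev/acpi using
 * the hooks above.  ACPIIO_EXAMPLE, example_softc, and the attach/detach
 * fragments are illustrative assumptions; the register/deregister calls are
 * the interface provided by this file.
 */
#if 0
static int
example_ioctl(u_long cmd, caddr_t addr, void *arg)
{
	struct example_softc *sc = arg;

	/* Return a driver-private value to the caller's buffer. */
	*(int *)addr = sc->example_value;
	return (0);
}

/* In the driver's attach routine: */
acpi_register_ioctl(ACPIIO_EXAMPLE, example_ioctl, sc);

/* In the driver's detach routine: */
acpi_deregister_ioctl(ACPIIO_EXAMPLE, example_ioctl);
#endif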
 3622 
 3623 static int
 3624 acpiopen(struct cdev *dev, int flag, int fmt, struct thread *td)
 3625 {
 3626     return (0);
 3627 }
 3628 
 3629 static int
 3630 acpiclose(struct cdev *dev, int flag, int fmt, struct thread *td)
 3631 {
 3632     return (0);
 3633 }
 3634 
 3635 static int
 3636 acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
 3637 {
 3638     struct acpi_softc           *sc;
 3639     struct acpi_ioctl_hook      *hp;
 3640     int                         error, state;
 3641 
 3642     error = 0;
 3643     hp = NULL;
 3644     sc = dev->si_drv1;
 3645 
 3646     /*
 3647      * Scan the list of registered ioctls, looking for handlers.
 3648      */
 3649     ACPI_LOCK(acpi);
 3650     if (acpi_ioctl_hooks_initted)
 3651         TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) {
 3652             if (hp->cmd == cmd)
 3653                 break;
 3654         }
 3655     ACPI_UNLOCK(acpi);
 3656     if (hp)
 3657         return (hp->fn(cmd, addr, hp->arg));
 3658 
 3659     /*
 3660      * Core ioctls are not permitted unless the device was opened for
 3661      * writing.  Currently, the remaining ioctls only fetch information
 3662      * and do not change system behavior.
 3663      */
 3664     if ((flag & FWRITE) == 0)
 3665         return (EPERM);
 3666 
 3667     /* Core system ioctls. */
 3668     switch (cmd) {
 3669     case ACPIIO_REQSLPSTATE:
 3670         state = *(int *)addr;
 3671         if (state != ACPI_STATE_S5)
 3672             return (acpi_ReqSleepState(sc, state));
 3673         device_printf(sc->acpi_dev, "power off via acpi ioctl not supported\n");
 3674         error = EOPNOTSUPP;
 3675         break;
 3676     case ACPIIO_ACKSLPSTATE:
 3677         error = *(int *)addr;
 3678         error = acpi_AckSleepState(sc->acpi_clone, error);
 3679         break;
 3680     case ACPIIO_SETSLPSTATE:    /* DEPRECATED */
 3681         state = *(int *)addr;
 3682         if (state < ACPI_STATE_S0 || state > ACPI_S_STATES_MAX)
 3683             return (EINVAL);
 3684         if (!acpi_sleep_states[state])
 3685             return (EOPNOTSUPP);
 3686         if (ACPI_FAILURE(acpi_SetSleepState(sc, state)))
 3687             error = ENXIO;
 3688         break;
 3689     default:
 3690         error = ENXIO;
 3691         break;
 3692     }
 3693 
 3694     return (error);
 3695 }
 3696 
 3697 static int
 3698 acpi_sname2sstate(const char *sname)
 3699 {
 3700     int sstate;
 3701 
 3702     if (toupper(sname[0]) == 'S') {
 3703         sstate = sname[1] - '0';
 3704         if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5 &&
 3705             sname[2] == '\0')
 3706             return (sstate);
 3707     } else if (strcasecmp(sname, "NONE") == 0)
 3708         return (ACPI_STATE_UNKNOWN);
 3709     return (-1);
 3710 }
 3711 
 3712 static const char *
 3713 acpi_sstate2sname(int sstate)
 3714 {
 3715     static const char *snames[] = { "S0", "S1", "S2", "S3", "S4", "S5" };
 3716 
 3717     if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5)
 3718         return (snames[sstate]);
 3719     else if (sstate == ACPI_STATE_UNKNOWN)
 3720         return ("NONE");
 3721     return (NULL);
 3722 }
 3723 
 3724 static int
 3725 acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
 3726 {
 3727     int error;
 3728     struct sbuf sb;
 3729     UINT8 state;
 3730 
 3731     sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
 3732     for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++)
 3733         if (acpi_sleep_states[state])
 3734             sbuf_printf(&sb, "%s ", acpi_sstate2sname(state));
 3735     sbuf_trim(&sb);
 3736     sbuf_finish(&sb);
 3737     error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
 3738     sbuf_delete(&sb);
 3739     return (error);
 3740 }
 3741 
 3742 static int
 3743 acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
 3744 {
 3745     char sleep_state[10];
 3746     int error, new_state, old_state;
 3747 
 3748     old_state = *(int *)oidp->oid_arg1;
 3749     strlcpy(sleep_state, acpi_sstate2sname(old_state), sizeof(sleep_state));
 3750     error = sysctl_handle_string(oidp, sleep_state, sizeof(sleep_state), req);
 3751     if (error == 0 && req->newptr != NULL) {
 3752         new_state = acpi_sname2sstate(sleep_state);
 3753         if (new_state < ACPI_STATE_S1)
 3754             return (EINVAL);
 3755         if (new_state < ACPI_S_STATE_COUNT && !acpi_sleep_states[new_state])
 3756             return (EOPNOTSUPP);
 3757         if (new_state != old_state)
 3758             *(int *)oidp->oid_arg1 = new_state;
 3759     }
 3760     return (error);
 3761 }
 3762 
 3763 /* Inform devctl(4) when we receive a Notify. */
 3764 void
 3765 acpi_UserNotify(const char *subsystem, ACPI_HANDLE h, uint8_t notify)
 3766 {
 3767     char                notify_buf[16];
 3768     ACPI_BUFFER         handle_buf;
 3769     ACPI_STATUS         status;
 3770 
 3771     if (subsystem == NULL)
 3772         return;
 3773 
 3774     handle_buf.Pointer = NULL;
 3775     handle_buf.Length = ACPI_ALLOCATE_BUFFER;
 3776     status = AcpiNsHandleToPathname(h, &handle_buf, FALSE);
 3777     if (ACPI_FAILURE(status))
 3778         return;
 3779     snprintf(notify_buf, sizeof(notify_buf), "notify=0x%02x", notify);
 3780     devctl_notify("ACPI", subsystem, handle_buf.Pointer, notify_buf);
 3781     AcpiOsFree(handle_buf.Pointer);
 3782 }
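
/*
 * A minimal sketch of a hypothetical Notify handler forwarding an ACPI
 * Notify to userland via acpi_UserNotify(); devd(8) can then match on
 * system "ACPI" and the chosen subsystem string.  The handler and the
 * "EXAMPLE" subsystem name are illustrative assumptions.
 */
#if 0
static void
example_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context)
{
	/* devd(8) sees: system=ACPI subsystem=EXAMPLE notify=0x.. */
	acpi_UserNotify("EXAMPLE", h, notify);
}
#endif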
 3783 
 3784 #ifdef ACPI_DEBUG
 3785 /*
 3786  * Support for parsing debug options from the kernel environment.
 3787  *
 3788  * Bits may be set in the AcpiDbgLayer and AcpiDbgLevel debug registers
 3789  * by specifying the names of the bits in the debug.acpi.layer and
 3790  * debug.acpi.level environment variables.  Bits may be unset by 
 3791  * prefixing the bit name with !.
 3792  */
 3793 struct debugtag
 3794 {
 3795     char        *name;
 3796     UINT32      value;
 3797 };
 3798 
 3799 static struct debugtag  dbg_layer[] = {
 3800     {"ACPI_UTILITIES",          ACPI_UTILITIES},
 3801     {"ACPI_HARDWARE",           ACPI_HARDWARE},
 3802     {"ACPI_EVENTS",             ACPI_EVENTS},
 3803     {"ACPI_TABLES",             ACPI_TABLES},
 3804     {"ACPI_NAMESPACE",          ACPI_NAMESPACE},
 3805     {"ACPI_PARSER",             ACPI_PARSER},
 3806     {"ACPI_DISPATCHER",         ACPI_DISPATCHER},
 3807     {"ACPI_EXECUTER",           ACPI_EXECUTER},
 3808     {"ACPI_RESOURCES",          ACPI_RESOURCES},
 3809     {"ACPI_CA_DEBUGGER",        ACPI_CA_DEBUGGER},
 3810     {"ACPI_OS_SERVICES",        ACPI_OS_SERVICES},
 3811     {"ACPI_CA_DISASSEMBLER",    ACPI_CA_DISASSEMBLER},
 3812     {"ACPI_ALL_COMPONENTS",     ACPI_ALL_COMPONENTS},
 3813 
 3814     {"ACPI_AC_ADAPTER",         ACPI_AC_ADAPTER},
 3815     {"ACPI_BATTERY",            ACPI_BATTERY},
 3816     {"ACPI_BUS",                ACPI_BUS},
 3817     {"ACPI_BUTTON",             ACPI_BUTTON},
 3818     {"ACPI_EC",                 ACPI_EC},
 3819     {"ACPI_FAN",                ACPI_FAN},
 3820     {"ACPI_POWERRES",           ACPI_POWERRES},
 3821     {"ACPI_PROCESSOR",          ACPI_PROCESSOR},
 3822     {"ACPI_THERMAL",            ACPI_THERMAL},
 3823     {"ACPI_TIMER",              ACPI_TIMER},
 3824     {"ACPI_ALL_DRIVERS",        ACPI_ALL_DRIVERS},
 3825     {NULL, 0}
 3826 };
 3827 
 3828 static struct debugtag dbg_level[] = {
 3829     {"ACPI_LV_INIT",            ACPI_LV_INIT},
 3830     {"ACPI_LV_DEBUG_OBJECT",    ACPI_LV_DEBUG_OBJECT},
 3831     {"ACPI_LV_INFO",            ACPI_LV_INFO},
 3832     {"ACPI_LV_REPAIR",          ACPI_LV_REPAIR},
 3833     {"ACPI_LV_ALL_EXCEPTIONS",  ACPI_LV_ALL_EXCEPTIONS},
 3834 
 3835     /* Trace verbosity level 1 [Standard Trace Level] */
 3836     {"ACPI_LV_INIT_NAMES",      ACPI_LV_INIT_NAMES},
 3837     {"ACPI_LV_PARSE",           ACPI_LV_PARSE},
 3838     {"ACPI_LV_LOAD",            ACPI_LV_LOAD},
 3839     {"ACPI_LV_DISPATCH",        ACPI_LV_DISPATCH},
 3840     {"ACPI_LV_EXEC",            ACPI_LV_EXEC},
 3841     {"ACPI_LV_NAMES",           ACPI_LV_NAMES},
 3842     {"ACPI_LV_OPREGION",        ACPI_LV_OPREGION},
 3843     {"ACPI_LV_BFIELD",          ACPI_LV_BFIELD},
 3844     {"ACPI_LV_TABLES",          ACPI_LV_TABLES},
 3845     {"ACPI_LV_VALUES",          ACPI_LV_VALUES},
 3846     {"ACPI_LV_OBJECTS",         ACPI_LV_OBJECTS},
 3847     {"ACPI_LV_RESOURCES",       ACPI_LV_RESOURCES},
 3848     {"ACPI_LV_USER_REQUESTS",   ACPI_LV_USER_REQUESTS},
 3849     {"ACPI_LV_PACKAGE",         ACPI_LV_PACKAGE},
 3850     {"ACPI_LV_VERBOSITY1",      ACPI_LV_VERBOSITY1},
 3851 
 3852     /* Trace verbosity level 2 [Function tracing and memory allocation] */
 3853     {"ACPI_LV_ALLOCATIONS",     ACPI_LV_ALLOCATIONS},
 3854     {"ACPI_LV_FUNCTIONS",       ACPI_LV_FUNCTIONS},
 3855     {"ACPI_LV_OPTIMIZATIONS",   ACPI_LV_OPTIMIZATIONS},
 3856     {"ACPI_LV_VERBOSITY2",      ACPI_LV_VERBOSITY2},
 3857     {"ACPI_LV_ALL",             ACPI_LV_ALL},
 3858 
 3859     /* Trace verbosity level 3 [Threading, I/O, and Interrupts] */
 3860     {"ACPI_LV_MUTEX",           ACPI_LV_MUTEX},
 3861     {"ACPI_LV_THREADS",         ACPI_LV_THREADS},
 3862     {"ACPI_LV_IO",              ACPI_LV_IO},
 3863     {"ACPI_LV_INTERRUPTS",      ACPI_LV_INTERRUPTS},
 3864     {"ACPI_LV_VERBOSITY3",      ACPI_LV_VERBOSITY3},
 3865 
 3866     /* Exceptionally verbose output -- also used in the global "DebugLevel"  */
 3867     {"ACPI_LV_AML_DISASSEMBLE", ACPI_LV_AML_DISASSEMBLE},
 3868     {"ACPI_LV_VERBOSE_INFO",    ACPI_LV_VERBOSE_INFO},
 3869     {"ACPI_LV_FULL_TABLES",     ACPI_LV_FULL_TABLES},
 3870     {"ACPI_LV_EVENTS",          ACPI_LV_EVENTS},
 3871     {"ACPI_LV_VERBOSE",         ACPI_LV_VERBOSE},
 3872     {NULL, 0}
 3873 };    
 3874 
 3875 static void
 3876 acpi_parse_debug(char *cp, struct debugtag *tag, UINT32 *flag)
 3877 {
 3878     char        *ep;
 3879     int         i, l;
 3880     int         set;
 3881 
 3882     while (*cp) {
 3883         if (isspace(*cp)) {
 3884             cp++;
 3885             continue;
 3886         }
 3887         ep = cp;
 3888         while (*ep && !isspace(*ep))
 3889             ep++;
 3890         if (*cp == '!') {
 3891             set = 0;
 3892             cp++;
 3893             if (cp == ep)
 3894                 continue;
 3895         } else {
 3896             set = 1;
 3897         }
 3898         l = ep - cp;
 3899         for (i = 0; tag[i].name != NULL; i++) {
 3900             if (!strncmp(cp, tag[i].name, l)) {
 3901                 if (set)
 3902                     *flag |= tag[i].value;
 3903                 else
 3904                     *flag &= ~tag[i].value;
 3905             }
 3906         }
 3907         cp = ep;
 3908     }
 3909 }
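
/*
 * A minimal sketch of what acpi_parse_debug() does with a sample value of
 * the debug.acpi.layer environment variable.  The string literal and the
 * wrapper function are illustrative assumptions; dbg_layer and AcpiDbgLayer
 * are the real table and register manipulated above.
 */
#if 0
static void
example_set_layer(void)
{
	static char example_layer[] = "ACPI_EC !ACPI_BUS";

	/* Sets the ACPI_EC bit and clears the ACPI_BUS bit in AcpiDbgLayer. */
	acpi_parse_debug(example_layer, &dbg_layer[0], &AcpiDbgLayer);
}
#endif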
 3910 
 3911 static void
 3912 acpi_set_debugging(void *junk)
 3913 {
 3914     char        *layer, *level;
 3915 
 3916     if (cold) {
 3917         AcpiDbgLayer = 0;
 3918         AcpiDbgLevel = 0;
 3919     }
 3920 
 3921     layer = kern_getenv("debug.acpi.layer");
 3922     level = kern_getenv("debug.acpi.level");
 3923     if (layer == NULL && level == NULL)
 3924         return;
 3925 
 3926     printf("ACPI set debug");
 3927     if (layer != NULL) {
 3928         if (strcmp("NONE", layer) != 0)
 3929             printf(" layer '%s'", layer);
 3930         acpi_parse_debug(layer, &dbg_layer[0], &AcpiDbgLayer);
 3931         freeenv(layer);
 3932     }
 3933     if (level != NULL) {
 3934         if (strcmp("NONE", level) != 0)
 3935             printf(" level '%s'", level);
 3936         acpi_parse_debug(level, &dbg_level[0], &AcpiDbgLevel);
 3937         freeenv(level);
 3938     }
 3939     printf("\n");
 3940 }
 3941 
 3942 SYSINIT(acpi_debugging, SI_SUB_TUNABLES, SI_ORDER_ANY, acpi_set_debugging,
 3943         NULL);
 3944 
 3945 static int
 3946 acpi_debug_sysctl(SYSCTL_HANDLER_ARGS)
 3947 {
 3948     int          error, *dbg;
 3949     struct       debugtag *tag;
 3950     struct       sbuf sb;
 3951     char         temp[128];
 3952 
 3953     if (sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND) == NULL)
 3954         return (ENOMEM);
 3955     if (strcmp(oidp->oid_arg1, "debug.acpi.layer") == 0) {
 3956         tag = &dbg_layer[0];
 3957         dbg = &AcpiDbgLayer;
 3958     } else {
 3959         tag = &dbg_level[0];
 3960         dbg = &AcpiDbgLevel;
 3961     }
 3962 
 3963     /* Get old values if this is a get request. */
 3964     ACPI_SERIAL_BEGIN(acpi);
 3965     if (*dbg == 0) {
 3966         sbuf_cpy(&sb, "NONE");
 3967     } else if (req->newptr == NULL) {
 3968         for (; tag->name != NULL; tag++) {
 3969             if ((*dbg & tag->value) == tag->value)
 3970                 sbuf_printf(&sb, "%s ", tag->name);
 3971         }
 3972     }
 3973     sbuf_trim(&sb);
 3974     sbuf_finish(&sb);
 3975     strlcpy(temp, sbuf_data(&sb), sizeof(temp));
 3976     sbuf_delete(&sb);
 3977 
 3978     error = sysctl_handle_string(oidp, temp, sizeof(temp), req);
 3979 
 3980     /* Check for error or no change */
 3981     if (error == 0 && req->newptr != NULL) {
 3982         *dbg = 0;
 3983         kern_setenv((char *)oidp->oid_arg1, temp);
 3984         acpi_set_debugging(NULL);
 3985     }
 3986     ACPI_SERIAL_END(acpi);
 3987 
 3988     return (error);
 3989 }
 3990 
 3991 SYSCTL_PROC(_debug_acpi, OID_AUTO, layer, CTLFLAG_RW | CTLTYPE_STRING,
 3992             "debug.acpi.layer", 0, acpi_debug_sysctl, "A", "");
 3993 SYSCTL_PROC(_debug_acpi, OID_AUTO, level, CTLFLAG_RW | CTLTYPE_STRING,
 3994             "debug.acpi.level", 0, acpi_debug_sysctl, "A", "");
 3995 #endif /* ACPI_DEBUG */
 3996 
 3997 static int
 3998 acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS)
 3999 {
 4000         int     error;
 4001         int     old;
 4002 
 4003         old = acpi_debug_objects;
 4004         error = sysctl_handle_int(oidp, &acpi_debug_objects, 0, req);
 4005         if (error != 0 || req->newptr == NULL)
 4006                 return (error);
 4007         if (old == acpi_debug_objects || (old && acpi_debug_objects))
 4008                 return (0);
 4009 
 4010         ACPI_SERIAL_BEGIN(acpi);
 4011         AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? TRUE : FALSE;
 4012         ACPI_SERIAL_END(acpi);
 4013 
 4014         return (0);
 4015 }
 4016 
 4017 static int
 4018 acpi_parse_interfaces(char *str, struct acpi_interface *iface)
 4019 {
 4020         char *p;
 4021         size_t len;
 4022         int i, j;
 4023 
 4024         p = str;
 4025         while (isspace(*p) || *p == ',')
 4026                 p++;
 4027         len = strlen(p);
 4028         if (len == 0)
 4029                 return (0);
 4030         p = strdup(p, M_TEMP);
 4031         for (i = 0; i < len; i++)
 4032                 if (p[i] == ',')
 4033                         p[i] = '\0';
 4034         i = j = 0;
 4035         while (i < len)
 4036                 if (isspace(p[i]) || p[i] == '\0')
 4037                         i++;
 4038                 else {
 4039                         i += strlen(p + i) + 1;
 4040                         j++;
 4041                 }
 4042         if (j == 0) {
 4043                 free(p, M_TEMP);
 4044                 return (0);
 4045         }
 4046         iface->data = malloc(sizeof(*iface->data) * j, M_TEMP, M_WAITOK);
 4047         iface->num = j;
 4048         i = j = 0;
 4049         while (i < len)
 4050                 if (isspace(p[i]) || p[i] == '\0')
 4051                         i++;
 4052                 else {
 4053                         iface->data[j] = p + i;
 4054                         i += strlen(p + i) + 1;
 4055                         j++;
 4056                 }
 4057 
 4058         return (j);
 4059 }
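
/*
 * A minimal sketch of consuming acpi_parse_interfaces(): a comma-separated
 * list is duplicated, split into entries (leading whitespace skipped), and
 * later released with acpi_free_interfaces().  The interface names and the
 * wrapper function are illustrative assumptions.
 */
#if 0
static void
example_list_interfaces(void)
{
	struct acpi_interface list;
	static char names[] = "Windows 2013,Windows 2015";
	int i;

	if (acpi_parse_interfaces(names, &list) > 0) {
		for (i = 0; i < list.num; i++)
			printf("interface %d: %s\n", i, list.data[i]);
		acpi_free_interfaces(&list);
	}
}
#endif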
 4060 
 4061 static void
 4062 acpi_free_interfaces(struct acpi_interface *iface)
 4063 {
 4064 
 4065         free(iface->data[0], M_TEMP);
 4066         free(iface->data, M_TEMP);
 4067 }
 4068 
 4069 static void
 4070 acpi_reset_interfaces(device_t dev)
 4071 {
 4072         struct acpi_interface list;
 4073         ACPI_STATUS status;
 4074         int i;
 4075 
 4076         if (acpi_parse_interfaces(acpi_install_interface, &list) > 0) {
 4077                 for (i = 0; i < list.num; i++) {
 4078                         status = AcpiInstallInterface(list.data[i]);
 4079                         if (ACPI_FAILURE(status))
 4080                                 device_printf(dev,
 4081                                     "failed to install _OSI(\"%s\"): %s\n",
 4082                                     list.data[i], AcpiFormatException(status));
 4083                         else if (bootverbose)
 4084                                 device_printf(dev, "installed _OSI(\"%s\")\n",
 4085                                     list.data[i]);
 4086                 }
 4087                 acpi_free_interfaces(&list);
 4088         }
 4089         if (acpi_parse_interfaces(acpi_remove_interface, &list) > 0) {
 4090                 for (i = 0; i < list.num; i++) {
 4091                         status = AcpiRemoveInterface(list.data[i]);
 4092                         if (ACPI_FAILURE(status))
 4093                                 device_printf(dev,
 4094                                     "failed to remove _OSI(\"%s\"): %s\n",
 4095                                     list.data[i], AcpiFormatException(status));
 4096                         else if (bootverbose)
 4097                                 device_printf(dev, "removed _OSI(\"%s\")\n",
 4098                                     list.data[i]);
 4099                 }
 4100                 acpi_free_interfaces(&list);
 4101         }
 4102 }
 4103 
 4104 static int
 4105 acpi_pm_func(u_long cmd, void *arg, ...)
 4106 {
 4107         int     state, acpi_state;
 4108         int     error;
 4109         struct  acpi_softc *sc;
 4110         va_list ap;
 4111 
 4112         error = 0;
 4113         switch (cmd) {
 4114         case POWER_CMD_SUSPEND:
 4115                 sc = (struct acpi_softc *)arg;
 4116                 if (sc == NULL) {
 4117                         error = EINVAL;
 4118                         goto out;
 4119                 }
 4120 
 4121                 va_start(ap, arg);
 4122                 state = va_arg(ap, int);
 4123                 va_end(ap);
 4124 
 4125                 switch (state) {
 4126                 case POWER_SLEEP_STATE_STANDBY:
 4127                         acpi_state = sc->acpi_standby_sx;
 4128                         break;
 4129                 case POWER_SLEEP_STATE_SUSPEND:
 4130                         acpi_state = sc->acpi_suspend_sx;
 4131                         break;
 4132                 case POWER_SLEEP_STATE_HIBERNATE:
 4133                         acpi_state = ACPI_STATE_S4;
 4134                         break;
 4135                 default:
 4136                         error = EINVAL;
 4137                         goto out;
 4138                 }
 4139 
 4140                 if (ACPI_FAILURE(acpi_EnterSleepState(sc, acpi_state)))
 4141                         error = ENXIO;
 4142                 break;
 4143         default:
 4144                 error = EINVAL;
 4145                 goto out;
 4146         }
 4147 
 4148 out:
 4149         return (error);
 4150 }
 4151 
 4152 static void
 4153 acpi_pm_register(void *arg)
 4154 {
 4155     if (!cold || resource_disabled("acpi", 0))
 4156         return;
 4157 
 4158     power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, NULL);
 4159 }
 4160 
 4161 SYSINIT(power, SI_SUB_KLD, SI_ORDER_ANY, acpi_pm_register, NULL);
