FreeBSD/Linux Kernel Cross Reference
sys/dev/acpica/acpi.c

    1 /*-
    2  * Copyright (c) 2000 Takanori Watanabe <takawata@jp.freebsd.org>
    3  * Copyright (c) 2000 Mitsuru IWASAKI <iwasaki@jp.freebsd.org>
    4  * Copyright (c) 2000, 2001 Michael Smith
    5  * Copyright (c) 2000 BSDi
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  */
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD: releng/8.1/sys/dev/acpica/acpi.c 209000 2010-06-10 20:01:33Z jhb $");
   32 
   33 #include "opt_acpi.h"
   34 #include <sys/param.h>
   35 #include <sys/kernel.h>
   36 #include <sys/proc.h>
   37 #include <sys/fcntl.h>
   38 #include <sys/malloc.h>
   39 #include <sys/module.h>
   40 #include <sys/bus.h>
   41 #include <sys/conf.h>
   42 #include <sys/ioccom.h>
   43 #include <sys/reboot.h>
   44 #include <sys/sysctl.h>
   45 #include <sys/ctype.h>
   46 #include <sys/linker.h>
   47 #include <sys/power.h>
   48 #include <sys/sbuf.h>
   49 #ifdef SMP
   50 #include <sys/sched.h>
   51 #endif
   52 #include <sys/smp.h>
   53 #include <sys/timetc.h>
   54 
   55 #if defined(__i386__) || defined(__amd64__)
   56 #include <machine/pci_cfgreg.h>
   57 #endif
   58 #include <machine/resource.h>
   59 #include <machine/bus.h>
   60 #include <sys/rman.h>
   61 #include <isa/isavar.h>
   62 #include <isa/pnpvar.h>
   63 
   64 #include <contrib/dev/acpica/include/acpi.h>
   65 #include <contrib/dev/acpica/include/accommon.h>
   66 #include <contrib/dev/acpica/include/acnamesp.h>
   67 
   68 #include <dev/acpica/acpivar.h>
   69 #include <dev/acpica/acpiio.h>
   70 
   71 #include "pci_if.h"
   72 #include <dev/pci/pcivar.h>
   73 #include <dev/pci/pci_private.h>
   74 
   75 #include <vm/vm_param.h>
   76 
   77 MALLOC_DEFINE(M_ACPIDEV, "acpidev", "ACPI devices");
   78 
   79 /* Hooks for the ACPI CA debugging infrastructure */
   80 #define _COMPONENT      ACPI_BUS
   81 ACPI_MODULE_NAME("ACPI")
   82 
   83 static d_open_t         acpiopen;
   84 static d_close_t        acpiclose;
   85 static d_ioctl_t        acpiioctl;
   86 
   87 static struct cdevsw acpi_cdevsw = {
   88         .d_version =    D_VERSION,
   89         .d_open =       acpiopen,
   90         .d_close =      acpiclose,
   91         .d_ioctl =      acpiioctl,
   92         .d_name =       "acpi",
   93 };
   94 
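The cdevsw above backs the /dev/acpi control device created later in acpi_attach(); acpiconf(8) drives sleep requests through it. A minimal userland sketch, assuming the ACPIIO_REQSLPSTATE ioctl from <dev/acpica/acpiio.h> (the function name and error handling are illustrative only):

    /*
     * Hedged sketch: ask acpi0 to enter a sleep state via /dev/acpi.
     * ACPIIO_REQSLPSTATE is taken from <dev/acpica/acpiio.h>; everything
     * else here is illustrative.
     */
    #include <sys/types.h>
    #include <sys/ioctl.h>
    #include <dev/acpica/acpiio.h>
    #include <err.h>
    #include <fcntl.h>
    #include <unistd.h>

    static void
    request_sleep(int state)
    {
        int fd;

        if ((fd = open("/dev/acpi", O_RDWR)) == -1)
            err(1, "open(/dev/acpi)");
        if (ioctl(fd, ACPIIO_REQSLPSTATE, &state) == -1)
            err(1, "ACPIIO_REQSLPSTATE");
        close(fd);
    }
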
   95 /* Global mutex for locking access to the ACPI subsystem. */
   96 struct mtx      acpi_mutex;
   97 
   98 /* Bitmap of device quirks. */
   99 int             acpi_quirks;
  100 
  101 /* Supported sleep states. */
  102 static BOOLEAN  acpi_sleep_states[ACPI_S_STATE_COUNT];
  103 
  104 static int      acpi_modevent(struct module *mod, int event, void *junk);
  105 static int      acpi_probe(device_t dev);
  106 static int      acpi_attach(device_t dev);
  107 static int      acpi_suspend(device_t dev);
  108 static int      acpi_resume(device_t dev);
  109 static int      acpi_shutdown(device_t dev);
  110 static device_t acpi_add_child(device_t bus, int order, const char *name,
  111                         int unit);
  112 static int      acpi_print_child(device_t bus, device_t child);
  113 static void     acpi_probe_nomatch(device_t bus, device_t child);
  114 static void     acpi_driver_added(device_t dev, driver_t *driver);
  115 static int      acpi_read_ivar(device_t dev, device_t child, int index,
  116                         uintptr_t *result);
  117 static int      acpi_write_ivar(device_t dev, device_t child, int index,
  118                         uintptr_t value);
  119 static struct resource_list *acpi_get_rlist(device_t dev, device_t child);
  120 static int      acpi_sysres_alloc(device_t dev);
  121 static struct resource *acpi_alloc_resource(device_t bus, device_t child,
  122                         int type, int *rid, u_long start, u_long end,
  123                         u_long count, u_int flags);
  124 static int      acpi_release_resource(device_t bus, device_t child, int type,
  125                         int rid, struct resource *r);
  126 static void     acpi_delete_resource(device_t bus, device_t child, int type,
  127                     int rid);
  128 static uint32_t acpi_isa_get_logicalid(device_t dev);
  129 static int      acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count);
  130 static char     *acpi_device_id_probe(device_t bus, device_t dev, char **ids);
  131 static ACPI_STATUS acpi_device_eval_obj(device_t bus, device_t dev,
  132                     ACPI_STRING pathname, ACPI_OBJECT_LIST *parameters,
  133                     ACPI_BUFFER *ret);
  134 static int      acpi_device_pwr_for_sleep(device_t bus, device_t dev,
  135                     int *dstate);
  136 static ACPI_STATUS acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level,
  137                     void *context, void **retval);
  138 static ACPI_STATUS acpi_device_scan_children(device_t bus, device_t dev,
  139                     int max_depth, acpi_scan_cb_t user_fn, void *arg);
  140 static int      acpi_set_powerstate_method(device_t bus, device_t child,
  141                     int state);
  142 static int      acpi_isa_pnp_probe(device_t bus, device_t child,
  143                     struct isa_pnp_id *ids);
  144 static void     acpi_probe_children(device_t bus);
  145 static void     acpi_probe_order(ACPI_HANDLE handle, int *order);
  146 static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level,
  147                     void *context, void **status);
  148 static BOOLEAN  acpi_MatchHid(ACPI_HANDLE h, const char *hid);
  149 static void     acpi_sleep_enable(void *arg);
  150 static ACPI_STATUS acpi_sleep_disable(struct acpi_softc *sc);
  151 static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state);
  152 static void     acpi_shutdown_final(void *arg, int howto);
  153 static void     acpi_enable_fixed_events(struct acpi_softc *sc);
  154 static int      acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate);
  155 static int      acpi_wake_run_prep(ACPI_HANDLE handle, int sstate);
  156 static int      acpi_wake_prep_walk(int sstate);
  157 static int      acpi_wake_sysctl_walk(device_t dev);
  158 static int      acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS);
  159 static void     acpi_system_eventhandler_sleep(void *arg, int state);
  160 static void     acpi_system_eventhandler_wakeup(void *arg, int state);
  161 static int      acpi_sname2sstate(const char *sname);
  162 static const char *acpi_sstate2sname(int sstate);
  163 static int      acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS);
  164 static int      acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS);
  165 static int      acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS);
  166 static int      acpi_pm_func(u_long cmd, void *arg, ...);
  167 static int      acpi_child_location_str_method(device_t acdev, device_t child,
  168                                                char *buf, size_t buflen);
  169 static int      acpi_child_pnpinfo_str_method(device_t acdev, device_t child,
  170                                               char *buf, size_t buflen);
  171 #if defined(__i386__) || defined(__amd64__)
  172 static void     acpi_enable_pcie(void);
  173 #endif
  174 static void     acpi_hint_device_unit(device_t acdev, device_t child,
  175                     const char *name, int *unitp);
  176 
  177 static device_method_t acpi_methods[] = {
  178     /* Device interface */
  179     DEVMETHOD(device_probe,             acpi_probe),
  180     DEVMETHOD(device_attach,            acpi_attach),
  181     DEVMETHOD(device_shutdown,          acpi_shutdown),
  182     DEVMETHOD(device_detach,            bus_generic_detach),
  183     DEVMETHOD(device_suspend,           acpi_suspend),
  184     DEVMETHOD(device_resume,            acpi_resume),
  185 
  186     /* Bus interface */
  187     DEVMETHOD(bus_add_child,            acpi_add_child),
  188     DEVMETHOD(bus_print_child,          acpi_print_child),
  189     DEVMETHOD(bus_probe_nomatch,        acpi_probe_nomatch),
  190     DEVMETHOD(bus_driver_added,         acpi_driver_added),
  191     DEVMETHOD(bus_read_ivar,            acpi_read_ivar),
  192     DEVMETHOD(bus_write_ivar,           acpi_write_ivar),
  193     DEVMETHOD(bus_get_resource_list,    acpi_get_rlist),
  194     DEVMETHOD(bus_set_resource,         bus_generic_rl_set_resource),
  195     DEVMETHOD(bus_get_resource,         bus_generic_rl_get_resource),
  196     DEVMETHOD(bus_alloc_resource,       acpi_alloc_resource),
  197     DEVMETHOD(bus_release_resource,     acpi_release_resource),
  198     DEVMETHOD(bus_delete_resource,      acpi_delete_resource),
  199     DEVMETHOD(bus_child_pnpinfo_str,    acpi_child_pnpinfo_str_method),
  200     DEVMETHOD(bus_child_location_str,   acpi_child_location_str_method),
  201     DEVMETHOD(bus_activate_resource,    bus_generic_activate_resource),
  202     DEVMETHOD(bus_deactivate_resource,  bus_generic_deactivate_resource),
  203     DEVMETHOD(bus_setup_intr,           bus_generic_setup_intr),
  204     DEVMETHOD(bus_teardown_intr,        bus_generic_teardown_intr),
  205     DEVMETHOD(bus_hint_device_unit,     acpi_hint_device_unit),
  206 
  207     /* ACPI bus */
  208     DEVMETHOD(acpi_id_probe,            acpi_device_id_probe),
  209     DEVMETHOD(acpi_evaluate_object,     acpi_device_eval_obj),
  210     DEVMETHOD(acpi_pwr_for_sleep,       acpi_device_pwr_for_sleep),
  211     DEVMETHOD(acpi_scan_children,       acpi_device_scan_children),
  212 
  213     /* PCI emulation */
  214     DEVMETHOD(pci_set_powerstate,       acpi_set_powerstate_method),
  215 
  216     /* ISA emulation */
  217     DEVMETHOD(isa_pnp_probe,            acpi_isa_pnp_probe),
  218 
  219     {0, 0}
  220 };
  221 
  222 static driver_t acpi_driver = {
  223     "acpi",
  224     acpi_methods,
  225     sizeof(struct acpi_softc),
  226 };
  227 
  228 static devclass_t acpi_devclass;
  229 DRIVER_MODULE(acpi, nexus, acpi_driver, acpi_devclass, acpi_modevent, 0);
  230 MODULE_VERSION(acpi, 1);
  231 
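Child devices attach below this bus with the usual newbus boilerplate. The skeleton below is a hypothetical example (driver name, _HID list and softc are invented); it leans on the ACPI_ID_PROBE method that acpi_device_id_probe() implements further down:

    /* Hypothetical ACPI child driver skeleton (not part of acpi.c). */
    struct foo_softc {
        device_t    dev;
    };

    static char *foo_ids[] = { "PNP0C0D", NULL };   /* example _HID (lid) */

    static int
    foo_probe(device_t dev)
    {
        if (acpi_disabled("foo") ||
            ACPI_ID_PROBE(device_get_parent(dev), dev, foo_ids) == NULL)
            return (ENXIO);
        device_set_desc(dev, "Example ACPI child");
        return (0);
    }

    static int
    foo_attach(device_t dev)
    {
        struct foo_softc *sc = device_get_softc(dev);

        sc->dev = dev;
        return (0);
    }

    static device_method_t foo_methods[] = {
        DEVMETHOD(device_probe,     foo_probe),
        DEVMETHOD(device_attach,    foo_attach),
        {0, 0}
    };

    static driver_t foo_driver = {
        "foo",
        foo_methods,
        sizeof(struct foo_softc),
    };

    static devclass_t foo_devclass;
    DRIVER_MODULE(foo, acpi, foo_driver, foo_devclass, 0, 0);
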
  232 ACPI_SERIAL_DECL(acpi, "ACPI root bus");
  233 
  234 /* Local pools for managing system resources for ACPI child devices. */
  235 static struct rman acpi_rman_io, acpi_rman_mem;
  236 
  237 #define ACPI_MINIMUM_AWAKETIME  5
  238 
  239 /* Holds the description of the acpi0 device. */
  240 static char acpi_desc[ACPI_OEM_ID_SIZE + ACPI_OEM_TABLE_ID_SIZE + 2];
  241 
  242 SYSCTL_NODE(_debug, OID_AUTO, acpi, CTLFLAG_RD, NULL, "ACPI debugging");
  243 static char acpi_ca_version[12];
  244 SYSCTL_STRING(_debug_acpi, OID_AUTO, acpi_ca_version, CTLFLAG_RD,
  245               acpi_ca_version, 0, "Version of Intel ACPI-CA");
  246 
  247 /*
  248  * Allow override of whether methods execute in parallel or not.
  249  * Enable this for serial behavior, which fixes "AE_ALREADY_EXISTS"
  250  * errors for AML that really can't handle parallel method execution.
  251  * It is off by default since this breaks recursive methods and
  252  * some IBMs use such code.
  253  */
  254 static int acpi_serialize_methods;
  255 TUNABLE_INT("hw.acpi.serialize_methods", &acpi_serialize_methods);
  256 
  257 /* Allow users to dump Debug objects without ACPI debugger. */
  258 static int acpi_debug_objects;
  259 TUNABLE_INT("debug.acpi.enable_debug_objects", &acpi_debug_objects);
  260 SYSCTL_PROC(_debug_acpi, OID_AUTO, enable_debug_objects,
  261     CTLFLAG_RW | CTLTYPE_INT, NULL, 0, acpi_debug_objects_sysctl, "I",
  262     "Enable Debug objects");
  263 
  264 /* Allow the interpreter to ignore common mistakes in BIOS. */
  265 static int acpi_interpreter_slack = 1;
  266 TUNABLE_INT("debug.acpi.interpreter_slack", &acpi_interpreter_slack);
  267 SYSCTL_INT(_debug_acpi, OID_AUTO, interpreter_slack, CTLFLAG_RDTUN,
  268     &acpi_interpreter_slack, 1, "Turn on interpreter slack mode.");
  269 
  270 /* Power devices off and on in suspend and resume.  XXX Remove once tested. */
  271 static int acpi_do_powerstate = 1;
  272 TUNABLE_INT("debug.acpi.do_powerstate", &acpi_do_powerstate);
  273 SYSCTL_INT(_debug_acpi, OID_AUTO, do_powerstate, CTLFLAG_RW,
  274     &acpi_do_powerstate, 1, "Turn off devices when suspending.");
  275 
  276 /* Reset system clock while resuming.  XXX Remove once tested. */
  277 static int acpi_reset_clock = 1;
  278 TUNABLE_INT("debug.acpi.reset_clock", &acpi_reset_clock);
  279 SYSCTL_INT(_debug_acpi, OID_AUTO, reset_clock, CTLFLAG_RW,
  280     &acpi_reset_clock, 1, "Reset system clock while resuming.");
  281 
  282 /* Allow users to override quirks. */
  283 TUNABLE_INT("debug.acpi.quirks", &acpi_quirks);
  284 
  285 static int acpi_susp_bounce;
  286 SYSCTL_INT(_debug_acpi, OID_AUTO, suspend_bounce, CTLFLAG_RW,
  287     &acpi_susp_bounce, 0, "Don't actually suspend, just test devices.");
  288 
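These TUNABLE_INT() knobs are read from the kernel environment, so they are normally set in /boot/loader.conf before boot; the CTLFLAG_RW sysctls can also be changed at runtime with sysctl(8). Illustrative settings:

    # /boot/loader.conf
    hw.acpi.serialize_methods="1"        # serialise AML methods (AE_ALREADY_EXISTS workaround)
    debug.acpi.enable_debug_objects="1"  # print stores to the AML Debug object
    debug.acpi.interpreter_slack="1"     # default: tolerate common BIOS mistakes

    # At runtime, for the CTLFLAG_RW knobs:
    sysctl debug.acpi.suspend_bounce=1   # exercise device suspend/resume without sleeping
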
  289 /*
  290  * ACPI can only be loaded as a module by the loader; activating it after
  291  * system bootstrap time is not useful, and can be fatal to the system.
  292  * It also cannot be unloaded, since the entire system bus hierarchy hangs
  293  * off it.
  294  */
  295 static int
  296 acpi_modevent(struct module *mod, int event, void *junk)
  297 {
  298     switch (event) {
  299     case MOD_LOAD:
  300         if (!cold) {
  301             printf("The ACPI driver cannot be loaded after boot.\n");
  302             return (EPERM);
  303         }
  304         break;
  305     case MOD_UNLOAD:
  306         if (!cold && power_pm_get_type() == POWER_PM_TYPE_ACPI)
  307             return (EBUSY);
  308         break;
  309     default:
  310         break;
  311     }
  312     return (0);
  313 }
  314 
  315 /*
  316  * Perform early initialization.
  317  */
  318 ACPI_STATUS
  319 acpi_Startup(void)
  320 {
  321     static int started = 0;
  322     ACPI_STATUS status;
  323     int val;
  324 
  325     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
  326 
  327     /* Only run the startup code once.  The MADT driver also calls this. */
  328     if (started)
  329         return_VALUE (AE_OK);
  330     started = 1;
  331 
  332     /*
  333      * Pre-allocate space for RSDT/XSDT and DSDT tables and allow resizing
  334      * if more tables exist.
  335      */
  336     if (ACPI_FAILURE(status = AcpiInitializeTables(NULL, 2, TRUE))) {
  337         printf("ACPI: Table initialisation failed: %s\n",
  338             AcpiFormatException(status));
  339         return_VALUE (status);
  340     }
  341 
  342     /* Set up any quirks we have for this system. */
  343     if (acpi_quirks == ACPI_Q_OK)
  344         acpi_table_quirks(&acpi_quirks);
  345 
  346     /* If the user manually set the disabled hint to 0, force-enable ACPI. */
  347     if (resource_int_value("acpi", 0, "disabled", &val) == 0 && val == 0)
  348         acpi_quirks &= ~ACPI_Q_BROKEN;
  349     if (acpi_quirks & ACPI_Q_BROKEN) {
  350         printf("ACPI disabled by blacklist.  Contact your BIOS vendor.\n");
  351         status = AE_SUPPORT;
  352     }
  353 
  354     return_VALUE (status);
  355 }
  356 
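Because acpi_Startup() and acpi_identify() (below) consult the standard device hints, a blacklisted machine can be force-enabled, or ACPI kept from attaching at all, from /boot/loader.conf. The hint names follow the resource_*_value() lookups in the code:

    # /boot/loader.conf
    hint.acpi.0.disabled="0"     # clear ACPI_Q_BROKEN on a blacklisted machine
    #hint.acpi.0.disabled="1"    # or disable ACPI outright
    #debug.acpi.quirks="0"       # override the quirk bitmap directly
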
  357 /*
  358  * Detect ACPI and perform early initialisation.
  359  */
  360 int
  361 acpi_identify(void)
  362 {
  363     ACPI_TABLE_RSDP     *rsdp;
  364     ACPI_TABLE_HEADER   *rsdt;
  365     ACPI_PHYSICAL_ADDRESS paddr;
  366     struct sbuf         sb;
  367 
  368     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
  369 
  370     if (!cold)
  371         return (ENXIO);
  372 
  373     /* Check that we haven't been disabled with a hint. */
  374     if (resource_disabled("acpi", 0))
  375         return (ENXIO);
  376 
  377     /* Check for other PM systems. */
  378     if (power_pm_get_type() != POWER_PM_TYPE_NONE &&
  379         power_pm_get_type() != POWER_PM_TYPE_ACPI) {
  380         printf("ACPI identify failed, other PM system enabled.\n");
  381         return (ENXIO);
  382     }
  383 
  384     /* Initialize root tables. */
  385     if (ACPI_FAILURE(acpi_Startup())) {
  386         printf("ACPI: Try disabling either ACPI or apic support.\n");
  387         return (ENXIO);
  388     }
  389 
  390     if ((paddr = AcpiOsGetRootPointer()) == 0 ||
  391         (rsdp = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_RSDP))) == NULL)
  392         return (ENXIO);
  393     if (rsdp->Revision > 1 && rsdp->XsdtPhysicalAddress != 0)
  394         paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->XsdtPhysicalAddress;
  395     else
  396         paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->RsdtPhysicalAddress;
  397     AcpiOsUnmapMemory(rsdp, sizeof(ACPI_TABLE_RSDP));
  398 
  399     if ((rsdt = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_HEADER))) == NULL)
  400         return (ENXIO);
  401     sbuf_new(&sb, acpi_desc, sizeof(acpi_desc), SBUF_FIXEDLEN);
  402     sbuf_bcat(&sb, rsdt->OemId, ACPI_OEM_ID_SIZE);
  403     sbuf_trim(&sb);
  404     sbuf_putc(&sb, ' ');
  405     sbuf_bcat(&sb, rsdt->OemTableId, ACPI_OEM_TABLE_ID_SIZE);
  406     sbuf_trim(&sb);
  407     sbuf_finish(&sb);
  408     sbuf_delete(&sb);
  409     AcpiOsUnmapMemory(rsdt, sizeof(ACPI_TABLE_HEADER));
  410 
  411     snprintf(acpi_ca_version, sizeof(acpi_ca_version), "%x", ACPI_CA_VERSION);
  412 
  413     return (0);
  414 }
  415 
  416 /*
  417  * Fetch some descriptive data from ACPI to put in our attach message.
  418  */
  419 static int
  420 acpi_probe(device_t dev)
  421 {
  422 
  423     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
  424 
  425     device_set_desc(dev, acpi_desc);
  426 
  427     return_VALUE (0);
  428 }
  429 
  430 static int
  431 acpi_attach(device_t dev)
  432 {
  433     struct acpi_softc   *sc;
  434     ACPI_STATUS         status;
  435     int                 error, state;
  436     UINT32              flags;
  437     UINT8               TypeA, TypeB;
  438     char                *env;
  439 
  440     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
  441 
  442     sc = device_get_softc(dev);
  443     sc->acpi_dev = dev;
  444     callout_init(&sc->susp_force_to, TRUE);
  445 
  446     error = ENXIO;
  447 
  448     /* Initialize resource manager. */
  449     acpi_rman_io.rm_type = RMAN_ARRAY;
  450     acpi_rman_io.rm_start = 0;
  451     acpi_rman_io.rm_end = 0xffff;
  452     acpi_rman_io.rm_descr = "ACPI I/O ports";
  453     if (rman_init(&acpi_rman_io) != 0)
  454         panic("acpi rman_init IO ports failed");
  455     acpi_rman_mem.rm_type = RMAN_ARRAY;
  456     acpi_rman_mem.rm_start = 0;
  457     acpi_rman_mem.rm_end = ~0ul;
  458     acpi_rman_mem.rm_descr = "ACPI I/O memory addresses";
  459     if (rman_init(&acpi_rman_mem) != 0)
  460         panic("acpi rman_init memory failed");
  461 
  462     /* Initialise the ACPI mutex */
  463     mtx_init(&acpi_mutex, "ACPI global lock", NULL, MTX_DEF);
  464 
  465     /*
  466      * Set the globals from our tunables.  This is needed because ACPI-CA
  467      * uses UINT8 for some values and we have no tunable_byte.
  468      */
  469     AcpiGbl_AllMethodsSerialized = acpi_serialize_methods ? TRUE : FALSE;
  470     AcpiGbl_EnableInterpreterSlack = acpi_interpreter_slack ? TRUE : FALSE;
  471     AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? TRUE : FALSE;
  472 
  473 #ifndef ACPI_DEBUG
  474     /*
  475      * Disable all debugging layers and levels.
  476      */
  477     AcpiDbgLayer = 0;
  478     AcpiDbgLevel = 0;
  479 #endif
  480 
  481     /* Start up the ACPI CA subsystem. */
  482     status = AcpiInitializeSubsystem();
  483     if (ACPI_FAILURE(status)) {
  484         device_printf(dev, "Could not initialize Subsystem: %s\n",
  485                       AcpiFormatException(status));
  486         goto out;
  487     }
  488 
  489     /* Load ACPI name space. */
  490     status = AcpiLoadTables();
  491     if (ACPI_FAILURE(status)) {
  492         device_printf(dev, "Could not load Namespace: %s\n",
  493                       AcpiFormatException(status));
  494         goto out;
  495     }
  496 
  497 #if defined(__i386__) || defined(__amd64__)
  498     /* Handle MCFG table if present. */
  499     acpi_enable_pcie();
  500 #endif
  501 
  502     /* Install the default address space handlers. */
  503     status = AcpiInstallAddressSpaceHandler(ACPI_ROOT_OBJECT,
  504                 ACPI_ADR_SPACE_SYSTEM_MEMORY, ACPI_DEFAULT_HANDLER, NULL, NULL);
  505     if (ACPI_FAILURE(status)) {
  506         device_printf(dev, "Could not initialise SystemMemory handler: %s\n",
  507                       AcpiFormatException(status));
  508         goto out;
  509     }
  510     status = AcpiInstallAddressSpaceHandler(ACPI_ROOT_OBJECT,
  511                 ACPI_ADR_SPACE_SYSTEM_IO, ACPI_DEFAULT_HANDLER, NULL, NULL);
  512     if (ACPI_FAILURE(status)) {
  513         device_printf(dev, "Could not initialise SystemIO handler: %s\n",
  514                       AcpiFormatException(status));
  515         goto out;
  516     }
  517     status = AcpiInstallAddressSpaceHandler(ACPI_ROOT_OBJECT,
  518                 ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
  519     if (ACPI_FAILURE(status)) {
  520         device_printf(dev, "could not initialise PciConfig handler: %s\n",
  521                       AcpiFormatException(status));
  522         goto out;
  523     }
  524 
  525     /*
  526      * Note that some systems (specifically, those with namespace evaluation
  527      * issues that require the avoidance of parts of the namespace) must
  528      * avoid running _INI and _STA on everything, as well as dodging the final
  529      * object init pass.
  530      *
   531  * For these devices, we set ACPI_NO_DEVICE_INIT and ACPI_NO_OBJECT_INIT.
  532      *
  533      * XXX We should arrange for the object init pass after we have attached
  534      *     all our child devices, but on many systems it works here.
  535      */
  536     flags = 0;
  537     if (testenv("debug.acpi.avoid"))
  538         flags = ACPI_NO_DEVICE_INIT | ACPI_NO_OBJECT_INIT;
  539 
  540     /* Bring the hardware and basic handlers online. */
  541     if (ACPI_FAILURE(status = AcpiEnableSubsystem(flags))) {
  542         device_printf(dev, "Could not enable ACPI: %s\n",
  543                       AcpiFormatException(status));
  544         goto out;
  545     }
  546 
  547     /*
  548      * Call the ECDT probe function to provide EC functionality before
  549      * the namespace has been evaluated.
  550      *
  551      * XXX This happens before the sysresource devices have been probed and
  552      * attached so its resources come from nexus0.  In practice, this isn't
  553      * a problem but should be addressed eventually.
  554      */
  555     acpi_ec_ecdt_probe(dev);
  556 
  557     /* Bring device objects and regions online. */
  558     if (ACPI_FAILURE(status = AcpiInitializeObjects(flags))) {
  559         device_printf(dev, "Could not initialize ACPI objects: %s\n",
  560                       AcpiFormatException(status));
  561         goto out;
  562     }
  563 
  564     /*
  565      * Setup our sysctl tree.
  566      *
  567      * XXX: This doesn't check to make sure that none of these fail.
  568      */
  569     sysctl_ctx_init(&sc->acpi_sysctl_ctx);
  570     sc->acpi_sysctl_tree = SYSCTL_ADD_NODE(&sc->acpi_sysctl_ctx,
  571                                SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
  572                                device_get_name(dev), CTLFLAG_RD, 0, "");
  573     SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  574         OID_AUTO, "supported_sleep_state", CTLTYPE_STRING | CTLFLAG_RD,
  575         0, 0, acpi_supported_sleep_state_sysctl, "A", "");
  576     SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  577         OID_AUTO, "power_button_state", CTLTYPE_STRING | CTLFLAG_RW,
  578         &sc->acpi_power_button_sx, 0, acpi_sleep_state_sysctl, "A", "");
  579     SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  580         OID_AUTO, "sleep_button_state", CTLTYPE_STRING | CTLFLAG_RW,
  581         &sc->acpi_sleep_button_sx, 0, acpi_sleep_state_sysctl, "A", "");
  582     SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  583         OID_AUTO, "lid_switch_state", CTLTYPE_STRING | CTLFLAG_RW,
  584         &sc->acpi_lid_switch_sx, 0, acpi_sleep_state_sysctl, "A", "");
  585     SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  586         OID_AUTO, "standby_state", CTLTYPE_STRING | CTLFLAG_RW,
  587         &sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A", "");
  588     SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  589         OID_AUTO, "suspend_state", CTLTYPE_STRING | CTLFLAG_RW,
  590         &sc->acpi_suspend_sx, 0, acpi_sleep_state_sysctl, "A", "");
  591     SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  592         OID_AUTO, "sleep_delay", CTLFLAG_RW, &sc->acpi_sleep_delay, 0,
  593         "sleep delay");
  594     SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  595         OID_AUTO, "s4bios", CTLFLAG_RW, &sc->acpi_s4bios, 0, "S4BIOS mode");
  596     SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  597         OID_AUTO, "verbose", CTLFLAG_RW, &sc->acpi_verbose, 0, "verbose mode");
  598     SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  599         OID_AUTO, "disable_on_reboot", CTLFLAG_RW,
  600         &sc->acpi_do_disable, 0, "Disable ACPI when rebooting/halting system");
  601     SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
  602         OID_AUTO, "handle_reboot", CTLFLAG_RW,
  603         &sc->acpi_handle_reboot, 0, "Use ACPI Reset Register to reboot");
  604 
  605     /*
  606      * Default to 1 second before sleeping to give some machines time to
  607      * stabilize.
  608      */
  609     sc->acpi_sleep_delay = 1;
  610     if (bootverbose)
  611         sc->acpi_verbose = 1;
  612     if ((env = getenv("hw.acpi.verbose")) != NULL) {
  613         if (strcmp(env, "") != 0)
  614             sc->acpi_verbose = 1;
  615         freeenv(env);
  616     }
  617 
  618     /* Only enable S4BIOS by default if the FACS says it is available. */
  619     if (AcpiGbl_FACS->Flags & ACPI_FACS_S4_BIOS_PRESENT)
  620         sc->acpi_s4bios = 1;
  621 
  622     /* Probe all supported sleep states. */
  623     acpi_sleep_states[ACPI_STATE_S0] = TRUE;
  624     for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++)
  625         if (ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB)))
  626             acpi_sleep_states[state] = TRUE;
  627 
  628     /*
  629      * Dispatch the default sleep state to devices.  The lid switch is set
  630      * to UNKNOWN by default to avoid surprising users.
  631      */
  632     sc->acpi_power_button_sx = acpi_sleep_states[ACPI_STATE_S5] ?
  633         ACPI_STATE_S5 : ACPI_STATE_UNKNOWN;
  634     sc->acpi_lid_switch_sx = ACPI_STATE_UNKNOWN;
  635     sc->acpi_standby_sx = acpi_sleep_states[ACPI_STATE_S1] ?
  636         ACPI_STATE_S1 : ACPI_STATE_UNKNOWN;
  637     sc->acpi_suspend_sx = acpi_sleep_states[ACPI_STATE_S3] ?
  638         ACPI_STATE_S3 : ACPI_STATE_UNKNOWN;
  639 
  640     /* Pick the first valid sleep state for the sleep button default. */
  641     sc->acpi_sleep_button_sx = ACPI_STATE_UNKNOWN;
  642     for (state = ACPI_STATE_S1; state <= ACPI_STATE_S4; state++)
  643         if (acpi_sleep_states[state]) {
  644             sc->acpi_sleep_button_sx = state;
  645             break;
  646         }
  647 
  648     acpi_enable_fixed_events(sc);
  649 
  650     /*
  651      * Scan the namespace and attach/initialise children.
  652      */
  653 
  654     /* Register our shutdown handler. */
  655     EVENTHANDLER_REGISTER(shutdown_final, acpi_shutdown_final, sc,
  656         SHUTDOWN_PRI_LAST);
  657 
  658     /*
  659      * Register our acpi event handlers.
  660      * XXX should be configurable eg. via userland policy manager.
  661      */
  662     EVENTHANDLER_REGISTER(acpi_sleep_event, acpi_system_eventhandler_sleep,
  663         sc, ACPI_EVENT_PRI_LAST);
  664     EVENTHANDLER_REGISTER(acpi_wakeup_event, acpi_system_eventhandler_wakeup,
  665         sc, ACPI_EVENT_PRI_LAST);
  666 
  667     /* Flag our initial states. */
  668     sc->acpi_enabled = TRUE;
  669     sc->acpi_sstate = ACPI_STATE_S0;
  670     sc->acpi_sleep_disabled = TRUE;
  671 
  672     /* Create the control device */
  673     sc->acpi_dev_t = make_dev(&acpi_cdevsw, 0, UID_ROOT, GID_WHEEL, 0644,
  674                               "acpi");
  675     sc->acpi_dev_t->si_drv1 = sc;
  676 
  677     if ((error = acpi_machdep_init(dev)))
  678         goto out;
  679 
  680     /* Register ACPI again to pass the correct argument of pm_func. */
  681     power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc);
  682 
  683     if (!acpi_disabled("bus"))
  684         acpi_probe_children(dev);
  685 
  686     /* Allow sleep request after a while. */
  687     timeout(acpi_sleep_enable, sc, hz * ACPI_MINIMUM_AWAKETIME);
  688 
  689     error = 0;
  690 
  691  out:
  692     return_VALUE (error);
  693 }
  694 
  695 static int
  696 acpi_suspend(device_t dev)
  697 {
  698     device_t child, *devlist;
  699     int error, i, numdevs, pstate;
  700 
  701     GIANT_REQUIRED;
  702 
  703     /* First give child devices a chance to suspend. */
  704     error = bus_generic_suspend(dev);
  705     if (error)
  706         return (error);
  707 
  708     /*
  709      * Now, set them into the appropriate power state, usually D3.  If the
  710      * device has an _SxD method for the next sleep state, use that power
  711      * state instead.
  712      */
  713     error = device_get_children(dev, &devlist, &numdevs);
  714     if (error)
  715         return (error);
  716     for (i = 0; i < numdevs; i++) {
  717         /* If the device is not attached, we've powered it down elsewhere. */
  718         child = devlist[i];
  719         if (!device_is_attached(child))
  720             continue;
  721 
  722         /*
  723          * Default to D3 for all sleep states.  The _SxD method is optional
  724          * so set the powerstate even if it's absent.
  725          */
  726         pstate = PCI_POWERSTATE_D3;
  727         error = acpi_device_pwr_for_sleep(device_get_parent(child),
  728             child, &pstate);
  729         if ((error == 0 || error == ESRCH) && acpi_do_powerstate)
  730             pci_set_powerstate(child, pstate);
  731     }
  732     free(devlist, M_TEMP);
  733     error = 0;
  734 
  735     return (error);
  736 }
  737 
  738 static int
  739 acpi_resume(device_t dev)
  740 {
  741     ACPI_HANDLE handle;
  742     int i, numdevs, error;
  743     device_t child, *devlist;
  744 
  745     GIANT_REQUIRED;
  746 
  747     /*
  748      * Put all devices in D0 before resuming them.  Call _S0D on each one
  749      * since some systems expect this.
  750      */
  751     error = device_get_children(dev, &devlist, &numdevs);
  752     if (error)
  753         return (error);
  754     for (i = 0; i < numdevs; i++) {
  755         child = devlist[i];
  756         handle = acpi_get_handle(child);
  757         if (handle)
  758             AcpiEvaluateObject(handle, "_S0D", NULL, NULL);
  759         if (device_is_attached(child) && acpi_do_powerstate)
  760             pci_set_powerstate(child, PCI_POWERSTATE_D0);
  761     }
  762     free(devlist, M_TEMP);
  763 
  764     return (bus_generic_resume(dev));
  765 }
  766 
  767 static int
  768 acpi_shutdown(device_t dev)
  769 {
  770 
  771     GIANT_REQUIRED;
  772 
  773     /* Allow children to shutdown first. */
  774     bus_generic_shutdown(dev);
  775 
  776     /*
  777      * Enable any GPEs that are able to power-on the system (i.e., RTC).
  778      * Also, disable any that are not valid for this state (most).
  779      */
  780     acpi_wake_prep_walk(ACPI_STATE_S5);
  781 
  782     return (0);
  783 }
  784 
  785 /*
  786  * Handle a new device being added
  787  */
  788 static device_t
  789 acpi_add_child(device_t bus, int order, const char *name, int unit)
  790 {
  791     struct acpi_device  *ad;
  792     device_t            child;
  793 
  794     if ((ad = malloc(sizeof(*ad), M_ACPIDEV, M_NOWAIT | M_ZERO)) == NULL)
  795         return (NULL);
  796 
  797     resource_list_init(&ad->ad_rl);
  798 
  799     child = device_add_child_ordered(bus, order, name, unit);
  800     if (child != NULL)
  801         device_set_ivars(child, ad);
  802     else
  803         free(ad, M_ACPIDEV);
  804     return (child);
  805 }
  806 
  807 static int
  808 acpi_print_child(device_t bus, device_t child)
  809 {
  810     struct acpi_device   *adev = device_get_ivars(child);
  811     struct resource_list *rl = &adev->ad_rl;
  812     int retval = 0;
  813 
  814     retval += bus_print_child_header(bus, child);
  815     retval += resource_list_print_type(rl, "port",  SYS_RES_IOPORT, "%#lx");
  816     retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#lx");
  817     retval += resource_list_print_type(rl, "irq",   SYS_RES_IRQ,    "%ld");
  818     retval += resource_list_print_type(rl, "drq",   SYS_RES_DRQ,    "%ld");
  819     if (device_get_flags(child))
  820         retval += printf(" flags %#x", device_get_flags(child));
  821     retval += bus_print_child_footer(bus, child);
  822 
  823     return (retval);
  824 }
  825 
  826 /*
  827  * If this device is an ACPI child but no one claimed it, attempt
  828  * to power it off.  We'll power it back up when a driver is added.
  829  *
  830  * XXX Disabled for now since many necessary devices (like fdc and
  831  * ATA) don't claim the devices we created for them but still expect
  832  * them to be powered up.
  833  */
  834 static void
  835 acpi_probe_nomatch(device_t bus, device_t child)
  836 {
  837 #ifdef ACPI_ENABLE_POWERDOWN_NODRIVER
  838     pci_set_powerstate(child, PCI_POWERSTATE_D3);
  839 #endif
  840 }
  841 
  842 /*
  843  * If a new driver has a chance to probe a child, first power it up.
  844  *
  845  * XXX Disabled for now (see acpi_probe_nomatch for details).
  846  */
  847 static void
  848 acpi_driver_added(device_t dev, driver_t *driver)
  849 {
  850     device_t child, *devlist;
  851     int i, numdevs;
  852 
  853     DEVICE_IDENTIFY(driver, dev);
  854     if (device_get_children(dev, &devlist, &numdevs))
  855             return;
  856     for (i = 0; i < numdevs; i++) {
  857         child = devlist[i];
  858         if (device_get_state(child) == DS_NOTPRESENT) {
  859 #ifdef ACPI_ENABLE_POWERDOWN_NODRIVER
  860             pci_set_powerstate(child, PCI_POWERSTATE_D0);
  861             if (device_probe_and_attach(child) != 0)
  862                 pci_set_powerstate(child, PCI_POWERSTATE_D3);
  863 #else
  864             device_probe_and_attach(child);
  865 #endif
  866         }
  867     }
  868     free(devlist, M_TEMP);
  869 }
  870 
  871 /* Location hint for devctl(8) */
  872 static int
  873 acpi_child_location_str_method(device_t cbdev, device_t child, char *buf,
  874     size_t buflen)
  875 {
  876     struct acpi_device *dinfo = device_get_ivars(child);
  877 
  878     if (dinfo->ad_handle)
  879         snprintf(buf, buflen, "handle=%s", acpi_name(dinfo->ad_handle));
  880     else
  881         snprintf(buf, buflen, "unknown");
  882     return (0);
  883 }
  884 
  885 /* PnP information for devctl(8) */
  886 static int
  887 acpi_child_pnpinfo_str_method(device_t cbdev, device_t child, char *buf,
  888     size_t buflen)
  889 {
  890     struct acpi_device *dinfo = device_get_ivars(child);
  891     ACPI_DEVICE_INFO *adinfo;
  892 
  893     if (ACPI_FAILURE(AcpiGetObjectInfo(dinfo->ad_handle, &adinfo))) {
  894         snprintf(buf, buflen, "unknown");
  895         return (0);
  896     }
  897 
  898     snprintf(buf, buflen, "_HID=%s _UID=%lu",
  899         (adinfo->Valid & ACPI_VALID_HID) ?
  900         adinfo->HardwareId.String : "none",
  901         (adinfo->Valid & ACPI_VALID_UID) ?
  902         strtoul(adinfo->UniqueId.String, NULL, 10) : 0UL);
  903     AcpiOsFree(adinfo);
  904 
  905     return (0);
  906 }
  907 
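Together these two methods give devctl(8) and devd(8) a location and pnpinfo string for each ACPI-enumerated child. A typical pair, with purely illustrative values, would look like:

    handle=\_SB_.PCI0.LPC_.LID_        (from acpi_child_location_str_method)
    _HID=PNP0C0D _UID=0                (from acpi_child_pnpinfo_str_method)
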
  908 /*
  909  * Handle per-device ivars
  910  */
  911 static int
  912 acpi_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
  913 {
  914     struct acpi_device  *ad;
  915 
  916     if ((ad = device_get_ivars(child)) == NULL) {
  917         device_printf(child, "device has no ivars\n");
  918         return (ENOENT);
  919     }
  920 
  921     /* ACPI and ISA compatibility ivars */
  922     switch(index) {
  923     case ACPI_IVAR_HANDLE:
  924         *(ACPI_HANDLE *)result = ad->ad_handle;
  925         break;
  926     case ACPI_IVAR_MAGIC:
  927         *(uintptr_t *)result = ad->ad_magic;
  928         break;
  929     case ACPI_IVAR_PRIVATE:
  930         *(void **)result = ad->ad_private;
  931         break;
  932     case ACPI_IVAR_FLAGS:
  933         *(int *)result = ad->ad_flags;
  934         break;
  935     case ISA_IVAR_VENDORID:
  936     case ISA_IVAR_SERIAL:
  937     case ISA_IVAR_COMPATID:
  938         *(int *)result = -1;
  939         break;
  940     case ISA_IVAR_LOGICALID:
  941         *(int *)result = acpi_isa_get_logicalid(child);
  942         break;
  943     default:
  944         return (ENOENT);
  945     }
  946 
  947     return (0);
  948 }
  949 
  950 static int
  951 acpi_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
  952 {
  953     struct acpi_device  *ad;
  954 
  955     if ((ad = device_get_ivars(child)) == NULL) {
  956         device_printf(child, "device has no ivars\n");
  957         return (ENOENT);
  958     }
  959 
  960     switch(index) {
  961     case ACPI_IVAR_HANDLE:
  962         ad->ad_handle = (ACPI_HANDLE)value;
  963         break;
  964     case ACPI_IVAR_MAGIC:
  965         ad->ad_magic = (uintptr_t)value;
  966         break;
  967     case ACPI_IVAR_PRIVATE:
  968         ad->ad_private = (void *)value;
  969         break;
  970     case ACPI_IVAR_FLAGS:
  971         ad->ad_flags = (int)value;
  972         break;
  973     default:
  974         panic("bad ivar write request (%d)", index);
  975         return (ENOENT);
  976     }
  977 
  978     return (0);
  979 }
  980 
  981 /*
  982  * Handle child resource allocation/removal
  983  */
  984 static struct resource_list *
  985 acpi_get_rlist(device_t dev, device_t child)
  986 {
  987     struct acpi_device          *ad;
  988 
  989     ad = device_get_ivars(child);
  990     return (&ad->ad_rl);
  991 }
  992 
  993 static int
  994 acpi_match_resource_hint(device_t dev, int type, long value)
  995 {
  996     struct acpi_device *ad = device_get_ivars(dev);
  997     struct resource_list *rl = &ad->ad_rl;
  998     struct resource_list_entry *rle;
  999 
 1000     STAILQ_FOREACH(rle, rl, link) {
 1001         if (rle->type != type)
 1002             continue;
 1003         if (rle->start <= value && rle->end >= value)
 1004             return (1);
 1005     }
 1006     return (0);
 1007 }
 1008 
 1009 /*
 1010  * Wire device unit numbers based on resource matches in hints.
 1011  */
 1012 static void
 1013 acpi_hint_device_unit(device_t acdev, device_t child, const char *name,
 1014     int *unitp)
 1015 {
 1016     const char *s;
 1017     long value;
 1018     int line, matches, unit;
 1019 
 1020     /*
 1021      * Iterate over all the hints for the devices with the specified
 1022      * name to see if one's resources are a subset of this device.
 1023      */
 1024     line = 0;
 1025     for (;;) {
 1026         if (resource_find_dev(&line, name, &unit, "at", NULL) != 0)
 1027             break;
 1028 
 1029         /* Must have an "at" for acpi or isa. */
 1030         resource_string_value(name, unit, "at", &s);
 1031         if (!(strcmp(s, "acpi0") == 0 || strcmp(s, "acpi") == 0 ||
 1032             strcmp(s, "isa0") == 0 || strcmp(s, "isa") == 0))
 1033             continue;
 1034 
 1035         /*
 1036          * Check for matching resources.  We must have at least one match.
 1037          * Since I/O and memory resources cannot be shared, if we get a
 1038          * match on either of those, ignore any mismatches in IRQs or DRQs.
 1039          *
 1040          * XXX: We may want to revisit this to be more lenient and wire
 1041          * as long as it gets one match.
 1042          */
 1043         matches = 0;
 1044         if (resource_long_value(name, unit, "port", &value) == 0) {
 1045             /*
 1046              * Floppy drive controllers are notorious for having a
 1047              * wide variety of resources not all of which include the
 1048              * first port that is specified by the hint (typically
 1049              * 0x3f0) (see the comment above fdc_isa_alloc_resources()
 1050              * in fdc_isa.c).  However, they do all seem to include
 1051              * port + 2 (e.g. 0x3f2) so for a floppy device, look for
 1052              * 'value + 2' in the port resources instead of the hint
 1053              * value.
 1054              */
 1055             if (strcmp(name, "fdc") == 0)
 1056                 value += 2;
 1057             if (acpi_match_resource_hint(child, SYS_RES_IOPORT, value))
 1058                 matches++;
 1059             else
 1060                 continue;
 1061         }
 1062         if (resource_long_value(name, unit, "maddr", &value) == 0) {
 1063             if (acpi_match_resource_hint(child, SYS_RES_MEMORY, value))
 1064                 matches++;
 1065             else
 1066                 continue;
 1067         }
 1068         if (matches > 0)
 1069             goto matched;
 1070         if (resource_long_value(name, unit, "irq", &value) == 0) {
 1071             if (acpi_match_resource_hint(child, SYS_RES_IRQ, value))
 1072                 matches++;
 1073             else
 1074                 continue;
 1075         }
 1076         if (resource_long_value(name, unit, "drq", &value) == 0) {
 1077             if (acpi_match_resource_hint(child, SYS_RES_DRQ, value))
 1078                 matches++;
 1079             else
 1080                 continue;
 1081         }
 1082 
 1083     matched:
 1084         if (matches > 0) {
 1085             /* We have a winner! */
 1086             *unitp = unit;
 1087             break;
 1088         }
 1089     }
 1090 }
 1091 
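This is the routine that lets an entry in /boot/device.hints wire a unit number to a matching ACPI-enumerated device. A fragment it would accept, using the stock floppy-controller values, looks like this (the port hint names 0x3f0 even though the match is done on 0x3f2, per the fdc quirk above):

    hint.fdc.0.at="acpi0"
    hint.fdc.0.port="0x3F0"
    hint.fdc.0.irq="6"
    hint.fdc.0.drq="2"
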
 1092 /*
 1093  * Pre-allocate/manage all memory and IO resources.  Since rman can't handle
 1094  * duplicates, we merge any in the sysresource attach routine.
 1095  */
 1096 static int
 1097 acpi_sysres_alloc(device_t dev)
 1098 {
 1099     struct resource *res;
 1100     struct resource_list *rl;
 1101     struct resource_list_entry *rle;
 1102     struct rman *rm;
 1103     char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL };
 1104     device_t *children;
 1105     int child_count, i;
 1106 
 1107     /*
 1108      * Probe/attach any sysresource devices.  This would be unnecessary if we
 1109      * had multi-pass probe/attach.
 1110      */
 1111     if (device_get_children(dev, &children, &child_count) != 0)
 1112         return (ENXIO);
 1113     for (i = 0; i < child_count; i++) {
 1114         if (ACPI_ID_PROBE(dev, children[i], sysres_ids) != NULL)
 1115             device_probe_and_attach(children[i]);
 1116     }
 1117     free(children, M_TEMP);
 1118 
 1119     rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev);
 1120     STAILQ_FOREACH(rle, rl, link) {
 1121         if (rle->res != NULL) {
 1122             device_printf(dev, "duplicate resource for %lx\n", rle->start);
 1123             continue;
 1124         }
 1125 
 1126         /* Only memory and IO resources are valid here. */
 1127         switch (rle->type) {
 1128         case SYS_RES_IOPORT:
 1129             rm = &acpi_rman_io;
 1130             break;
 1131         case SYS_RES_MEMORY:
 1132             rm = &acpi_rman_mem;
 1133             break;
 1134         default:
 1135             continue;
 1136         }
 1137 
 1138         /* Pre-allocate resource and add to our rman pool. */
 1139         res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, rle->type,
 1140             &rle->rid, rle->start, rle->start + rle->count - 1, rle->count, 0);
 1141         if (res != NULL) {
 1142             rman_manage_region(rm, rman_get_start(res), rman_get_end(res));
 1143             rle->res = res;
 1144         } else
 1145             device_printf(dev, "reservation of %lx, %lx (%d) failed\n",
 1146                 rle->start, rle->count, rle->type);
 1147     }
 1148     return (0);
 1149 }
 1150 
 1151 static struct resource *
 1152 acpi_alloc_resource(device_t bus, device_t child, int type, int *rid,
 1153     u_long start, u_long end, u_long count, u_int flags)
 1154 {
 1155     ACPI_RESOURCE ares;
 1156     struct acpi_device *ad = device_get_ivars(child);
 1157     struct resource_list *rl = &ad->ad_rl;
 1158     struct resource_list_entry *rle;
 1159     struct resource *res;
 1160     struct rman *rm;
 1161 
 1162     res = NULL;
 1163 
 1164     /* We only handle memory and IO resources through rman. */
 1165     switch (type) {
 1166     case SYS_RES_IOPORT:
 1167         rm = &acpi_rman_io;
 1168         break;
 1169     case SYS_RES_MEMORY:
 1170         rm = &acpi_rman_mem;
 1171         break;
 1172     default:
 1173         rm = NULL;
 1174     }
 1175             
 1176     ACPI_SERIAL_BEGIN(acpi);
 1177 
 1178     /*
 1179      * If this is an allocation of the "default" range for a given RID, and
 1180      * we know what the resources for this device are (i.e., they're on the
 1181      * child's resource list), use those start/end values.
 1182      */
 1183     if (bus == device_get_parent(child) && start == 0UL && end == ~0UL) {
 1184         rle = resource_list_find(rl, type, *rid);
 1185         if (rle == NULL)
 1186             goto out;
 1187         start = rle->start;
 1188         end = rle->end;
 1189         count = rle->count;
 1190     }
 1191 
 1192     /*
 1193      * If this is an allocation of a specific range, see if we can satisfy
 1194      * the request from our system resource regions.  If we can't, pass the
 1195      * request up to the parent.
 1196      */
 1197     if (start + count - 1 == end && rm != NULL)
 1198         res = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE,
 1199             child);
 1200     if (res == NULL) {
 1201         res = BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid,
 1202             start, end, count, flags);
 1203     } else {
 1204         rman_set_rid(res, *rid);
 1205 
 1206         /* If requested, activate the resource using the parent's method. */
 1207         if (flags & RF_ACTIVE)
 1208             if (bus_activate_resource(child, type, *rid, res) != 0) {
 1209                 rman_release_resource(res);
 1210                 res = NULL;
 1211                 goto out;
 1212             }
 1213     }
 1214 
 1215     if (res != NULL && device_get_parent(child) == bus)
 1216         switch (type) {
 1217         case SYS_RES_IRQ:
 1218             /*
 1219              * Since bus_config_intr() takes immediate effect, we cannot
 1220              * configure the interrupt associated with a device when we
 1221              * parse the resources but have to defer it until a driver
 1222              * actually allocates the interrupt via bus_alloc_resource().
 1223              *
 1224              * XXX: Should we handle the lookup failing?
 1225              */
 1226             if (ACPI_SUCCESS(acpi_lookup_irq_resource(child, *rid, res, &ares)))
 1227                 acpi_config_intr(child, &ares);
 1228             break;
 1229         }
 1230 
 1231 out:
 1232     ACPI_SERIAL_END(acpi);
 1233     return (res);
 1234 }
 1235 
 1236 static int
 1237 acpi_release_resource(device_t bus, device_t child, int type, int rid,
 1238     struct resource *r)
 1239 {
 1240     struct rman *rm;
 1241     int ret;
 1242 
 1243     /* We only handle memory and IO resources through rman. */
 1244     switch (type) {
 1245     case SYS_RES_IOPORT:
 1246         rm = &acpi_rman_io;
 1247         break;
 1248     case SYS_RES_MEMORY:
 1249         rm = &acpi_rman_mem;
 1250         break;
 1251     default:
 1252         rm = NULL;
 1253     }
 1254 
 1255     ACPI_SERIAL_BEGIN(acpi);
 1256 
 1257     /*
 1258      * If this resource belongs to one of our internal managers,
 1259      * deactivate it and release it to the local pool.  If it doesn't,
 1260      * pass this request up to the parent.
 1261      */
 1262     if (rm != NULL && rman_is_region_manager(r, rm)) {
 1263         if (rman_get_flags(r) & RF_ACTIVE) {
 1264             ret = bus_deactivate_resource(child, type, rid, r);
 1265             if (ret != 0)
 1266                 goto out;
 1267         }
 1268         ret = rman_release_resource(r);
 1269     } else
 1270         ret = BUS_RELEASE_RESOURCE(device_get_parent(bus), child, type, rid, r);
 1271 
 1272 out:
 1273     ACPI_SERIAL_END(acpi);
 1274     return (ret);
 1275 }
 1276 
 1277 static void
 1278 acpi_delete_resource(device_t bus, device_t child, int type, int rid)
 1279 {
 1280     struct resource_list *rl;
 1281 
 1282     rl = acpi_get_rlist(bus, child);
 1283     resource_list_delete(rl, type, rid);
 1284 }
 1285 
 1286 /* Allocate an IO port or memory resource, given its GAS. */
 1287 int
 1288 acpi_bus_alloc_gas(device_t dev, int *type, int *rid, ACPI_GENERIC_ADDRESS *gas,
 1289     struct resource **res, u_int flags)
 1290 {
 1291     int error, res_type;
 1292 
 1293     error = ENOMEM;
 1294     if (type == NULL || rid == NULL || gas == NULL || res == NULL)
 1295         return (EINVAL);
 1296 
 1297     /* We only support memory and IO spaces. */
 1298     switch (gas->SpaceId) {
 1299     case ACPI_ADR_SPACE_SYSTEM_MEMORY:
 1300         res_type = SYS_RES_MEMORY;
 1301         break;
 1302     case ACPI_ADR_SPACE_SYSTEM_IO:
 1303         res_type = SYS_RES_IOPORT;
 1304         break;
 1305     default:
 1306         return (EOPNOTSUPP);
 1307     }
 1308 
 1309     /*
 1310      * If the register width is less than 8, assume the BIOS author means
 1311      * it is a bit field and just allocate a byte.
 1312      */
 1313     if (gas->BitWidth && gas->BitWidth < 8)
 1314         gas->BitWidth = 8;
 1315 
 1316     /* Validate the address after we're sure we support the space. */
 1317     if (gas->Address == 0 || gas->BitWidth == 0)
 1318         return (EINVAL);
 1319 
 1320     bus_set_resource(dev, res_type, *rid, gas->Address,
 1321         gas->BitWidth / 8);
 1322     *res = bus_alloc_resource_any(dev, res_type, rid, RF_ACTIVE | flags);
 1323     if (*res != NULL) {
 1324         *type = res_type;
 1325         error = 0;
 1326     } else
 1327         bus_delete_resource(dev, res_type, *rid);
 1328 
 1329     return (error);
 1330 }
 1331 
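A minimal sketch of a caller, loosely modelled on how the PM-timer driver maps the register block described by the FADT's XPmTimerBlock GAS; the softc layout and function name here are invented for illustration:

    /* Hypothetical consumer of acpi_bus_alloc_gas() (sketch only). */
    struct pmt_softc {
        int              pmt_type;
        int              pmt_rid;
        struct resource *pmt_res;
    };

    static int
    pmt_alloc_regs(device_t dev, struct pmt_softc *sc)
    {
        int error;

        sc->pmt_rid = 0;
        error = acpi_bus_alloc_gas(dev, &sc->pmt_type, &sc->pmt_rid,
            &AcpiGbl_FADT.XPmTimerBlock, &sc->pmt_res, 0);
        if (error != 0)
            device_printf(dev, "cannot map PM timer block\n");
        return (error);
    }
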
 1332 /* Probe _HID and _CID for compatible ISA PNP ids. */
 1333 static uint32_t
 1334 acpi_isa_get_logicalid(device_t dev)
 1335 {
 1336     ACPI_DEVICE_INFO    *devinfo;
 1337     ACPI_HANDLE         h;
 1338     uint32_t            pnpid;
 1339 
 1340     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 1341 
 1342     /* Fetch and validate the HID. */
 1343     if ((h = acpi_get_handle(dev)) == NULL ||
 1344         ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
 1345         return_VALUE (0);
 1346 
 1347     pnpid = (devinfo->Valid & ACPI_VALID_HID) != 0 &&
 1348         devinfo->HardwareId.Length >= ACPI_EISAID_STRING_SIZE ?
 1349         PNP_EISAID(devinfo->HardwareId.String) : 0;
 1350     AcpiOsFree(devinfo);
 1351 
 1352     return_VALUE (pnpid);
 1353 }
 1354 
 1355 static int
 1356 acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count)
 1357 {
 1358     ACPI_DEVICE_INFO    *devinfo;
 1359     ACPI_DEVICE_ID      *ids;
 1360     ACPI_HANDLE         h;
 1361     uint32_t            *pnpid;
 1362     int                 i, valid;
 1363 
 1364     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 1365 
 1366     pnpid = cids;
 1367 
 1368     /* Fetch and validate the CID */
 1369     if ((h = acpi_get_handle(dev)) == NULL ||
 1370         ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
 1371         return_VALUE (0);
 1372 
 1373     if ((devinfo->Valid & ACPI_VALID_CID) == 0) {
 1374         AcpiOsFree(devinfo);
 1375         return_VALUE (0);
 1376     }
 1377 
 1378     if (devinfo->CompatibleIdList.Count < count)
 1379         count = devinfo->CompatibleIdList.Count;
 1380     ids = devinfo->CompatibleIdList.Ids;
 1381     for (i = 0, valid = 0; i < count; i++)
 1382         if (ids[i].Length >= ACPI_EISAID_STRING_SIZE &&
 1383             strncmp(ids[i].String, "PNP", 3) == 0) {
 1384             *pnpid++ = PNP_EISAID(ids[i].String);
 1385             valid++;
 1386         }
 1387     AcpiOsFree(devinfo);
 1388 
 1389     return_VALUE (valid);
 1390 }
 1391 
 1392 static char *
 1393 acpi_device_id_probe(device_t bus, device_t dev, char **ids) 
 1394 {
 1395     ACPI_HANDLE h;
 1396     ACPI_OBJECT_TYPE t;
 1397     int i;
 1398 
 1399     h = acpi_get_handle(dev);
 1400     if (ids == NULL || h == NULL)
 1401         return (NULL);
 1402     t = acpi_get_type(dev);
 1403     if (t != ACPI_TYPE_DEVICE && t != ACPI_TYPE_PROCESSOR)
 1404         return (NULL);
 1405 
 1406     /* Try to match one of the array of IDs with a HID or CID. */
 1407     for (i = 0; ids[i] != NULL; i++) {
 1408         if (acpi_MatchHid(h, ids[i]))
 1409             return (ids[i]);
 1410     }
 1411     return (NULL);
 1412 }
 1413 
 1414 static ACPI_STATUS
 1415 acpi_device_eval_obj(device_t bus, device_t dev, ACPI_STRING pathname,
 1416     ACPI_OBJECT_LIST *parameters, ACPI_BUFFER *ret)
 1417 {
 1418     ACPI_HANDLE h;
 1419 
 1420     if (dev == NULL)
 1421         h = ACPI_ROOT_OBJECT;
 1422     else if ((h = acpi_get_handle(dev)) == NULL)
 1423         return (AE_BAD_PARAMETER);
 1424     return (AcpiEvaluateObject(h, pathname, parameters, ret));
 1425 }
 1426 
 1427 static int
 1428 acpi_device_pwr_for_sleep(device_t bus, device_t dev, int *dstate)
 1429 {
 1430     struct acpi_softc *sc;
 1431     ACPI_HANDLE handle;
 1432     ACPI_STATUS status;
 1433     char sxd[8];
 1434     int error;
 1435 
 1436     sc = device_get_softc(bus);
 1437     handle = acpi_get_handle(dev);
 1438 
 1439     /*
 1440      * XXX If we find these devices, don't try to power them down.
 1441      * The serial and IRDA ports on my T23 hang the system when
 1442      * set to D3 and it appears that such legacy devices may
 1443      * need special handling in their drivers.
 1444      */
 1445     if (handle == NULL ||
 1446         acpi_MatchHid(handle, "PNP0500") ||
 1447         acpi_MatchHid(handle, "PNP0501") ||
 1448         acpi_MatchHid(handle, "PNP0502") ||
 1449         acpi_MatchHid(handle, "PNP0510") ||
 1450         acpi_MatchHid(handle, "PNP0511"))
 1451         return (ENXIO);
 1452 
 1453     /*
 1454      * Override next state with the value from _SxD, if present.  If no
 1455      * dstate argument was provided, don't fetch the return value.
 1456      */
 1457     snprintf(sxd, sizeof(sxd), "_S%dD", sc->acpi_sstate);
 1458     if (dstate)
 1459         status = acpi_GetInteger(handle, sxd, dstate);
 1460     else
 1461         status = AcpiEvaluateObject(handle, sxd, NULL, NULL);
 1462 
 1463     switch (status) {
 1464     case AE_OK:
 1465         error = 0;
 1466         break;
 1467     case AE_NOT_FOUND:
 1468         error = ESRCH;
 1469         break;
 1470     default:
 1471         error = ENXIO;
 1472         break;
 1473     }
 1474 
 1475     return (error);
 1476 }
 1477 
 1478 /* Callback arg for our implementation of walking the namespace. */
 1479 struct acpi_device_scan_ctx {
 1480     acpi_scan_cb_t      user_fn;
 1481     void                *arg;
 1482     ACPI_HANDLE         parent;
 1483 };
 1484 
 1485 static ACPI_STATUS
 1486 acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, void *arg, void **retval)
 1487 {
 1488     struct acpi_device_scan_ctx *ctx;
 1489     device_t dev, old_dev;
 1490     ACPI_STATUS status;
 1491     ACPI_OBJECT_TYPE type;
 1492 
 1493     /*
 1494      * Skip this device if we think we'll have trouble with it or it is
 1495      * the parent where the scan began.
 1496      */
 1497     ctx = (struct acpi_device_scan_ctx *)arg;
 1498     if (acpi_avoid(h) || h == ctx->parent)
 1499         return (AE_OK);
 1500 
 1501     /* If this is not a valid device type (e.g., a method), skip it. */
 1502     if (ACPI_FAILURE(AcpiGetType(h, &type)))
 1503         return (AE_OK);
 1504     if (type != ACPI_TYPE_DEVICE && type != ACPI_TYPE_PROCESSOR &&
 1505         type != ACPI_TYPE_THERMAL && type != ACPI_TYPE_POWER)
 1506         return (AE_OK);
 1507 
 1508     /*
 1509      * Call the user function with the current device.  If it is unchanged
 1510      * afterwards, return.  Otherwise, we update the handle to the new dev.
 1511      */
 1512     old_dev = acpi_get_device(h);
 1513     dev = old_dev;
 1514     status = ctx->user_fn(h, &dev, level, ctx->arg);
 1515     if (ACPI_FAILURE(status) || old_dev == dev)
 1516         return (status);
 1517 
 1518     /* Remove the old child and its connection to the handle. */
 1519     if (old_dev != NULL) {
 1520         device_delete_child(device_get_parent(old_dev), old_dev);
 1521         AcpiDetachData(h, acpi_fake_objhandler);
 1522     }
 1523 
 1524     /* Recreate the handle association if the user created a device. */
 1525     if (dev != NULL)
 1526         AcpiAttachData(h, acpi_fake_objhandler, dev);
 1527 
 1528     return (AE_OK);
 1529 }
 1530 
 1531 static ACPI_STATUS
 1532 acpi_device_scan_children(device_t bus, device_t dev, int max_depth,
 1533     acpi_scan_cb_t user_fn, void *arg)
 1534 {
 1535     ACPI_HANDLE h;
 1536     struct acpi_device_scan_ctx ctx;
 1537 
 1538     if (acpi_disabled("children"))
 1539         return (AE_OK);
 1540 
 1541     if (dev == NULL)
 1542         h = ACPI_ROOT_OBJECT;
 1543     else if ((h = acpi_get_handle(dev)) == NULL)
 1544         return (AE_BAD_PARAMETER);
 1545     ctx.user_fn = user_fn;
 1546     ctx.arg = arg;
 1547     ctx.parent = h;
 1548     return (AcpiWalkNamespace(ACPI_TYPE_ANY, h, max_depth,
 1549         acpi_device_scan_cb, NULL, &ctx, NULL));
 1550 }
 1551 
 1552 /*
 1553  * Even though ACPI devices are not PCI, we use the PCI approach for setting
 1554  * device power states since it's close enough to ACPI.
 1555  */
 1556 static int
 1557 acpi_set_powerstate_method(device_t bus, device_t child, int state)
 1558 {
 1559     ACPI_HANDLE h;
 1560     ACPI_STATUS status;
 1561     int error;
 1562 
 1563     error = 0;
 1564     h = acpi_get_handle(child);
 1565     if (state < ACPI_STATE_D0 || state > ACPI_D_STATES_MAX)
 1566         return (EINVAL);
 1567     if (h == NULL)
 1568         return (0);
 1569 
 1570     /* Ignore errors if the power methods aren't present. */
 1571     status = acpi_pwr_switch_consumer(h, state);
 1572     if (ACPI_FAILURE(status) && status != AE_NOT_FOUND
 1573         && status != AE_BAD_PARAMETER)
 1574         device_printf(bus, "failed to set ACPI power state D%d on %s: %s\n",
 1575             state, acpi_name(h), AcpiFormatException(status));
 1576 
 1577     return (error);
 1578 }
 1579 
 1580 static int
 1581 acpi_isa_pnp_probe(device_t bus, device_t child, struct isa_pnp_id *ids)
 1582 {
 1583     int                 result, cid_count, i;
 1584     uint32_t            lid, cids[8];
 1585 
 1586     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 1587 
 1588     /*
 1589      * ISA-style drivers attached to ACPI may persist and
 1590      * probe manually if we return ENOENT.  We never want
 1591      * that to happen, so don't ever return it.
 1592      */
 1593     result = ENXIO;
 1594 
 1595     /* Scan the supplied IDs for a match */
 1596     lid = acpi_isa_get_logicalid(child);
 1597     cid_count = acpi_isa_get_compatid(child, cids, 8);
 1598     while (ids && ids->ip_id) {
 1599         if (lid == ids->ip_id) {
 1600             result = 0;
 1601             goto out;
 1602         }
 1603         for (i = 0; i < cid_count; i++) {
 1604             if (cids[i] == ids->ip_id) {
 1605                 result = 0;
 1606                 goto out;
 1607             }
 1608         }
 1609         ids++;
 1610     }
 1611 
 1612  out:
 1613     if (result == 0 && ids->ip_desc)
 1614         device_set_desc(child, ids->ip_desc);
 1615 
 1616     return_VALUE (result);
 1617 }
 1618 
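/*
 * Illustrative sketch (not part of this file, not compiled): an ISA-style
 * driver hands its PNP ID table to the bus through ISA_PNP_PROBE(), which
 * for a child attached under acpi0 resolves to acpi_isa_pnp_probe() above.
 * The table and probe routine names are hypothetical; the IDs are the
 * standard EISA encodings of PNP0500/PNP0501.
 */
#if 0
static struct isa_pnp_id example_com_ids[] = {
    { 0x0005d041, "Standard PC COM port" },		/* PNP0500 */
    { 0x0105d041, "16550A-compatible COM port" },	/* PNP0501 */
    { 0 }
};

static int
example_isa_probe(device_t dev)
{

    /* Matches the child's _HID/_CID against the table. */
    return (ISA_PNP_PROBE(device_get_parent(dev), dev, example_com_ids));
}
#endif
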
 1619 #if defined(__i386__) || defined(__amd64__)
 1620 /*
 1621  * Look for an MCFG table.  If it is present, use the settings for
 1622  * domain (segment) 0 to set up PCI config space access via the
 1623  * memory map.
 1624  */
 1625 static void
 1626 acpi_enable_pcie(void)
 1627 {
 1628         ACPI_TABLE_HEADER *hdr;
 1629         ACPI_MCFG_ALLOCATION *alloc, *end;
 1630         ACPI_STATUS status;
 1631 
 1632         status = AcpiGetTable(ACPI_SIG_MCFG, 1, &hdr);
 1633         if (ACPI_FAILURE(status))
 1634                 return;
 1635 
 1636         end = (ACPI_MCFG_ALLOCATION *)((char *)hdr + hdr->Length);
 1637         alloc = (ACPI_MCFG_ALLOCATION *)((ACPI_TABLE_MCFG *)hdr + 1);
 1638         while (alloc < end) {
 1639                 if (alloc->PciSegment == 0) {
 1640                         pcie_cfgregopen(alloc->Address, alloc->StartBusNumber,
 1641                             alloc->EndBusNumber);
 1642                         return;
 1643                 }
 1644                 alloc++;
 1645         }
 1646 }
 1647 #endif
 1648 
 1649 /*
 1650  * Scan all of the ACPI namespace and attach child devices.
 1651  *
 1652  * We should only expect to find devices in the \_PR, \_TZ, \_SI, and
 1653  * \_SB scopes, and \_PR and \_TZ became obsolete in the ACPI 2.0 spec.
 1654  * However, in violation of the spec, some systems place their PCI link
 1655  * devices in \, so we have to walk the whole namespace.  We check the
 1656  * type of namespace nodes, so this should be ok.
 1657  */
 1658 static void
 1659 acpi_probe_children(device_t bus)
 1660 {
 1661 
 1662     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 1663 
 1664     /*
 1665      * Scan the namespace and insert placeholders for all the devices that
 1666      * we find.  We also probe/attach any early devices.
 1667      *
 1668      * Note that we use AcpiWalkNamespace rather than AcpiGetDevices because
 1669      * we want to create nodes for all devices, not just those that are
 1670      * currently present. (This assumes that we don't want to create/remove
 1671      * devices as they appear, which might be smarter.)
 1672      */
 1673     ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "namespace scan\n"));
 1674     AcpiWalkNamespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, 100, acpi_probe_child,
 1675         NULL, bus, NULL);
 1676 
 1677     /* Pre-allocate resources for our rman from any sysresource devices. */
 1678     acpi_sysres_alloc(bus);
 1679 
 1680     /* Create any static children by calling device identify methods. */
 1681     ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "device identify routines\n"));
 1682     bus_generic_probe(bus);
 1683 
 1684     /* Probe/attach all children, created statically and from the namespace. */
 1685     ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "acpi bus_generic_attach\n"));
 1686     bus_generic_attach(bus);
 1687 
 1688     /* Attach wake sysctls. */
 1689     acpi_wake_sysctl_walk(bus);
 1690 
 1691     ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "done attaching children\n"));
 1692     return_VOID;
 1693 }
 1694 
 1695 /*
 1696  * Determine the probe order for a given device.
 1697  */
 1698 static void
 1699 acpi_probe_order(ACPI_HANDLE handle, int *order)
 1700 {
 1701     ACPI_OBJECT_TYPE type;
 1702 
 1703     /*
 1704      * 1. CPUs
 1705      * 2. I/O port and memory system resource holders
 1706      * 3. Embedded controllers (to handle early accesses)
 1707      * 4. PCI Link Devices
 1708      */
 1709     AcpiGetType(handle, &type);
 1710     if (type == ACPI_TYPE_PROCESSOR)
 1711         *order = 1;
 1712     else if (acpi_MatchHid(handle, "PNP0C01") || acpi_MatchHid(handle, "PNP0C02"))
 1713         *order = 2;
 1714     else if (acpi_MatchHid(handle, "PNP0C09"))
 1715         *order = 3;
 1716     else if (acpi_MatchHid(handle, "PNP0C0F"))
 1717         *order = 4;
 1718 }
 1719 
 1720 /*
 1721  * Evaluate a child device and determine whether we might attach a device to
 1722  * it.
 1723  */
 1724 static ACPI_STATUS
 1725 acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status)
 1726 {
 1727     ACPI_OBJECT_TYPE type;
 1728     ACPI_HANDLE h;
 1729     device_t bus, child;
 1730     int order;
 1731     char *handle_str, **search;
 1732     static char *scopes[] = {"\\_PR_", "\\_TZ_", "\\_SI_", "\\_SB_", NULL};
 1733 
 1734     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 1735 
 1736     /* Skip this device if we think we'll have trouble with it. */
 1737     if (acpi_avoid(handle))
 1738         return_ACPI_STATUS (AE_OK);
 1739 
 1740     bus = (device_t)context;
 1741     if (ACPI_SUCCESS(AcpiGetType(handle, &type))) {
 1742         switch (type) {
 1743         case ACPI_TYPE_DEVICE:
 1744         case ACPI_TYPE_PROCESSOR:
 1745         case ACPI_TYPE_THERMAL:
 1746         case ACPI_TYPE_POWER:
 1747             if (acpi_disabled("children"))
 1748                 break;
 1749 
 1750             /*
 1751              * Since we scan from \, be sure to skip system scope objects.
 1752              * At least \_SB and \_TZ are detected as devices (ACPI-CA bug?)
 1753              */
 1754             handle_str = acpi_name(handle);
 1755             for (search = scopes; *search != NULL; search++) {
 1756                 if (strcmp(handle_str, *search) == 0)
 1757                     break;
 1758             }
 1759             if (*search != NULL)
 1760                 break;
 1761 
 1762             /* 
 1763              * Create a placeholder device for this node.  Sort the
 1764              * placeholder so that the probe/attach passes will run
 1765              * breadth-first.  Orders less than ACPI_DEV_BASE_ORDER
 1766              * are reserved for special objects (e.g., system
 1767              * resources and CPUs), which acpi_probe_order() sorts
 1768              * ahead of all other devices.
 1769              */
 1770             ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "scanning '%s'\n", handle_str));
 1771             order = level * 10 + 100;
 1772             acpi_probe_order(handle, &order);
 1773             child = BUS_ADD_CHILD(bus, order, NULL, -1);
 1774             if (child == NULL)
 1775                 break;
 1776 
 1777             /* Associate the handle with the device_t and vice versa. */
 1778             acpi_set_handle(child, handle);
 1779             AcpiAttachData(handle, acpi_fake_objhandler, child);
 1780 
 1781             /*
 1782              * Check that the device is present.  If it's not present,
 1783              * leave it disabled (so that we have a device_t attached to
 1784              * the handle, but we don't probe it).
 1785              *
 1786              * XXX PCI link devices sometimes report "present" but not
 1787              * "functional" (i.e. if disabled).  Go ahead and probe them
 1788              * anyway since we may enable them later.
 1789              */
 1790             if (type == ACPI_TYPE_DEVICE && !acpi_DeviceIsPresent(child)) {
 1791                 /* Never disable PCI link devices. */
 1792                 if (acpi_MatchHid(handle, "PNP0C0F"))
 1793                     break;
 1794                 /*
 1795                  * Docking stations should remain enabled since the system
 1796                  * may be undocked at boot.
 1797                  */
 1798                 if (ACPI_SUCCESS(AcpiGetHandle(handle, "_DCK", &h)))
 1799                     break;
 1800 
 1801                 device_disable(child);
 1802                 break;
 1803             }
 1804 
 1805             /*
 1806              * Get the device's resource settings and attach them.
 1807              * Note that if the device has _PRS but no _CRS, we need
 1808              * to decide when it's appropriate to try to configure the
 1809              * device.  Ignore the return value here; it's OK for the
 1810              * device not to have any resources.
 1811              */
 1812             acpi_parse_resources(child, handle, &acpi_res_parse_set, NULL);
 1813             break;
 1814         }
 1815     }
 1816 
 1817     return_ACPI_STATUS (AE_OK);
 1818 }
 1819 
 1820 /*
 1821  * AcpiAttachData() requires an object handler but never uses it.  This is a
 1822  * placeholder object handler so we can store a device_t in an ACPI_HANDLE.
 1823  */
 1824 void
 1825 acpi_fake_objhandler(ACPI_HANDLE h, void *data)
 1826 {
 1827 }
 1828 
 1829 static void
 1830 acpi_shutdown_final(void *arg, int howto)
 1831 {
 1832     struct acpi_softc *sc = (struct acpi_softc *)arg;
 1833     ACPI_STATUS status;
 1834 
 1835     /*
 1836      * XXX Shutdown code should only run on the BSP (cpuid 0).
 1837      * Some chipsets do not power off the system correctly if called from
 1838      * an AP.
 1839      */
 1840     if ((howto & RB_POWEROFF) != 0) {
 1841         status = AcpiEnterSleepStatePrep(ACPI_STATE_S5);
 1842         if (ACPI_FAILURE(status)) {
 1843             device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n",
 1844                 AcpiFormatException(status));
 1845             return;
 1846         }
 1847         device_printf(sc->acpi_dev, "Powering system off\n");
 1848         ACPI_DISABLE_IRQS();
 1849         status = AcpiEnterSleepState(ACPI_STATE_S5);
 1850         if (ACPI_FAILURE(status))
 1851             device_printf(sc->acpi_dev, "power-off failed - %s\n",
 1852                 AcpiFormatException(status));
 1853         else {
 1854             DELAY(1000000);
 1855             device_printf(sc->acpi_dev, "power-off failed - timeout\n");
 1856         }
 1857     } else if ((howto & RB_HALT) == 0 &&
 1858         (AcpiGbl_FADT.Flags & ACPI_FADT_RESET_REGISTER) &&
 1859         sc->acpi_handle_reboot) {
 1860         /* Reboot using the reset register. */
 1861         status = AcpiWrite(
 1862             AcpiGbl_FADT.ResetValue, &AcpiGbl_FADT.ResetRegister);
 1863         if (ACPI_FAILURE(status))
 1864             device_printf(sc->acpi_dev, "reset failed - %s\n",
 1865                 AcpiFormatException(status));
 1866         else {
 1867             DELAY(1000000);
 1868             device_printf(sc->acpi_dev, "reset failed - timeout\n");
 1869         }
 1870     } else if (sc->acpi_do_disable && panicstr == NULL) {
 1871         /*
 1872          * Only disable ACPI if the user requested it.  On some systems, writing
 1873          * the disable value to SMI_CMD hangs the system.
 1874          */
 1875         device_printf(sc->acpi_dev, "Shutting down\n");
 1876         AcpiTerminate();
 1877     }
 1878 }
 1879 
 1880 static void
 1881 acpi_enable_fixed_events(struct acpi_softc *sc)
 1882 {
 1883     static int  first_time = 1;
 1884 
 1885     /* Enable and clear fixed events and install handlers. */
 1886     if ((AcpiGbl_FADT.Flags & ACPI_FADT_POWER_BUTTON) == 0) {
 1887         AcpiClearEvent(ACPI_EVENT_POWER_BUTTON);
 1888         AcpiInstallFixedEventHandler(ACPI_EVENT_POWER_BUTTON,
 1889                                      acpi_event_power_button_sleep, sc);
 1890         if (first_time)
 1891             device_printf(sc->acpi_dev, "Power Button (fixed)\n");
 1892     }
 1893     if ((AcpiGbl_FADT.Flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
 1894         AcpiClearEvent(ACPI_EVENT_SLEEP_BUTTON);
 1895         AcpiInstallFixedEventHandler(ACPI_EVENT_SLEEP_BUTTON,
 1896                                      acpi_event_sleep_button_sleep, sc);
 1897         if (first_time)
 1898             device_printf(sc->acpi_dev, "Sleep Button (fixed)\n");
 1899     }
 1900 
 1901     first_time = 0;
 1902 }
 1903 
 1904 /*
 1905  * Returns true if the device is actually present and should
 1906  * be attached to.  This requires the present, enabled, UI-visible 
 1907  * and diagnostics-passed bits to be set.
 1908  */
 1909 BOOLEAN
 1910 acpi_DeviceIsPresent(device_t dev)
 1911 {
 1912     ACPI_DEVICE_INFO    *devinfo;
 1913     ACPI_HANDLE         h;
 1914     BOOLEAN             present;
 1915 
 1916     if ((h = acpi_get_handle(dev)) == NULL ||
 1917         ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
 1918         return (FALSE);
 1919 
 1920     /* If no _STA method, must be present */
 1921     present = (devinfo->Valid & ACPI_VALID_STA) == 0 ||
 1922         ACPI_DEVICE_PRESENT(devinfo->CurrentStatus) ? TRUE : FALSE;
 1923 
 1924     AcpiOsFree(devinfo);
 1925     return (present);
 1926 }
 1927 
 1928 /*
 1929  * Returns true if the battery is actually present and inserted.
 1930  */
 1931 BOOLEAN
 1932 acpi_BatteryIsPresent(device_t dev)
 1933 {
 1934     ACPI_DEVICE_INFO    *devinfo;
 1935     ACPI_HANDLE         h;
 1936     BOOLEAN             present;
 1937 
 1938     if ((h = acpi_get_handle(dev)) == NULL ||
 1939         ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
 1940         return (FALSE);
 1941 
 1942     /* If no _STA method, must be present */
 1943     present = (devinfo->Valid & ACPI_VALID_STA) == 0 ||
 1944         ACPI_BATTERY_PRESENT(devinfo->CurrentStatus) ? TRUE : FALSE;
 1945 
 1946     AcpiOsFree(devinfo);
 1947     return (present);
 1948 }
 1949 
 1950 /*
 1951  * Match a HID string against a handle
 1952  */
 1953 static BOOLEAN
 1954 acpi_MatchHid(ACPI_HANDLE h, const char *hid) 
 1955 {
 1956     ACPI_DEVICE_INFO    *devinfo;
 1957     BOOLEAN             ret;
 1958     int                 i;
 1959 
 1960     if (hid == NULL || h == NULL ||
 1961         ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
 1962         return (FALSE);
 1963 
 1964     ret = FALSE;
 1965     if ((devinfo->Valid & ACPI_VALID_HID) != 0 &&
 1966         strcmp(hid, devinfo->HardwareId.String) == 0)
 1967             ret = TRUE;
 1968     else if ((devinfo->Valid & ACPI_VALID_CID) != 0)
 1969         for (i = 0; i < devinfo->CompatibleIdList.Count; i++) {
 1970             if (strcmp(hid, devinfo->CompatibleIdList.Ids[i].String) == 0) {
 1971                 ret = TRUE;
 1972                 break;
 1973             }
 1974         }
 1975 
 1976     AcpiOsFree(devinfo);
 1977     return (ret);
 1978 }
 1979 
 1980 /*
 1981  * Return the handle of a named object within our scope, i.e., that of (parent)
 1982  * or one of its parents.
 1983  */
 1984 ACPI_STATUS
 1985 acpi_GetHandleInScope(ACPI_HANDLE parent, char *path, ACPI_HANDLE *result)
 1986 {
 1987     ACPI_HANDLE         r;
 1988     ACPI_STATUS         status;
 1989 
 1990     /* Walk back up the tree to the root */
 1991     for (;;) {
 1992         status = AcpiGetHandle(parent, path, &r);
 1993         if (ACPI_SUCCESS(status)) {
 1994             *result = r;
 1995             return (AE_OK);
 1996         }
 1997         /* XXX Return error here? */
 1998         if (status != AE_NOT_FOUND)
 1999             return (AE_OK);
 2000         if (ACPI_FAILURE(AcpiGetParent(parent, &r)))
 2001             return (AE_NOT_FOUND);
 2002         parent = r;
 2003     }
 2004 }
 2005 
 2006 /* Find the difference between two PM tick counts. */
 2007 uint32_t
 2008 acpi_TimerDelta(uint32_t end, uint32_t start)
 2009 {
 2010     uint32_t delta;
 2011 
 2012     if (end >= start)
 2013         delta = end - start;
 2014     else if (AcpiGbl_FADT.Flags & ACPI_FADT_32BIT_TIMER)
 2015         delta = ((0xFFFFFFFF - start) + end + 1);
 2016     else
 2017         delta = ((0x00FFFFFF - start) + end + 1) & 0x00FFFFFF;
 2018     return (delta);
 2019 }
 2020 
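/*
 * Worked example (illustrative, not compiled), assuming the FADT does not
 * advertise a 32-bit timer: a wrap of the 24-bit PM timer from 0x00FFFFF0
 * to 0x00000010 spans 32 ticks.
 */
#if 0
    uint32_t ticks;

    ticks = acpi_TimerDelta(0x00000010, 0x00FFFFF0);
    /* ((0x00FFFFFF - 0x00FFFFF0) + 0x00000010 + 1) & 0x00FFFFFF == 0x20 */
#endif
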
 2021 /*
 2022  * Allocate a buffer with a preset data size.
 2023  */
 2024 ACPI_BUFFER *
 2025 acpi_AllocBuffer(int size)
 2026 {
 2027     ACPI_BUFFER *buf;
 2028 
 2029     if ((buf = malloc(size + sizeof(*buf), M_ACPIDEV, M_NOWAIT)) == NULL)
 2030         return (NULL);
 2031     buf->Length = size;
 2032     buf->Pointer = (void *)(buf + 1);
 2033     return (buf);
 2034 }
 2035 
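/*
 * Illustrative sketch (not compiled): the data area immediately follows the
 * header, so one free(9) of the returned pointer releases both.
 */
#if 0
    ACPI_BUFFER *buf;

    if ((buf = acpi_AllocBuffer(256)) == NULL)
	return (ENOMEM);
    /* ... hand buf to code expecting a fixed-size ACPI_BUFFER ... */
    free(buf, M_ACPIDEV);
#endif
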
 2036 ACPI_STATUS
 2037 acpi_SetInteger(ACPI_HANDLE handle, char *path, UINT32 number)
 2038 {
 2039     ACPI_OBJECT arg1;
 2040     ACPI_OBJECT_LIST args;
 2041 
 2042     arg1.Type = ACPI_TYPE_INTEGER;
 2043     arg1.Integer.Value = number;
 2044     args.Count = 1;
 2045     args.Pointer = &arg1;
 2046 
 2047     return (AcpiEvaluateObject(handle, path, &args, NULL));
 2048 }
 2049 
 2050 /*
 2051  * Evaluate a path that should return an integer.
 2052  */
 2053 ACPI_STATUS
 2054 acpi_GetInteger(ACPI_HANDLE handle, char *path, UINT32 *number)
 2055 {
 2056     ACPI_STATUS status;
 2057     ACPI_BUFFER buf;
 2058     ACPI_OBJECT param;
 2059 
 2060     if (handle == NULL)
 2061         handle = ACPI_ROOT_OBJECT;
 2062 
 2063     /*
 2064      * Assume that what we've been pointed at is an Integer object, or
 2065      * a method that will return an Integer.
 2066      */
 2067     buf.Pointer = &param;
 2068     buf.Length = sizeof(param);
 2069     status = AcpiEvaluateObject(handle, path, NULL, &buf);
 2070     if (ACPI_SUCCESS(status)) {
 2071         if (param.Type == ACPI_TYPE_INTEGER)
 2072             *number = param.Integer.Value;
 2073         else
 2074             status = AE_TYPE;
 2075     }
 2076 
 2077     /* 
 2078      * In some applications, a method that's expected to return an Integer
 2079      * may instead return a Buffer (probably to simplify some internal
 2080      * arithmetic).  We'll try to fetch whatever it is, and if it's a Buffer,
 2081      * convert it into an Integer as best we can.
 2082      *
 2083      * This is a hack.
 2084      */
 2085     if (status == AE_BUFFER_OVERFLOW) {
 2086         if ((buf.Pointer = AcpiOsAllocate(buf.Length)) == NULL) {
 2087             status = AE_NO_MEMORY;
 2088         } else {
 2089             status = AcpiEvaluateObject(handle, path, NULL, &buf);
 2090             if (ACPI_SUCCESS(status))
 2091                 status = acpi_ConvertBufferToInteger(&buf, number);
 2092             AcpiOsFree(buf.Pointer);
 2093         }
 2094     }
 2095     return (status);
 2096 }
 2097 
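/*
 * Illustrative sketch (not compiled) of the two helpers above; "handle"
 * stands for some device's ACPI_HANDLE and is hypothetical here.
 */
#if 0
    UINT32 sta;

    if (ACPI_SUCCESS(acpi_GetInteger(handle, "_STA", &sta)))
	printf("_STA returned %#x\n", sta);
    if (ACPI_FAILURE(acpi_SetInteger(handle, "_PSW", 1)))
	printf("could not set _PSW\n");
#endif
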
 2098 ACPI_STATUS
 2099 acpi_ConvertBufferToInteger(ACPI_BUFFER *bufp, UINT32 *number)
 2100 {
 2101     ACPI_OBJECT *p;
 2102     UINT8       *val;
 2103     int         i;
 2104 
 2105     p = (ACPI_OBJECT *)bufp->Pointer;
 2106     if (p->Type == ACPI_TYPE_INTEGER) {
 2107         *number = p->Integer.Value;
 2108         return (AE_OK);
 2109     }
 2110     if (p->Type != ACPI_TYPE_BUFFER)
 2111         return (AE_TYPE);
 2112     if (p->Buffer.Length > sizeof(int))
 2113         return (AE_BAD_DATA);
 2114 
 2115     *number = 0;
 2116     val = p->Buffer.Pointer;
 2117     for (i = 0; i < p->Buffer.Length; i++)
 2118         *number += val[i] << (i * 8);
 2119     return (AE_OK);
 2120 }
 2121 
 2122 /*
 2123  * Iterate over the elements of a package object, calling the supplied
 2124  * function for each element.
 2125  *
 2126  * XXX possible enhancement might be to abort traversal on error.
 2127  */
 2128 ACPI_STATUS
 2129 acpi_ForeachPackageObject(ACPI_OBJECT *pkg,
 2130         void (*func)(ACPI_OBJECT *comp, void *arg), void *arg)
 2131 {
 2132     ACPI_OBJECT *comp;
 2133     int         i;
 2134 
 2135     if (pkg == NULL || pkg->Type != ACPI_TYPE_PACKAGE)
 2136         return (AE_BAD_PARAMETER);
 2137 
 2138     /* Iterate over components */
 2139     i = 0;
 2140     comp = pkg->Package.Elements;
 2141     for (; i < pkg->Package.Count; i++, comp++)
 2142         func(comp, arg);
 2143 
 2144     return (AE_OK);
 2145 }
 2146 
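/*
 * Illustrative sketch (not compiled): a callback with the signature
 * acpi_ForeachPackageObject() expects.  The names are hypothetical.
 */
#if 0
static void
example_count_integers(ACPI_OBJECT *comp, void *arg)
{
    int *count = (int *)arg;

    if (comp->Type == ACPI_TYPE_INTEGER)
	(*count)++;
}

    /* Caller:  n = 0;  acpi_ForeachPackageObject(pkg, example_count_integers, &n); */
#endif
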
 2147 /*
 2148  * Find the (index)th resource object in a set.
 2149  */
 2150 ACPI_STATUS
 2151 acpi_FindIndexedResource(ACPI_BUFFER *buf, int index, ACPI_RESOURCE **resp)
 2152 {
 2153     ACPI_RESOURCE       *rp;
 2154     int                 i;
 2155 
 2156     rp = (ACPI_RESOURCE *)buf->Pointer;
 2157     i = index;
 2158     while (i-- > 0) {
 2159         /* Range check */
 2160         if (rp > (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length))
 2161             return (AE_BAD_PARAMETER);
 2162 
 2163         /* Check for terminator */
 2164         if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0)
 2165             return (AE_NOT_FOUND);
 2166         rp = ACPI_NEXT_RESOURCE(rp);
 2167     }
 2168     if (resp != NULL)
 2169         *resp = rp;
 2170 
 2171     return (AE_OK);
 2172 }
 2173 
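/*
 * Illustrative sketch (not compiled): fetch a device's _CRS and point at
 * the first descriptor in it.  "handle" is a hypothetical device handle.
 */
#if 0
    ACPI_BUFFER crs;
    ACPI_RESOURCE *res;

    crs.Pointer = NULL;
    crs.Length = ACPI_ALLOCATE_BUFFER;
    if (ACPI_SUCCESS(AcpiGetCurrentResources(handle, &crs)) &&
	ACPI_SUCCESS(acpi_FindIndexedResource(&crs, 0, &res))) {
	/* res now points at the first resource descriptor. */
    }
    if (crs.Pointer != NULL)
	AcpiOsFree(crs.Pointer);
#endif
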
 2174 /*
 2175  * Append an ACPI_RESOURCE to an ACPI_BUFFER.
 2176  *
 2177  * Given a pointer to an ACPI_RESOURCE structure, expand the ACPI_BUFFER
 2178  * provided to contain it.  If the ACPI_BUFFER is empty, allocate a sensible
 2179  * backing block.  If the ACPI_RESOURCE is NULL, return an empty set of
 2180  * resources.
 2181  */
 2182 #define ACPI_INITIAL_RESOURCE_BUFFER_SIZE       512
 2183 
 2184 ACPI_STATUS
 2185 acpi_AppendBufferResource(ACPI_BUFFER *buf, ACPI_RESOURCE *res)
 2186 {
 2187     ACPI_RESOURCE       *rp;
 2188     void                *newp;
 2189 
 2190     /* Initialise the buffer if necessary. */
 2191     if (buf->Pointer == NULL) {
 2192         buf->Length = ACPI_INITIAL_RESOURCE_BUFFER_SIZE;
 2193         if ((buf->Pointer = AcpiOsAllocate(buf->Length)) == NULL)
 2194             return (AE_NO_MEMORY);
 2195         rp = (ACPI_RESOURCE *)buf->Pointer;
 2196         rp->Type = ACPI_RESOURCE_TYPE_END_TAG;
 2197         rp->Length = 0;
 2198     }
 2199     if (res == NULL)
 2200         return (AE_OK);
 2201 
 2202     /*
 2203      * Scan the current buffer looking for the terminator.
 2204      * This will either find the terminator or hit the end
 2205      * of the buffer and return an error.
 2206      */
 2207     rp = (ACPI_RESOURCE *)buf->Pointer;
 2208     for (;;) {
 2209         /* Range check, don't go outside the buffer */
 2210         if (rp >= (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length))
 2211             return (AE_BAD_PARAMETER);
 2212         if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0)
 2213             break;
 2214         rp = ACPI_NEXT_RESOURCE(rp);
 2215     }
 2216 
 2217     /*
 2218      * Check the size of the buffer and expand if required.
 2219      *
 2220      * Required size is:
 2221      *  size of existing resources before terminator + 
 2222      *  size of new resource and header +
 2223      *  size of terminator.
 2224      *
 2225      * Note that this loop should really only run once, unless
 2226      * for some reason we are stuffing a *really* huge resource.
 2227      */
 2228     while ((((u_int8_t *)rp - (u_int8_t *)buf->Pointer) + 
 2229             res->Length + ACPI_RS_SIZE_NO_DATA +
 2230             ACPI_RS_SIZE_MIN) >= buf->Length) {
 2231         if ((newp = AcpiOsAllocate(buf->Length * 2)) == NULL)
 2232             return (AE_NO_MEMORY);
 2233         bcopy(buf->Pointer, newp, buf->Length);
 2234         rp = (ACPI_RESOURCE *)((u_int8_t *)newp +
 2235                                ((u_int8_t *)rp - (u_int8_t *)buf->Pointer));
 2236         AcpiOsFree(buf->Pointer);
 2237         buf->Pointer = newp;
 2238         buf->Length += buf->Length;
 2239     }
 2240 
 2241     /* Insert the new resource. */
 2242     bcopy(res, rp, res->Length + ACPI_RS_SIZE_NO_DATA);
 2243 
 2244     /* And add the terminator. */
 2245     rp = ACPI_NEXT_RESOURCE(rp);
 2246     rp->Type = ACPI_RESOURCE_TYPE_END_TAG;
 2247     rp->Length = 0;
 2248 
 2249     return (AE_OK);
 2250 }
 2251 
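/*
 * Illustrative sketch (not compiled): build a one-entry resource set from
 * scratch.  The IRQ number is hypothetical.
 */
#if 0
    ACPI_BUFFER buf;
    ACPI_RESOURCE res;

    buf.Pointer = NULL;			/* First append allocates the buffer. */
    buf.Length = 0;
    bzero(&res, sizeof(res));
    res.Type = ACPI_RESOURCE_TYPE_IRQ;
    res.Length = ACPI_RS_SIZE(ACPI_RESOURCE_IRQ);
    res.Data.Irq.InterruptCount = 1;
    res.Data.Irq.Interrupts[0] = 10;
    acpi_AppendBufferResource(&buf, &res);	/* Appends res plus an end tag. */
    if (buf.Pointer != NULL)
	AcpiOsFree(buf.Pointer);
#endif
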
 2252 /*
 2253  * Set interrupt model.
 2254  */
 2255 ACPI_STATUS
 2256 acpi_SetIntrModel(int model)
 2257 {
 2258 
 2259     return (acpi_SetInteger(ACPI_ROOT_OBJECT, "_PIC", model));
 2260 }
 2261 
 2262 /*
 2263  * Walk subtables of a table and call a callback routine for each
 2264  * subtable.  The caller should provide the first subtable and a
 2265  * pointer to the end of the table.  This can be used to walk tables
 2266  * such as MADT and SRAT that use subtable entries.
 2267  */
 2268 void
 2269 acpi_walk_subtables(void *first, void *end, acpi_subtable_handler *handler,
 2270     void *arg)
 2271 {
 2272     ACPI_SUBTABLE_HEADER *entry;
 2273 
 2274     for (entry = first; (void *)entry < end; ) {
 2275         /* Avoid an infinite loop if we hit a bogus entry. */
 2276         if (entry->Length < sizeof(ACPI_SUBTABLE_HEADER))
 2277             return;
 2278 
 2279         handler(entry, arg);
 2280         entry = ACPI_ADD_PTR(ACPI_SUBTABLE_HEADER, entry, entry->Length);
 2281     }
 2282 }
 2283 
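/*
 * Illustrative sketch (not compiled), in the style of the MADT/SRAT parsers
 * that call acpi_walk_subtables().  The handler name is hypothetical.
 */
#if 0
static void
example_madt_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
    int *lapics = (int *)arg;

    if (entry->Type == ACPI_MADT_TYPE_LOCAL_APIC)
	(*lapics)++;
}

    /*
     * Caller, given a mapped ACPI_TABLE_MADT *madt:
     *   acpi_walk_subtables(madt + 1,
     *       (char *)madt + madt->Header.Length, example_madt_handler, &n);
     */
#endif
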
 2284 /*
 2285  * DEPRECATED.  This interface has serious deficiencies and will be
 2286  * removed.
 2287  *
 2288  * Immediately enter the sleep state.  In the old model, acpiconf(8) ran
 2289  * rc.suspend and rc.resume so we don't have to notify devd(8) to do this.
 2290  */
 2291 ACPI_STATUS
 2292 acpi_SetSleepState(struct acpi_softc *sc, int state)
 2293 {
 2294     static int once;
 2295 
 2296     if (!once) {
 2297         device_printf(sc->acpi_dev,
 2298 "warning: acpi_SetSleepState() deprecated, need to update your software\n");
 2299         once = 1;
 2300     }
 2301     return (acpi_EnterSleepState(sc, state));
 2302 }
 2303 
 2304 #if defined(__amd64__) || defined(__i386__)
 2305 static void
 2306 acpi_sleep_force(void *arg)
 2307 {
 2308     struct acpi_softc *sc = (struct acpi_softc *)arg;
 2309 
 2310     device_printf(sc->acpi_dev,
 2311         "suspend request timed out, forcing sleep now\n");
 2312     if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate)))
 2313         device_printf(sc->acpi_dev, "force sleep state S%d failed\n",
 2314             sc->acpi_next_sstate);
 2315 }
 2316 #endif
 2317 
 2318 /*
 2319  * Request that the system enter the given suspend state.  All /dev/apm
 2320  * devices and devd(8) will be notified.  Userland then has a chance to
 2321  * save state and acknowledge the request.  The system sleeps once all
 2322  * acks are in.
 2323  */
 2324 int
 2325 acpi_ReqSleepState(struct acpi_softc *sc, int state)
 2326 {
 2327 #if defined(__i386__)
 2328     struct apm_clone_data *clone;
 2329 #endif
 2330 
 2331     if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX)
 2332         return (EINVAL);
 2333     if (!acpi_sleep_states[state])
 2334         return (EOPNOTSUPP);
 2335 
 2336     /* S5 (soft-off) should be entered directly with no waiting. */
 2337     if (state == ACPI_STATE_S5) {
 2338         if (ACPI_SUCCESS(acpi_EnterSleepState(sc, state)))
 2339             return (0);
 2340         else
 2341             return (ENXIO);
 2342     }
 2343 
 2344 #if defined(__amd64__) || defined(__i386__)
 2345     /* If a suspend request is already in progress, just return. */
 2346     ACPI_LOCK(acpi);
 2347     if (sc->acpi_next_sstate != 0) {
 2348         ACPI_UNLOCK(acpi);
 2349         return (0);
 2350     }
 2351 
 2352     /* Record the pending state and notify all apm devices. */
 2353     sc->acpi_next_sstate = state;
 2354 #if defined(__i386__)
 2355     STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) {
 2356         clone->notify_status = APM_EV_NONE;
 2357         if ((clone->flags & ACPI_EVF_DEVD) == 0) {
 2358             selwakeuppri(&clone->sel_read, PZERO);
 2359             KNOTE_LOCKED(&clone->sel_read.si_note, 0);
 2360         }
 2361     }
 2362 #endif
 2363 
 2364     /* If devd(8) is not running, immediately enter the sleep state. */
 2365     if (!devctl_process_running()) {
 2366         ACPI_UNLOCK(acpi);
 2367         if (ACPI_SUCCESS(acpi_EnterSleepState(sc, sc->acpi_next_sstate))) {
 2368             return (0);
 2369         } else {
 2370             return (ENXIO);
 2371         }
 2372     }
 2373 
 2374     /*
 2375      * Set a timeout to fire if userland doesn't ack the suspend request
 2376      * in time.  This way we still eventually go to sleep if we were
 2377      * overheating or running low on battery, even if userland is hung.
 2378      * We cancel this timeout once all userland acks are in or the
 2379      * suspend request is aborted.
 2380      */
 2381     callout_reset(&sc->susp_force_to, 10 * hz, acpi_sleep_force, sc);
 2382     ACPI_UNLOCK(acpi);
 2383 
 2384     /* Now notify devd(8) also. */
 2385     acpi_UserNotify("Suspend", ACPI_ROOT_OBJECT, state);
 2386 
 2387     return (0);
 2388 #else
 2389     /* This platform does not support acpi suspend/resume. */
 2390     return (EOPNOTSUPP);
 2391 #endif
 2392 }
 2393 
 2394 /*
 2395  * Acknowledge (or reject) a pending sleep state.  The caller has
 2396  * prepared for suspend and is now ready for it to proceed.  If the
 2397  * error argument is non-zero, it indicates suspend should be cancelled
 2398  * and gives an errno value describing why.  Once all votes are in,
 2399  * we suspend the system.
 2400  */
 2401 int
 2402 acpi_AckSleepState(struct apm_clone_data *clone, int error)
 2403 {
 2404 #if defined(__amd64__) || defined(__i386__)
 2405     struct acpi_softc *sc;
 2406     int ret, sleeping;
 2407 
 2408     /* If no pending sleep state, return an error. */
 2409     ACPI_LOCK(acpi);
 2410     sc = clone->acpi_sc;
 2411     if (sc->acpi_next_sstate == 0) {
 2412         ACPI_UNLOCK(acpi);
 2413         return (ENXIO);
 2414     }
 2415 
 2416     /* Caller wants to abort suspend process. */
 2417     if (error) {
 2418         sc->acpi_next_sstate = 0;
 2419         callout_stop(&sc->susp_force_to);
 2420         device_printf(sc->acpi_dev,
 2421             "listener on %s cancelled the pending suspend\n",
 2422             devtoname(clone->cdev));
 2423         ACPI_UNLOCK(acpi);
 2424         return (0);
 2425     }
 2426 
 2427     /*
 2428      * Mark this device as acking the suspend request.  Then, walk through
 2429      * all devices, seeing if they agree yet.  We only count devices that
 2430      * are writable since read-only devices couldn't ack the request.
 2431      */
 2432     sleeping = TRUE;
 2433 #if defined(__i386__)
 2434     clone->notify_status = APM_EV_ACKED;
 2435     STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) {
 2436         if ((clone->flags & ACPI_EVF_WRITE) != 0 &&
 2437             clone->notify_status != APM_EV_ACKED) {
 2438             sleeping = FALSE;
 2439             break;
 2440         }
 2441     }
 2442 #endif
 2443 
 2444     /* If all devices have voted "yes", we will suspend now. */
 2445     if (sleeping)
 2446         callout_stop(&sc->susp_force_to);
 2447     ACPI_UNLOCK(acpi);
 2448     ret = 0;
 2449     if (sleeping) {
 2450         if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate)))
 2451                 ret = ENODEV;
 2452     }
 2453     return (ret);
 2454 #else
 2455     /* This platform does not support acpi suspend/resume. */
 2456     return (EOPNOTSUPP);
 2457 #endif
 2458 }
 2459 
 2460 static void
 2461 acpi_sleep_enable(void *arg)
 2462 {
 2463     struct acpi_softc   *sc = (struct acpi_softc *)arg;
 2464 
 2465     /* Reschedule if the system is not fully up and running. */
 2466     if (!AcpiGbl_SystemAwakeAndRunning) {
 2467         timeout(acpi_sleep_enable, sc, hz * ACPI_MINIMUM_AWAKETIME);
 2468         return;
 2469     }
 2470 
 2471     ACPI_LOCK(acpi);
 2472     sc->acpi_sleep_disabled = FALSE;
 2473     ACPI_UNLOCK(acpi);
 2474 }
 2475 
 2476 static ACPI_STATUS
 2477 acpi_sleep_disable(struct acpi_softc *sc)
 2478 {
 2479     ACPI_STATUS         status;
 2480 
 2481     /* Fail if the system is not fully up and running. */
 2482     if (!AcpiGbl_SystemAwakeAndRunning)
 2483         return (AE_ERROR);
 2484 
 2485     ACPI_LOCK(acpi);
 2486     status = sc->acpi_sleep_disabled ? AE_ERROR : AE_OK;
 2487     sc->acpi_sleep_disabled = TRUE;
 2488     ACPI_UNLOCK(acpi);
 2489 
 2490     return (status);
 2491 }
 2492 
 2493 enum acpi_sleep_state {
 2494     ACPI_SS_NONE,
 2495     ACPI_SS_GPE_SET,
 2496     ACPI_SS_DEV_SUSPEND,
 2497     ACPI_SS_SLP_PREP,
 2498     ACPI_SS_SLEPT,
 2499 };
 2500 
 2501 /*
 2502  * Enter the desired system sleep state.
 2503  *
 2504  * Currently we support S1-S5 but S4 is only S4BIOS
 2505  */
 2506 static ACPI_STATUS
 2507 acpi_EnterSleepState(struct acpi_softc *sc, int state)
 2508 {
 2509     ACPI_STATUS status;
 2510     enum acpi_sleep_state slp_state;
 2511 
 2512     ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
 2513 
 2514     if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX)
 2515         return_ACPI_STATUS (AE_BAD_PARAMETER);
 2516     if (!acpi_sleep_states[state]) {
 2517         device_printf(sc->acpi_dev, "Sleep state S%d not supported by BIOS\n",
 2518             state);
 2519         return (AE_SUPPORT);
 2520     }
 2521 
 2522     /* Re-entry once we're suspending is not allowed. */
 2523     status = acpi_sleep_disable(sc);
 2524     if (ACPI_FAILURE(status)) {
 2525         device_printf(sc->acpi_dev,
 2526             "suspend request ignored (not ready yet)\n");
 2527         return (status);
 2528     }
 2529 
 2530     if (state == ACPI_STATE_S5) {
 2531         /*
 2532          * Shut down cleanly and power off.  This will call us back through the
 2533          * shutdown handlers.
 2534          */
 2535         shutdown_nice(RB_POWEROFF);
 2536         return_ACPI_STATUS (AE_OK);
 2537     }
 2538 
 2539 #ifdef SMP
 2540     thread_lock(curthread);
 2541     sched_bind(curthread, 0);
 2542     thread_unlock(curthread);
 2543 #endif
 2544 
 2545     /*
 2546      * Be sure to hold Giant across DEVICE_SUSPEND/RESUME since non-MPSAFE
 2547      * drivers need this.
 2548      */
 2549     mtx_lock(&Giant);
 2550 
 2551     slp_state = ACPI_SS_NONE;
 2552 
 2553     sc->acpi_sstate = state;
 2554 
 2555     /* Enable any GPEs as appropriate and requested by the user. */
 2556     acpi_wake_prep_walk(state);
 2557     slp_state = ACPI_SS_GPE_SET;
 2558 
 2559     /*
 2560      * Inform all devices that we are going to sleep.  If at least one
 2561      * device fails, DEVICE_SUSPEND() automatically resumes the tree.
 2562      *
 2563      * XXX A two-pass approach with a 'veto' pass followed by a
 2564      * "real thing" pass would be better, but the current bus
 2565      * interface does not provide for this.
 2566      */
 2567     if (DEVICE_SUSPEND(root_bus) != 0) {
 2568         device_printf(sc->acpi_dev, "device_suspend failed\n");
 2569         goto backout;
 2570     }
 2571     slp_state = ACPI_SS_DEV_SUSPEND;
 2572 
 2573     /* If testing device suspend only, back out of everything here. */
 2574     if (acpi_susp_bounce)
 2575         goto backout;
 2576 
 2577     status = AcpiEnterSleepStatePrep(state);
 2578     if (ACPI_FAILURE(status)) {
 2579         device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n",
 2580                       AcpiFormatException(status));
 2581         goto backout;
 2582     }
 2583     slp_state = ACPI_SS_SLP_PREP;
 2584 
 2585     if (sc->acpi_sleep_delay > 0)
 2586         DELAY(sc->acpi_sleep_delay * 1000000);
 2587 
 2588     if (state != ACPI_STATE_S1) {
 2589         acpi_sleep_machdep(sc, state);
 2590 
 2591         /* Re-enable ACPI hardware on wakeup from sleep state 4. */
 2592         if (state == ACPI_STATE_S4)
 2593             AcpiEnable();
 2594     } else {
 2595         ACPI_DISABLE_IRQS();
 2596         status = AcpiEnterSleepState(state);
 2597         if (ACPI_FAILURE(status)) {
 2598             device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n",
 2599                           AcpiFormatException(status));
 2600             goto backout;
 2601         }
 2602     }
 2603     slp_state = ACPI_SS_SLEPT;
 2604 
 2605     /*
 2606      * Back out state according to how far along we got in the suspend
 2607      * process.  This handles both the error and success cases.
 2608      */
 2609 backout:
 2610     sc->acpi_next_sstate = 0;
 2611     if (slp_state >= ACPI_SS_GPE_SET) {
 2612         acpi_wake_prep_walk(state);
 2613         sc->acpi_sstate = ACPI_STATE_S0;
 2614     }
 2615     if (slp_state >= ACPI_SS_SLP_PREP)
 2616         AcpiLeaveSleepState(state);
 2617     if (slp_state >= ACPI_SS_DEV_SUSPEND)
 2618         DEVICE_RESUME(root_bus);
 2619     if (slp_state >= ACPI_SS_SLEPT)
 2620         acpi_enable_fixed_events(sc);
 2621 
 2622     mtx_unlock(&Giant);
 2623 
 2624 #ifdef SMP
 2625     thread_lock(curthread);
 2626     sched_unbind(curthread);
 2627     thread_unlock(curthread);
 2628 #endif
 2629 
 2630     /* Allow another sleep request after a while. */
 2631     timeout(acpi_sleep_enable, sc, hz * ACPI_MINIMUM_AWAKETIME);
 2632 
 2633     /* Run /etc/rc.resume after we are back. */
 2634     if (devctl_process_running())
 2635         acpi_UserNotify("Resume", ACPI_ROOT_OBJECT, state);
 2636 
 2637     return_ACPI_STATUS (status);
 2638 }
 2639 
 2640 void
 2641 acpi_resync_clock(struct acpi_softc *sc)
 2642 {
 2643 
 2644     if (!acpi_reset_clock)
 2645         return;
 2646 
 2647     /*
 2648      * Warm up timecounter again and reset system clock.
 2649      */
 2650     (void)timecounter->tc_get_timecount(timecounter);
 2651     (void)timecounter->tc_get_timecount(timecounter);
 2652     inittodr(time_second + sc->acpi_sleep_delay);
 2653 }
 2654 
 2655 /* Enable or disable the device's wake GPE. */
 2656 int
 2657 acpi_wake_set_enable(device_t dev, int enable)
 2658 {
 2659     struct acpi_prw_data prw;
 2660     ACPI_STATUS status;
 2661     int flags;
 2662 
 2663     /* Make sure the device supports waking the system and get the GPE. */
 2664     if (acpi_parse_prw(acpi_get_handle(dev), &prw) != 0)
 2665         return (ENXIO);
 2666 
 2667     flags = acpi_get_flags(dev);
 2668     if (enable) {
 2669         status = AcpiEnableGpe(prw.gpe_handle, prw.gpe_bit,
 2670             ACPI_GPE_TYPE_WAKE_RUN);
 2671         if (ACPI_FAILURE(status)) {
 2672             device_printf(dev, "enable wake failed\n");
 2673             return (ENXIO);
 2674         }
 2675         acpi_set_flags(dev, flags | ACPI_FLAG_WAKE_ENABLED);
 2676     } else {
 2677         status = AcpiDisableGpe(prw.gpe_handle, prw.gpe_bit,
 2678             ACPI_GPE_TYPE_WAKE);
 2679         if (ACPI_FAILURE(status)) {
 2680             device_printf(dev, "disable wake failed\n");
 2681             return (ENXIO);
 2682         }
 2683         acpi_set_flags(dev, flags & ~ACPI_FLAG_WAKE_ENABLED);
 2684     }
 2685 
 2686     return (0);
 2687 }
 2688 
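/*
 * Illustrative sketch (not compiled): a driver may arm its own wake GPE at
 * attach time; the per-device "wake" sysctl added later in this file ends
 * up in the same function.
 */
#if 0
    if (acpi_wake_set_enable(dev, 1) != 0)
	device_printf(dev, "could not enable wake GPE\n");
#endif
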
 2689 static int
 2690 acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate)
 2691 {
 2692     struct acpi_prw_data prw;
 2693     device_t dev;
 2694 
 2695     /* Check that this is a wake-capable device and get its GPE. */
 2696     if (acpi_parse_prw(handle, &prw) != 0)
 2697         return (ENXIO);
 2698     dev = acpi_get_device(handle);
 2699 
 2700     /*
 2701      * The destination sleep state must be less than (i.e., higher power)
 2702      * or equal to the value specified by _PRW.  If this GPE cannot be
 2703      * enabled for the next sleep state, then disable it.  If it can and
 2704      * the user requested it be enabled, turn on any required power resources
 2705      * and set _PSW.
 2706      */
 2707     if (sstate > prw.lowest_wake) {
 2708         AcpiDisableGpe(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_TYPE_WAKE);
 2709         if (bootverbose)
 2710             device_printf(dev, "wake_prep disabled wake for %s (S%d)\n",
 2711                 acpi_name(handle), sstate);
 2712     } else if (dev && (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) != 0) {
 2713         acpi_pwr_wake_enable(handle, 1);
 2714         acpi_SetInteger(handle, "_PSW", 1);
 2715         if (bootverbose)
 2716             device_printf(dev, "wake_prep enabled for %s (S%d)\n",
 2717                 acpi_name(handle), sstate);
 2718     }
 2719 
 2720     return (0);
 2721 }
 2722 
 2723 static int
 2724 acpi_wake_run_prep(ACPI_HANDLE handle, int sstate)
 2725 {
 2726     struct acpi_prw_data prw;
 2727     device_t dev;
 2728 
 2729     /*
 2730      * Check that this is a wake-capable device and get its GPE.  Return
 2731      * now if the user didn't enable this device for wake.
 2732      */
 2733     if (acpi_parse_prw(handle, &prw) != 0)
 2734         return (ENXIO);
 2735     dev = acpi_get_device(handle);
 2736     if (dev == NULL || (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) == 0)
 2737         return (0);
 2738 
 2739     /*
 2740      * If this GPE couldn't be enabled for the previous sleep state, it was
 2741      * disabled before going to sleep so re-enable it.  If it was enabled,
 2742      * clear _PSW and turn off any power resources it used.
 2743      */
 2744     if (sstate > prw.lowest_wake) {
 2745         AcpiEnableGpe(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_TYPE_WAKE_RUN);
 2746         if (bootverbose)
 2747             device_printf(dev, "run_prep re-enabled %s\n", acpi_name(handle));
 2748     } else {
 2749         acpi_SetInteger(handle, "_PSW", 0);
 2750         acpi_pwr_wake_enable(handle, 0);
 2751         if (bootverbose)
 2752             device_printf(dev, "run_prep cleaned up for %s\n",
 2753                 acpi_name(handle));
 2754     }
 2755 
 2756     return (0);
 2757 }
 2758 
 2759 static ACPI_STATUS
 2760 acpi_wake_prep(ACPI_HANDLE handle, UINT32 level, void *context, void **status)
 2761 {
 2762     int sstate;
 2763 
 2764     /* If suspending, run the sleep prep function, otherwise wake. */
 2765     sstate = *(int *)context;
 2766     if (AcpiGbl_SystemAwakeAndRunning)
 2767         acpi_wake_sleep_prep(handle, sstate);
 2768     else
 2769         acpi_wake_run_prep(handle, sstate);
 2770     return (AE_OK);
 2771 }
 2772 
 2773 /* Walk the tree rooted at acpi0 to prep devices for suspend/resume. */
 2774 static int
 2775 acpi_wake_prep_walk(int sstate)
 2776 {
 2777     ACPI_HANDLE sb_handle;
 2778 
 2779     if (ACPI_SUCCESS(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle)))
 2780         AcpiWalkNamespace(ACPI_TYPE_DEVICE, sb_handle, 100,
 2781             acpi_wake_prep, NULL, &sstate, NULL);
 2782     return (0);
 2783 }
 2784 
 2785 /* Walk the tree rooted at acpi0 to attach per-device wake sysctls. */
 2786 static int
 2787 acpi_wake_sysctl_walk(device_t dev)
 2788 {
 2789     int error, i, numdevs;
 2790     device_t *devlist;
 2791     device_t child;
 2792     ACPI_STATUS status;
 2793 
 2794     error = device_get_children(dev, &devlist, &numdevs);
 2795     if (error != 0 || numdevs == 0) {
 2796         if (numdevs == 0)
 2797             free(devlist, M_TEMP);
 2798         return (error);
 2799     }
 2800     for (i = 0; i < numdevs; i++) {
 2801         child = devlist[i];
 2802         acpi_wake_sysctl_walk(child);
 2803         if (!device_is_attached(child))
 2804             continue;
 2805         status = AcpiEvaluateObject(acpi_get_handle(child), "_PRW", NULL, NULL);
 2806         if (ACPI_SUCCESS(status)) {
 2807             SYSCTL_ADD_PROC(device_get_sysctl_ctx(child),
 2808                 SYSCTL_CHILDREN(device_get_sysctl_tree(child)), OID_AUTO,
 2809                 "wake", CTLTYPE_INT | CTLFLAG_RW, child, 0,
 2810                 acpi_wake_set_sysctl, "I", "Device set to wake the system");
 2811         }
 2812     }
 2813     free(devlist, M_TEMP);
 2814 
 2815     return (0);
 2816 }
 2817 
 2818 /* Enable or disable wake from userland. */
 2819 static int
 2820 acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS)
 2821 {
 2822     int enable, error;
 2823     device_t dev;
 2824 
 2825     dev = (device_t)arg1;
 2826     enable = (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) ? 1 : 0;
 2827 
 2828     error = sysctl_handle_int(oidp, &enable, 0, req);
 2829     if (error != 0 || req->newptr == NULL)
 2830         return (error);
 2831     if (enable != 0 && enable != 1)
 2832         return (EINVAL);
 2833 
 2834     return (acpi_wake_set_enable(dev, enable));
 2835 }
 2836 
 2837 /* Parse a device's _PRW into a structure. */
 2838 int
 2839 acpi_parse_prw(ACPI_HANDLE h, struct acpi_prw_data *prw)
 2840 {
 2841     ACPI_STATUS                 status;
 2842     ACPI_BUFFER                 prw_buffer;
 2843     ACPI_OBJECT                 *res, *res2;
 2844     int                         error, i, power_count;
 2845 
 2846     if (h == NULL || prw == NULL)
 2847         return (EINVAL);
 2848 
 2849     /*
 2850      * The _PRW object (7.2.9) is only required for devices that have the
 2851      * ability to wake the system from a sleeping state.
 2852      */
 2853     error = EINVAL;
 2854     prw_buffer.Pointer = NULL;
 2855     prw_buffer.Length = ACPI_ALLOCATE_BUFFER;
 2856     status = AcpiEvaluateObject(h, "_PRW", NULL, &prw_buffer);
 2857     if (ACPI_FAILURE(status))
 2858         return (ENOENT);
 2859     res = (ACPI_OBJECT *)prw_buffer.Pointer;
 2860     if (res == NULL)
 2861         return (ENOENT);
 2862     if (!ACPI_PKG_VALID(res, 2))
 2863         goto out;
 2864 
 2865     /*
 2866      * Element 1 of the _PRW object:
 2867      * The lowest power system sleeping state that can be entered while still
 2868      * providing wake functionality.  The sleeping state being entered must
 2869      * be less than (i.e., higher power) or equal to this value.
 2870      */
 2871     if (acpi_PkgInt32(res, 1, &prw->lowest_wake) != 0)
 2872         goto out;
 2873 
 2874     /*
 2875      * Element 0 of the _PRW object:
 2876      */
 2877     switch (res->Package.Elements[0].Type) {
 2878     case ACPI_TYPE_INTEGER:
 2879         /*
 2880          * If the data type of this package element is numeric, then this
 2881          * _PRW package element is the bit index in the GPEx_EN, in the
 2882          * GPE blocks described in the FADT, of the enable bit that is
 2883          * enabled for the wake event.
 2884          */
 2885         prw->gpe_handle = NULL;
 2886         prw->gpe_bit = res->Package.Elements[0].Integer.Value;
 2887         error = 0;
 2888         break;
 2889     case ACPI_TYPE_PACKAGE:
 2890         /*
 2891          * If the data type of this package element is a package, then this
 2892          * _PRW package element is itself a package containing two
 2893          * elements.  The first is an object reference to the GPE Block
 2894          * device that contains the GPE that will be triggered by the wake
 2895          * event.  The second element is numeric and it contains the bit
 2896          * index in the GPEx_EN, in the GPE Block referenced by the
 2897          * first element in the package, of the enable bit that is enabled for
 2898          * the wake event.
 2899          *
 2900          * For example, if this field is a package then it is of the form:
 2901          * Package() {\_SB.PCI0.ISA.GPE, 2}
 2902          */
 2903         res2 = &res->Package.Elements[0];
 2904         if (!ACPI_PKG_VALID(res2, 2))
 2905             goto out;
 2906         prw->gpe_handle = acpi_GetReference(NULL, &res2->Package.Elements[0]);
 2907         if (prw->gpe_handle == NULL)
 2908             goto out;
 2909         if (acpi_PkgInt32(res2, 1, &prw->gpe_bit) != 0)
 2910             goto out;
 2911         error = 0;
 2912         break;
 2913     default:
 2914         goto out;
 2915     }
 2916 
 2917     /* Elements 2 to N of the _PRW object are power resources. */
 2918     power_count = res->Package.Count - 2;
 2919     if (power_count > ACPI_PRW_MAX_POWERRES) {
 2920         printf("ACPI device %s has too many power resources\n", acpi_name(h));
 2921         power_count = 0;
 2922     }
 2923     prw->power_res_count = power_count;
 2924     for (i = 0; i < power_count; i++)
 2925         prw->power_res[i] = res->Package.Elements[i];
 2926 
 2927 out:
 2928     if (prw_buffer.Pointer != NULL)
 2929         AcpiOsFree(prw_buffer.Pointer);
 2930     return (error);
 2931 }
 2932 
 2933 /*
 2934  * ACPI Event Handlers
 2935  */
 2936 
 2937 /* System Event Handlers (registered by EVENTHANDLER_REGISTER) */
 2938 
 2939 static void
 2940 acpi_system_eventhandler_sleep(void *arg, int state)
 2941 {
 2942     struct acpi_softc *sc = (struct acpi_softc *)arg;
 2943     int ret;
 2944 
 2945     ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
 2946 
 2947     /* Check if button action is disabled or unknown. */
 2948     if (state == ACPI_STATE_UNKNOWN)
 2949         return;
 2950 
 2951     /* Request that the system prepare to enter the given suspend state. */
 2952     ret = acpi_ReqSleepState(sc, state);
 2953     if (ret != 0)
 2954         device_printf(sc->acpi_dev,
 2955             "request to enter state S%d failed (err %d)\n", state, ret);
 2956 
 2957     return_VOID;
 2958 }
 2959 
 2960 static void
 2961 acpi_system_eventhandler_wakeup(void *arg, int state)
 2962 {
 2963 
 2964     ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
 2965 
 2966     /* Currently, nothing to do for wakeup. */
 2967 
 2968     return_VOID;
 2969 }
 2970 
 2971 /* 
 2972  * ACPICA Event Handlers (FixedEvent, also called from button notify handler)
 2973  */
 2974 UINT32
 2975 acpi_event_power_button_sleep(void *context)
 2976 {
 2977     struct acpi_softc   *sc = (struct acpi_softc *)context;
 2978 
 2979     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 2980 
 2981     EVENTHANDLER_INVOKE(acpi_sleep_event, sc->acpi_power_button_sx);
 2982 
 2983     return_VALUE (ACPI_INTERRUPT_HANDLED);
 2984 }
 2985 
 2986 UINT32
 2987 acpi_event_power_button_wake(void *context)
 2988 {
 2989     struct acpi_softc   *sc = (struct acpi_softc *)context;
 2990 
 2991     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 2992 
 2993     EVENTHANDLER_INVOKE(acpi_wakeup_event, sc->acpi_power_button_sx);
 2994 
 2995     return_VALUE (ACPI_INTERRUPT_HANDLED);
 2996 }
 2997 
 2998 UINT32
 2999 acpi_event_sleep_button_sleep(void *context)
 3000 {
 3001     struct acpi_softc   *sc = (struct acpi_softc *)context;
 3002 
 3003     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 3004 
 3005     EVENTHANDLER_INVOKE(acpi_sleep_event, sc->acpi_sleep_button_sx);
 3006 
 3007     return_VALUE (ACPI_INTERRUPT_HANDLED);
 3008 }
 3009 
 3010 UINT32
 3011 acpi_event_sleep_button_wake(void *context)
 3012 {
 3013     struct acpi_softc   *sc = (struct acpi_softc *)context;
 3014 
 3015     ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 3016 
 3017     EVENTHANDLER_INVOKE(acpi_wakeup_event, sc->acpi_sleep_button_sx);
 3018 
 3019     return_VALUE (ACPI_INTERRUPT_HANDLED);
 3020 }
 3021 
 3022 /*
 3023  * XXX This static buffer is suboptimal.  There is no locking so only
 3024  * use this for single-threaded callers.
 3025  */
 3026 char *
 3027 acpi_name(ACPI_HANDLE handle)
 3028 {
 3029     ACPI_BUFFER buf;
 3030     static char data[256];
 3031 
 3032     buf.Length = sizeof(data);
 3033     buf.Pointer = data;
 3034 
 3035     if (handle && ACPI_SUCCESS(AcpiGetName(handle, ACPI_FULL_PATHNAME, &buf)))
 3036         return (data);
 3037     return ("(unknown)");
 3038 }
 3039 
 3040 /*
 3041  * Debugging/bug-avoidance.  Avoid trying to fetch info on various
 3042  * parts of the namespace.
 3043  */
 3044 int
 3045 acpi_avoid(ACPI_HANDLE handle)
 3046 {
 3047     char        *cp, *env, *np;
 3048     int         len;
 3049 
 3050     np = acpi_name(handle);
 3051     if (*np == '\\')
 3052         np++;
 3053     if ((env = getenv("debug.acpi.avoid")) == NULL)
 3054         return (0);
 3055 
 3056     /* Scan the avoid list checking for a match */
 3057     cp = env;
 3058     for (;;) {
 3059         while (*cp != 0 && isspace(*cp))
 3060             cp++;
 3061         if (*cp == 0)
 3062             break;
 3063         len = 0;
 3064         while (cp[len] != 0 && !isspace(cp[len]))
 3065             len++;
 3066         if (!strncmp(cp, np, len)) {
 3067             freeenv(env);
 3068             return(1);
 3069         }
 3070         cp += len;
 3071     }
 3072     freeenv(env);
 3073 
 3074     return (0);
 3075 }
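
/*
 * A usage sketch (the paths are examples only): the avoid list is normally
 * supplied as a loader tunable, e.g. in /boot/loader.conf:
 *
 *      debug.acpi.avoid="_SB_.PCI0.VGA_ _SB_.PCI0.LPC_"
 *
 * Entries are whitespace-separated namespace paths written without the
 * leading backslash, since acpi_avoid() strips it from the handle's name
 * before comparing; because the comparison uses strncmp() with the entry's
 * length, an entry also matches every handle whose path begins with it.
 */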
 3076 
 3077 /*
 3078  * Debugging/bug-avoidance.  Disable ACPI subsystems named in debug.acpi.disabled.
 3079  */
 3080 int
 3081 acpi_disabled(char *subsys)
 3082 {
 3083     char        *cp, *env;
 3084     int         len;
 3085 
 3086     if ((env = getenv("debug.acpi.disabled")) == NULL)
 3087         return (0);
 3088     if (strcmp(env, "all") == 0) {
 3089         freeenv(env);
 3090         return (1);
 3091     }
 3092 
 3093     /* Scan the disable list, checking for a match. */
 3094     cp = env;
 3095     for (;;) {
 3096         while (*cp != '\0' && isspace(*cp))
 3097             cp++;
 3098         if (*cp == '\0')
 3099             break;
 3100         len = 0;
 3101         while (cp[len] != '\0' && !isspace(cp[len]))
 3102             len++;
 3103         if (strncmp(cp, subsys, len) == 0) {
 3104             freeenv(env);
 3105             return (1);
 3106         }
 3107         cp += len;
 3108     }
 3109     freeenv(env);
 3110 
 3111     return (0);
 3112 }
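
/*
 * A usage sketch (the subsystem names are whatever individual drivers pass
 * to acpi_disabled(); "thermal" and "ec" are examples): set the tunable in
 * /boot/loader.conf, with "all" matching every caller at once:
 *
 *      debug.acpi.disabled="thermal ec"
 *      debug.acpi.disabled="all"
 */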
 3113 
 3114 /*
 3115  * Control interface.
 3116  *
 3117  * We multiplex ioctls for all participating ACPI devices here.  Individual 
 3118  * drivers wanting to be accessible via /dev/acpi should use the
 3119  * register/deregister interface to make their handlers visible.
 3120  */
 3121 struct acpi_ioctl_hook
 3122 {
 3123     TAILQ_ENTRY(acpi_ioctl_hook) link;
 3124     u_long                       cmd;
 3125     acpi_ioctl_fn                fn;
 3126     void                         *arg;
 3127 };
 3128 
 3129 static TAILQ_HEAD(,acpi_ioctl_hook)     acpi_ioctl_hooks;
 3130 static int                              acpi_ioctl_hooks_initted;
 3131 
 3132 int
 3133 acpi_register_ioctl(u_long cmd, acpi_ioctl_fn fn, void *arg)
 3134 {
 3135     struct acpi_ioctl_hook      *hp;
 3136 
 3137     if ((hp = malloc(sizeof(*hp), M_ACPIDEV, M_NOWAIT)) == NULL)
 3138         return (ENOMEM);
 3139     hp->cmd = cmd;
 3140     hp->fn = fn;
 3141     hp->arg = arg;
 3142 
 3143     ACPI_LOCK(acpi);
 3144     if (acpi_ioctl_hooks_initted == 0) {
 3145         TAILQ_INIT(&acpi_ioctl_hooks);
 3146         acpi_ioctl_hooks_initted = 1;
 3147     }
 3148     TAILQ_INSERT_TAIL(&acpi_ioctl_hooks, hp, link);
 3149     ACPI_UNLOCK(acpi);
 3150 
 3151     return (0);
 3152 }
 3153 
 3154 void
 3155 acpi_deregister_ioctl(u_long cmd, acpi_ioctl_fn fn)
 3156 {
 3157     struct acpi_ioctl_hook      *hp;
 3158 
 3159     ACPI_LOCK(acpi);
 3160     TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link)
 3161         if (hp->cmd == cmd && hp->fn == fn)
 3162             break;
 3163 
 3164     if (hp != NULL) {
 3165         TAILQ_REMOVE(&acpi_ioctl_hooks, hp, link);
 3166         free(hp, M_ACPIDEV);
 3167     }
 3168     ACPI_UNLOCK(acpi);
 3169 }
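
/*
 * A minimal sketch, using hypothetical FOOIO_GETSTATE/foo_* names, of a
 * driver exposing an ioctl through /dev/acpi via the register/deregister
 * interface above (both functions are declared in <dev/acpica/acpivar.h>).
 * The handler signature matches the dispatch in acpiioctl() below.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioccom.h>

#define FOOIO_GETSTATE  _IOR('F', 1, int)       /* hypothetical ioctl */

struct foo_softc {
    int     foo_state;                          /* hypothetical device state */
};

static int
foo_ioctl_handler(u_long cmd, caddr_t addr, void *arg)
{
    struct foo_softc *sc = arg;

    *(int *)addr = sc->foo_state;
    return (0);
}

static void
foo_ioctl_attach(struct foo_softc *sc)
{

    if (acpi_register_ioctl(FOOIO_GETSTATE, foo_ioctl_handler, sc) != 0)
        printf("foo: cannot register ACPI ioctl\n");
}

static void
foo_ioctl_detach(void)
{

    acpi_deregister_ioctl(FOOIO_GETSTATE, foo_ioctl_handler);
}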
 3170 
 3171 static int
 3172 acpiopen(struct cdev *dev, int flag, int fmt, struct thread *td)
 3173 {
 3174     return (0);
 3175 }
 3176 
 3177 static int
 3178 acpiclose(struct cdev *dev, int flag, int fmt, struct thread *td)
 3179 {
 3180     return (0);
 3181 }
 3182 
 3183 static int
 3184 acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
 3185 {
 3186     struct acpi_softc           *sc;
 3187     struct acpi_ioctl_hook      *hp;
 3188     int                         error, state;
 3189 
 3190     error = 0;
 3191     hp = NULL;
 3192     sc = dev->si_drv1;
 3193 
 3194     /*
 3195      * Scan the list of registered ioctls, looking for handlers.
 3196      */
 3197     ACPI_LOCK(acpi);
 3198     if (acpi_ioctl_hooks_initted)
 3199         TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) {
 3200             if (hp->cmd == cmd)
 3201                 break;
 3202         }
 3203     ACPI_UNLOCK(acpi);
 3204     if (hp)
 3205         return (hp->fn(cmd, addr, hp->arg));
 3206 
 3207     /*
 3208      * Core ioctls require the device to have been opened for writing.
 3209      * The registered ioctls dispatched above only fetch information and
 3210      * do not change system behavior, so they are exempt from this check.
 3211      */
 3212     if ((flag & FWRITE) == 0)
 3213         return (EPERM);
 3214 
 3215     /* Core system ioctls. */
 3216     switch (cmd) {
 3217     case ACPIIO_REQSLPSTATE:
 3218         state = *(int *)addr;
 3219         if (state != ACPI_STATE_S5)
 3220             return (acpi_ReqSleepState(sc, state));
 3221         device_printf(sc->acpi_dev, "power off via acpi ioctl not supported\n");
 3222         error = EOPNOTSUPP;
 3223         break;
 3224     case ACPIIO_ACKSLPSTATE:
 3225         error = *(int *)addr;
 3226         error = acpi_AckSleepState(sc->acpi_clone, error);
 3227         break;
 3228     case ACPIIO_SETSLPSTATE:    /* DEPRECATED */
 3229         state = *(int *)addr;
 3230         if (state < ACPI_STATE_S0 || state > ACPI_S_STATES_MAX)
 3231             return (EINVAL);
 3232         if (!acpi_sleep_states[state])
 3233             return (EOPNOTSUPP);
 3234         if (ACPI_FAILURE(acpi_SetSleepState(sc, state)))
 3235             error = ENXIO;
 3236         break;
 3237     default:
 3238         error = ENXIO;
 3239         break;
 3240     }
 3241 
 3242     return (error);
 3243 }
 3244 
 3245 static int
 3246 acpi_sname2sstate(const char *sname)
 3247 {
 3248     int sstate;
 3249 
 3250     if (toupper(sname[0]) == 'S') {
 3251         sstate = sname[1] - '0';
 3252         if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5 &&
 3253             sname[2] == '\0')
 3254             return (sstate);
 3255     } else if (strcasecmp(sname, "NONE") == 0)
 3256         return (ACPI_STATE_UNKNOWN);
 3257     return (-1);
 3258 }
 3259 
 3260 static const char *
 3261 acpi_sstate2sname(int sstate)
 3262 {
 3263     static const char *snames[] = { "S0", "S1", "S2", "S3", "S4", "S5" };
 3264 
 3265     if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5)
 3266         return (snames[sstate]);
 3267     else if (sstate == ACPI_STATE_UNKNOWN)
 3268         return ("NONE");
 3269     return (NULL);
 3270 }
 3271 
 3272 static int
 3273 acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
 3274 {
 3275     int error;
 3276     struct sbuf sb;
 3277     UINT8 state;
 3278 
 3279     sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
 3280     for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++)
 3281         if (acpi_sleep_states[state])
 3282             sbuf_printf(&sb, "%s ", acpi_sstate2sname(state));
 3283     sbuf_trim(&sb);
 3284     sbuf_finish(&sb);
 3285     error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
 3286     sbuf_delete(&sb);
 3287     return (error);
 3288 }
 3289 
 3290 static int
 3291 acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
 3292 {
 3293     char sleep_state[10];
 3294     int error, new_state, old_state;
 3295 
 3296     old_state = *(int *)oidp->oid_arg1;
 3297     strlcpy(sleep_state, acpi_sstate2sname(old_state), sizeof(sleep_state));
 3298     error = sysctl_handle_string(oidp, sleep_state, sizeof(sleep_state), req);
 3299     if (error == 0 && req->newptr != NULL) {
 3300         new_state = acpi_sname2sstate(sleep_state);
 3301         if (new_state < ACPI_STATE_S1)
 3302             return (EINVAL);
 3303         if (new_state < ACPI_S_STATE_COUNT && !acpi_sleep_states[new_state])
 3304             return (EOPNOTSUPP);
 3305         if (new_state != old_state)
 3306             *(int *)oidp->oid_arg1 = new_state;
 3307     }
 3308     return (error);
 3309 }
 3310 
 3311 /* Inform devctl(4) when we receive a Notify. */
 3312 void
 3313 acpi_UserNotify(const char *subsystem, ACPI_HANDLE h, uint8_t notify)
 3314 {
 3315     char                notify_buf[16];
 3316     ACPI_BUFFER         handle_buf;
 3317     ACPI_STATUS         status;
 3318 
 3319     if (subsystem == NULL)
 3320         return;
 3321 
 3322     handle_buf.Pointer = NULL;
 3323     handle_buf.Length = ACPI_ALLOCATE_BUFFER;
 3324     status = AcpiNsHandleToPathname(h, &handle_buf);
 3325     if (ACPI_FAILURE(status))
 3326         return;
 3327     snprintf(notify_buf, sizeof(notify_buf), "notify=0x%02x", notify);
 3328     devctl_notify("ACPI", subsystem, handle_buf.Pointer, notify_buf);
 3329     AcpiOsFree(handle_buf.Pointer);
 3330 }
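
/*
 * A usage sketch: the notification above surfaces in devd(8) as an ACPI
 * event, so a devd.conf rule can react to it.  The subsystem, notify value
 * and action are examples only ("Lid" is the string a lid driver would
 * pass as the subsystem argument):
 *
 *      notify 10 {
 *              match "system"          "ACPI";
 *              match "subsystem"       "Lid";
 *              match "notify"          "0x00";
 *              action "logger ACPI lid closed";
 *      };
 */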
 3331 
 3332 #ifdef ACPI_DEBUG
 3333 /*
 3334  * Support for parsing debug options from the kernel environment.
 3335  *
 3336  * Bits may be set in the AcpiDbgLayer and AcpiDbgLevel debug registers
 3337  * by specifying the names of the bits in the debug.acpi.layer and
 3338  * debug.acpi.level environment variables.  Bits may be unset by 
 3339  * prefixing the bit name with !.
 3340  */
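
/*
 * A usage sketch (the particular bit names are examples taken from the
 * tables below):
 *
 *      debug.acpi.layer="ACPI_EC ACPI_BUS"
 *      debug.acpi.level="ACPI_LV_INFO !ACPI_LV_FUNCTIONS"
 *
 * set in /boot/loader.conf for boot-time debugging; the same strings can
 * also be written at runtime through the debug.acpi.layer and
 * debug.acpi.level sysctls defined at the end of this ACPI_DEBUG block.
 */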
 3341 struct debugtag
 3342 {
 3343     char        *name;
 3344     UINT32      value;
 3345 };
 3346 
 3347 static struct debugtag  dbg_layer[] = {
 3348     {"ACPI_UTILITIES",          ACPI_UTILITIES},
 3349     {"ACPI_HARDWARE",           ACPI_HARDWARE},
 3350     {"ACPI_EVENTS",             ACPI_EVENTS},
 3351     {"ACPI_TABLES",             ACPI_TABLES},
 3352     {"ACPI_NAMESPACE",          ACPI_NAMESPACE},
 3353     {"ACPI_PARSER",             ACPI_PARSER},
 3354     {"ACPI_DISPATCHER",         ACPI_DISPATCHER},
 3355     {"ACPI_EXECUTER",           ACPI_EXECUTER},
 3356     {"ACPI_RESOURCES",          ACPI_RESOURCES},
 3357     {"ACPI_CA_DEBUGGER",        ACPI_CA_DEBUGGER},
 3358     {"ACPI_OS_SERVICES",        ACPI_OS_SERVICES},
 3359     {"ACPI_CA_DISASSEMBLER",    ACPI_CA_DISASSEMBLER},
 3360     {"ACPI_ALL_COMPONENTS",     ACPI_ALL_COMPONENTS},
 3361 
 3362     {"ACPI_AC_ADAPTER",         ACPI_AC_ADAPTER},
 3363     {"ACPI_BATTERY",            ACPI_BATTERY},
 3364     {"ACPI_BUS",                ACPI_BUS},
 3365     {"ACPI_BUTTON",             ACPI_BUTTON},
 3366     {"ACPI_EC",                 ACPI_EC},
 3367     {"ACPI_FAN",                ACPI_FAN},
 3368     {"ACPI_POWERRES",           ACPI_POWERRES},
 3369     {"ACPI_PROCESSOR",          ACPI_PROCESSOR},
 3370     {"ACPI_THERMAL",            ACPI_THERMAL},
 3371     {"ACPI_TIMER",              ACPI_TIMER},
 3372     {"ACPI_ALL_DRIVERS",        ACPI_ALL_DRIVERS},
 3373     {NULL, 0}
 3374 };
 3375 
 3376 static struct debugtag dbg_level[] = {
 3377     {"ACPI_LV_INIT",            ACPI_LV_INIT},
 3378     {"ACPI_LV_DEBUG_OBJECT",    ACPI_LV_DEBUG_OBJECT},
 3379     {"ACPI_LV_INFO",            ACPI_LV_INFO},
 3380     {"ACPI_LV_ALL_EXCEPTIONS",  ACPI_LV_ALL_EXCEPTIONS},
 3381 
 3382     /* Trace verbosity level 1 [Standard Trace Level] */
 3383     {"ACPI_LV_INIT_NAMES",      ACPI_LV_INIT_NAMES},
 3384     {"ACPI_LV_PARSE",           ACPI_LV_PARSE},
 3385     {"ACPI_LV_LOAD",            ACPI_LV_LOAD},
 3386     {"ACPI_LV_DISPATCH",        ACPI_LV_DISPATCH},
 3387     {"ACPI_LV_EXEC",            ACPI_LV_EXEC},
 3388     {"ACPI_LV_NAMES",           ACPI_LV_NAMES},
 3389     {"ACPI_LV_OPREGION",        ACPI_LV_OPREGION},
 3390     {"ACPI_LV_BFIELD",          ACPI_LV_BFIELD},
 3391     {"ACPI_LV_TABLES",          ACPI_LV_TABLES},
 3392     {"ACPI_LV_VALUES",          ACPI_LV_VALUES},
 3393     {"ACPI_LV_OBJECTS",         ACPI_LV_OBJECTS},
 3394     {"ACPI_LV_RESOURCES",       ACPI_LV_RESOURCES},
 3395     {"ACPI_LV_USER_REQUESTS",   ACPI_LV_USER_REQUESTS},
 3396     {"ACPI_LV_PACKAGE",         ACPI_LV_PACKAGE},
 3397     {"ACPI_LV_VERBOSITY1",      ACPI_LV_VERBOSITY1},
 3398 
 3399     /* Trace verbosity level 2 [Function tracing and memory allocation] */
 3400     {"ACPI_LV_ALLOCATIONS",     ACPI_LV_ALLOCATIONS},
 3401     {"ACPI_LV_FUNCTIONS",       ACPI_LV_FUNCTIONS},
 3402     {"ACPI_LV_OPTIMIZATIONS",   ACPI_LV_OPTIMIZATIONS},
 3403     {"ACPI_LV_VERBOSITY2",      ACPI_LV_VERBOSITY2},
 3404     {"ACPI_LV_ALL",             ACPI_LV_ALL},
 3405 
 3406     /* Trace verbosity level 3 [Threading, I/O, and Interrupts] */
 3407     {"ACPI_LV_MUTEX",           ACPI_LV_MUTEX},
 3408     {"ACPI_LV_THREADS",         ACPI_LV_THREADS},
 3409     {"ACPI_LV_IO",              ACPI_LV_IO},
 3410     {"ACPI_LV_INTERRUPTS",      ACPI_LV_INTERRUPTS},
 3411     {"ACPI_LV_VERBOSITY3",      ACPI_LV_VERBOSITY3},
 3412 
 3413     /* Exceptionally verbose output -- also used in the global "DebugLevel"  */
 3414     {"ACPI_LV_AML_DISASSEMBLE", ACPI_LV_AML_DISASSEMBLE},
 3415     {"ACPI_LV_VERBOSE_INFO",    ACPI_LV_VERBOSE_INFO},
 3416     {"ACPI_LV_FULL_TABLES",     ACPI_LV_FULL_TABLES},
 3417     {"ACPI_LV_EVENTS",          ACPI_LV_EVENTS},
 3418     {"ACPI_LV_VERBOSE",         ACPI_LV_VERBOSE},
 3419     {NULL, 0}
 3420 };    
 3421 
 3422 static void
 3423 acpi_parse_debug(char *cp, struct debugtag *tag, UINT32 *flag)
 3424 {
 3425     char        *ep;
 3426     int         i, l;
 3427     int         set;
 3428 
 3429     while (*cp) {
 3430         if (isspace(*cp)) {
 3431             cp++;
 3432             continue;
 3433         }
 3434         ep = cp;
 3435         while (*ep && !isspace(*ep))
 3436             ep++;
 3437         if (*cp == '!') {
 3438             set = 0;
 3439             cp++;
 3440             if (cp == ep)
 3441                 continue;
 3442         } else {
 3443             set = 1;
 3444         }
 3445         l = ep - cp;
 3446         for (i = 0; tag[i].name != NULL; i++) {
 3447             if (!strncmp(cp, tag[i].name, l)) {
 3448                 if (set)
 3449                     *flag |= tag[i].value;
 3450                 else
 3451                     *flag &= ~tag[i].value;
 3452             }
 3453         }
 3454         cp = ep;
 3455     }
 3456 }
 3457 
 3458 static void
 3459 acpi_set_debugging(void *junk)
 3460 {
 3461     char        *layer, *level;
 3462 
 3463     if (cold) {
 3464         AcpiDbgLayer = 0;
 3465         AcpiDbgLevel = 0;
 3466     }
 3467 
 3468     layer = getenv("debug.acpi.layer");
 3469     level = getenv("debug.acpi.level");
 3470     if (layer == NULL && level == NULL)
 3471         return;
 3472 
 3473     printf("ACPI set debug");
 3474     if (layer != NULL) {
 3475         if (strcmp("NONE", layer) != 0)
 3476             printf(" layer '%s'", layer);
 3477         acpi_parse_debug(layer, &dbg_layer[0], &AcpiDbgLayer);
 3478         freeenv(layer);
 3479     }
 3480     if (level != NULL) {
 3481         if (strcmp("NONE", level) != 0)
 3482             printf(" level '%s'", level);
 3483         acpi_parse_debug(level, &dbg_level[0], &AcpiDbgLevel);
 3484         freeenv(level);
 3485     }
 3486     printf("\n");
 3487 }
 3488 
 3489 SYSINIT(acpi_debugging, SI_SUB_TUNABLES, SI_ORDER_ANY, acpi_set_debugging,
 3490         NULL);
 3491 
 3492 static int
 3493 acpi_debug_sysctl(SYSCTL_HANDLER_ARGS)
 3494 {
 3495     int          error, *dbg;
 3496     struct       debugtag *tag;
 3497     struct       sbuf sb;
 3498 
 3499     if (sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND) == NULL)
 3500         return (ENOMEM);
 3501     if (strcmp(oidp->oid_arg1, "debug.acpi.layer") == 0) {
 3502         tag = &dbg_layer[0];
 3503         dbg = &AcpiDbgLayer;
 3504     } else {
 3505         tag = &dbg_level[0];
 3506         dbg = &AcpiDbgLevel;
 3507     }
 3508 
 3509     /* Get old values if this is a get request. */
 3510     ACPI_SERIAL_BEGIN(acpi);
 3511     if (*dbg == 0) {
 3512         sbuf_cpy(&sb, "NONE");
 3513     } else if (req->newptr == NULL) {
 3514         for (; tag->name != NULL; tag++) {
 3515             if ((*dbg & tag->value) == tag->value)
 3516                 sbuf_printf(&sb, "%s ", tag->name);
 3517         }
 3518     }
 3519     sbuf_trim(&sb);
 3520     sbuf_finish(&sb);
 3521 
 3522     /* Copy out the old values to the user. */
 3523     error = SYSCTL_OUT(req, sbuf_data(&sb), sbuf_len(&sb));
 3524     sbuf_delete(&sb);
 3525 
 3526     /* If the user is setting a string, parse it. */
 3527     if (error == 0 && req->newptr != NULL) {
 3528         *dbg = 0;
 3529         setenv((char *)oidp->oid_arg1, (char *)req->newptr);
 3530         acpi_set_debugging(NULL);
 3531     }
 3532     ACPI_SERIAL_END(acpi);
 3533 
 3534     return (error);
 3535 }
 3536 
 3537 SYSCTL_PROC(_debug_acpi, OID_AUTO, layer, CTLFLAG_RW | CTLTYPE_STRING,
 3538             "debug.acpi.layer", 0, acpi_debug_sysctl, "A", "");
 3539 SYSCTL_PROC(_debug_acpi, OID_AUTO, level, CTLFLAG_RW | CTLTYPE_STRING,
 3540             "debug.acpi.level", 0, acpi_debug_sysctl, "A", "");
 3541 #endif /* ACPI_DEBUG */
 3542 
 3543 static int
 3544 acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS)
 3545 {
 3546         int     error;
 3547         int     old;
 3548 
 3549         old = acpi_debug_objects;
 3550         error = sysctl_handle_int(oidp, &acpi_debug_objects, 0, req);
 3551         if (error != 0 || req->newptr == NULL)
 3552                 return (error);
 3553         if (old == acpi_debug_objects || (old && acpi_debug_objects))
 3554                 return (0);
 3555 
 3556         ACPI_SERIAL_BEGIN(acpi);
 3557         AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? TRUE : FALSE;
 3558         ACPI_SERIAL_END(acpi);
 3559 
 3560         return (0);
 3561 }
 3562 
 3563 static int
 3564 acpi_pm_func(u_long cmd, void *arg, ...)
 3565 {
 3566         int     state, acpi_state;
 3567         int     error;
 3568         struct  acpi_softc *sc;
 3569         va_list ap;
 3570 
 3571         error = 0;
 3572         switch (cmd) {
 3573         case POWER_CMD_SUSPEND:
 3574                 sc = (struct acpi_softc *)arg;
 3575                 if (sc == NULL) {
 3576                         error = EINVAL;
 3577                         goto out;
 3578                 }
 3579 
 3580                 va_start(ap, arg);
 3581                 state = va_arg(ap, int);
 3582                 va_end(ap);
 3583 
 3584                 switch (state) {
 3585                 case POWER_SLEEP_STATE_STANDBY:
 3586                         acpi_state = sc->acpi_standby_sx;
 3587                         break;
 3588                 case POWER_SLEEP_STATE_SUSPEND:
 3589                         acpi_state = sc->acpi_suspend_sx;
 3590                         break;
 3591                 case POWER_SLEEP_STATE_HIBERNATE:
 3592                         acpi_state = ACPI_STATE_S4;
 3593                         break;
 3594                 default:
 3595                         error = EINVAL;
 3596                         goto out;
 3597                 }
 3598 
 3599                 if (ACPI_FAILURE(acpi_EnterSleepState(sc, acpi_state)))
 3600                         error = ENXIO;
 3601                 break;
 3602         default:
 3603                 error = EINVAL;
 3604                 goto out;
 3605         }
 3606 
 3607 out:
 3608         return (error);
 3609 }
 3610 
 3611 static void
 3612 acpi_pm_register(void *arg)
 3613 {
 3614     if (!cold || resource_disabled("acpi", 0))
 3615         return;
 3616 
 3617     power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, NULL);
 3618 }
 3619 
 3620 SYSINIT(power, SI_SUB_KLD, SI_ORDER_ANY, acpi_pm_register, 0);
