FreeBSD/Linux Kernel Cross Reference
sys/dev/acpica/acpi_cpu_cstate.c

/*-
 * Copyright (c) 2003-2005 Nate Lawson (SDG)
 * Copyright (c) 2001 Michael Smith
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/acpica/acpi_cpu.c,v 1.72 2008/04/12 12:06:00 rpaulo Exp $
 */

#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/globaldata.h>
#include <sys/power.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/thread2.h>
#include <sys/mplock2.h>

#include <bus/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/globaldata.h>
#include <machine/md_var.h>
#include <machine/smp.h>
#include <sys/rman.h>

#include "acpi.h"
#include "acpivar.h"
#include "acpi_cpu.h"

/*
 * Support for ACPI Processor devices, including C[1-3] sleep states.
 */

/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT      ACPI_PROCESSOR
ACPI_MODULE_NAME("PROCESSOR")

struct acpi_cx {
    struct resource     *p_lvlx;        /* Register to read to enter state. */
    int                  rid;           /* rid of p_lvlx */
    uint32_t             type;          /* C1-3 (C4 and up treated as C3). */
    uint32_t             trans_lat;     /* Transition latency (usec). */
    uint32_t             power;         /* Power consumed (mW). */
    int                  res_type;      /* Resource type for p_lvlx. */
};
#define MAX_CX_STATES    8
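/*
 * A _CST package may advertise more states than this; anything beyond
 * MAX_CX_STATES is clamped in acpi_cpu_cx_cst() below.
 */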

struct acpi_cpu_softc {
    device_t             cpu_dev;
    struct acpi_cpux_softc *cpu_parent;
    ACPI_HANDLE          cpu_handle;
    struct mdglobaldata *md;
    uint32_t             cpu_acpi_id;   /* ACPI processor id */
    uint32_t             cpu_p_blk;     /* ACPI P_BLK location */
    uint32_t             cpu_p_blk_len; /* P_BLK length (must be 6). */
    struct acpi_cx       cpu_cx_states[MAX_CX_STATES];
    int                  cpu_cx_count;  /* Number of valid Cx states. */
    int                  cpu_prev_sleep;/* Last idle sleep duration. */
    /* Runtime state. */
    int                  cpu_non_c3;    /* Index of lowest non-C3 state. */
    u_int                cpu_cx_stats[MAX_CX_STATES];/* Cx usage history. */
    /* Values for sysctl. */
    int                  cpu_cx_lowest;
    char                 cpu_cx_supported[64];
};

struct acpi_cpu_device {
    struct resource_list        ad_rl;
};

#define CPU_GET_REG(reg, width)                                         \
    (bus_space_read_ ## width(rman_get_bustag((reg)),                   \
                      rman_get_bushandle((reg)), 0))
#define CPU_SET_REG(reg, width, val)                                    \
    (bus_space_write_ ## width(rman_get_bustag((reg)),                  \
                       rman_get_bushandle((reg)), 0, (val)))
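/*
 * These paste the width into a bus_space access: e.g. CPU_GET_REG(reg, 1)
 * expands to a bus_space_read_1() at offset 0 of the mapped resource.  The
 * one-byte read form is what acpi_cpu_idle() below uses to enter C2/C3
 * through the P_LVLx registers.
 */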

#define PM_USEC(x)       ((x) >> 2)     /* ~4 clocks per usec (3.579545 MHz) */
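/*
 * A note on the approximation above: the ACPI PM timer ticks at 3.579545
 * MHz, so an exact tick-to-microsecond conversion would be
 * (x * 1000000ULL) / 3579545.  The shift divides by 4 instead, slightly
 * underestimating the time asleep, which biases the sleep-time average
 * toward picking shallower (lower-latency) idle states.
 */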

#define ACPI_NOTIFY_CX_STATES   0x81    /* _CST changed. */

#define CPU_QUIRK_NO_C3         (1<<0)  /* C3-type states are not usable. */
#define CPU_QUIRK_NO_BM_CTRL    (1<<2)  /* No bus mastering control. */

#define PCI_VENDOR_INTEL        0x8086
#define PCI_DEVICE_82371AB_3    0x7113  /* PIIX4 chipset for quirks. */
#define PCI_REVISION_A_STEP     0
#define PCI_REVISION_B_STEP     1
#define PCI_REVISION_4E         2
#define PCI_REVISION_4M         3
#define PIIX4_DEVACTB_REG       0x58
#define PIIX4_BRLD_EN_IRQ0      (1<<0)
#define PIIX4_BRLD_EN_IRQ       (1<<1)
#define PIIX4_BRLD_EN_IRQ8      (1<<5)
#define PIIX4_STOP_BREAK_MASK   (PIIX4_BRLD_EN_IRQ0 | PIIX4_BRLD_EN_IRQ | PIIX4_BRLD_EN_IRQ8)
#define PIIX4_PCNTRL_BST_EN     (1<<10)

/* Platform hardware resource information. */
static uint32_t          cpu_smi_cmd;   /* Value to write to SMI_CMD. */
static uint8_t           cpu_cst_cnt;   /* Indicate we are _CST aware. */
static int               cpu_quirks;    /* Indicate any hardware bugs. */

/* Runtime state. */
static int               cpu_disable_idle; /* Disable entry to idle function */
static int               cpu_cx_count;  /* Number of valid Cx states */

/* Values for sysctl. */
static int               cpu_cx_generic;
static int               cpu_cx_lowest;

/* C3 state transition */
static int               cpu_c3_ncpus;

static device_t         *cpu_devices;
static int               cpu_ndevices;
static struct acpi_cpu_softc **cpu_softc;

static int      acpi_cpu_cst_probe(device_t dev);
static int      acpi_cpu_cst_attach(device_t dev);
static int      acpi_cpu_cst_suspend(device_t dev);
static int      acpi_cpu_cst_resume(device_t dev);
static struct resource_list *acpi_cpu_cst_get_rlist(device_t dev,
                    device_t child);
static device_t acpi_cpu_cst_add_child(device_t bus, device_t parent,
                    int order, const char *name, int unit);
static int      acpi_cpu_cst_read_ivar(device_t dev, device_t child,
                    int index, uintptr_t *result);
static int      acpi_cpu_cst_shutdown(device_t dev);
static void     acpi_cpu_cx_probe(struct acpi_cpu_softc *sc);
static void     acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc);
static int      acpi_cpu_cx_cst(struct acpi_cpu_softc *sc);
static void     acpi_cpu_startup(void *arg);
static void     acpi_cpu_startup_cx(struct acpi_cpu_softc *sc);
static void     acpi_cpu_cx_list(struct acpi_cpu_softc *sc);
static void     acpi_cpu_idle(void);
static void     acpi_cpu_cst_notify(device_t);
static int      acpi_cpu_quirks(void);
static int      acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS);
static int      acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc, int val);
static int      acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int      acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);

static void     acpi_cpu_c1(void);      /* XXX */

static device_method_t acpi_cpu_cst_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,     acpi_cpu_cst_probe),
    DEVMETHOD(device_attach,    acpi_cpu_cst_attach),
    DEVMETHOD(device_detach,    bus_generic_detach),
    DEVMETHOD(device_shutdown,  acpi_cpu_cst_shutdown),
    DEVMETHOD(device_suspend,   acpi_cpu_cst_suspend),
    DEVMETHOD(device_resume,    acpi_cpu_cst_resume),

    /* Bus interface */
    DEVMETHOD(bus_add_child,    acpi_cpu_cst_add_child),
    DEVMETHOD(bus_read_ivar,    acpi_cpu_cst_read_ivar),
    DEVMETHOD(bus_get_resource_list, acpi_cpu_cst_get_rlist),
    DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
    DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
    DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource),
    DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
    DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
    DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
    DEVMETHOD(bus_setup_intr,   bus_generic_setup_intr),
    DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
    DEVMETHOD_END
};

static driver_t acpi_cpu_cst_driver = {
    "cpu_cst",
    acpi_cpu_cst_methods,
    sizeof(struct acpi_cpu_softc),
};

static devclass_t acpi_cpu_cst_devclass;
DRIVER_MODULE(cpu_cst, cpu, acpi_cpu_cst_driver, acpi_cpu_cst_devclass, NULL, NULL);
MODULE_DEPEND(cpu_cst, acpi, 1, 1, 1);

static int
acpi_cpu_cst_probe(device_t dev)
{
    int cpu_id;

    if (acpi_disabled("cpu_cst") || acpi_get_type(dev) != ACPI_TYPE_PROCESSOR)
        return (ENXIO);

    cpu_id = acpi_get_magic(dev);

    if (cpu_softc == NULL)
        cpu_softc = kmalloc(sizeof(struct acpi_cpu_softc *) *
            SMP_MAXCPU, M_TEMP /* XXX */, M_INTWAIT | M_ZERO);

    /*
     * Check if we already probed this processor.  We scan the bus twice
     * so it's possible we've already seen this one.
     */
    if (cpu_softc[cpu_id] != NULL) {
        device_printf(dev, "CPU%d cstate already exists\n", cpu_id);
        return (ENXIO);
    }

    /* Mark this processor as in-use and save our derived id for attach. */
    cpu_softc[cpu_id] = (void *)1;
    device_set_desc(dev, "ACPI CPU C-State");

    return (0);
}

static int
acpi_cpu_cst_attach(device_t dev)
{
    ACPI_BUFFER            buf;
    ACPI_OBJECT            *obj;
    struct mdglobaldata   *md;
    struct acpi_cpu_softc *sc;
    ACPI_STATUS            status;
    int                    cpu_id;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = device_get_softc(dev);
    sc->cpu_dev = dev;
    sc->cpu_parent = device_get_softc(device_get_parent(dev));
    sc->cpu_handle = acpi_get_handle(dev);
    cpu_id = acpi_get_magic(dev);
    cpu_softc[cpu_id] = sc;
    md = (struct mdglobaldata *)globaldata_find(device_get_unit(dev));
    sc->md = md;
    cpu_smi_cmd = AcpiGbl_FADT.SmiCommand;
    cpu_cst_cnt = AcpiGbl_FADT.CstControl;

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
        device_printf(dev, "attach failed to get Processor obj - %s\n",
                      AcpiFormatException(status));
        return (ENXIO);
    }
    obj = (ACPI_OBJECT *)buf.Pointer;
    sc->cpu_p_blk = obj->Processor.PblkAddress;
    sc->cpu_p_blk_len = obj->Processor.PblkLength;
    sc->cpu_acpi_id = obj->Processor.ProcId;
    AcpiOsFree(obj);
    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_BLK at %#x/%d\n",
                     device_get_unit(dev), sc->cpu_p_blk, sc->cpu_p_blk_len));

    /*
     * If this is the first cpu we attach, create and initialize the generic
     * resources that will be used by all acpi cpu devices.
     */
    if (device_get_unit(dev) == 0) {
        /* Assume we won't be using generic Cx mode by default */
        cpu_cx_generic = FALSE;

        /* Queue post cpu-probing task handler */
        AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cpu_startup, NULL);
    }

    /* Probe for Cx state support. */
    acpi_cpu_cx_probe(sc);

    /* Finally, call identify and probe/attach for child devices. */
    bus_generic_probe(dev);
    bus_generic_attach(dev);

    return (0);
}

/*
 * Disable any entry to the idle function during suspend and re-enable it
 * during resume.
 */
static int
acpi_cpu_cst_suspend(device_t dev)
{
    int error;

    error = bus_generic_suspend(dev);
    if (error)
        return (error);
    cpu_disable_idle = TRUE;
    return (0);
}

static int
acpi_cpu_cst_resume(device_t dev)
{

    cpu_disable_idle = FALSE;
    return (bus_generic_resume(dev));
}

static struct resource_list *
acpi_cpu_cst_get_rlist(device_t dev, device_t child)
{
    struct acpi_cpu_device *ad;

    ad = device_get_ivars(child);
    if (ad == NULL)
        return (NULL);
    return (&ad->ad_rl);
}

static device_t
acpi_cpu_cst_add_child(device_t bus, device_t parent, int order,
    const char *name, int unit)
{
    struct acpi_cpu_device *ad;
    device_t child;

    if ((ad = kmalloc(sizeof(*ad), M_TEMP, M_NOWAIT | M_ZERO)) == NULL)
        return (NULL);

    resource_list_init(&ad->ad_rl);

    child = device_add_child_ordered(parent, order, name, unit);
    if (child != NULL)
        device_set_ivars(child, ad);
    else
        kfree(ad, M_TEMP);
    return (child);
}

static int
acpi_cpu_cst_read_ivar(device_t dev, device_t child, int index,
    uintptr_t *result)
{
    struct acpi_cpu_softc *sc;

    sc = device_get_softc(dev);
    switch (index) {
    case ACPI_IVAR_HANDLE:
        *result = (uintptr_t)sc->cpu_handle;
        break;
#if 0
    case CPU_IVAR_PCPU:
        *result = (uintptr_t)sc->cpu_pcpu;
        break;
#endif
    default:
        return (ENOENT);
    }
    return (0);
}

static int
acpi_cpu_cst_shutdown(device_t dev)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Allow children to shutdown first. */
    bus_generic_shutdown(dev);

    /*
     * Disable any entry to the idle function.  There is a small race where
     * an idle thread may have passed this check but not yet gone to sleep.
     * This is ok since device_shutdown() does not free the softc, otherwise
     * we'd have to be sure all threads were evicted before returning.
     */
    cpu_disable_idle = TRUE;

    return_VALUE (0);
}

static void
acpi_cpu_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cpu_prev_sleep = 1000000;
    sc->cpu_cx_lowest = 0;

    /*
     * Check for the ACPI 2.0 _CST sleep states object.  If we can't find
     * any, we'll revert to the generic FADT/P_BLK Cx control method, which
     * is handled by acpi_cpu_startup.  Probing for generic Cx states is
     * deferred until all CPUs in the system have been probed, since some
     * of them may already have valid _CST packages.
     */
    if (!cpu_cx_generic && acpi_cpu_cx_cst(sc) != 0) {
        /*
         * We were unable to find a _CST package for this cpu or there
         * was an error parsing it.  Switch back to generic mode.
         */
        cpu_cx_generic = TRUE;
        if (bootverbose)
            device_printf(sc->cpu_dev, "switching to generic Cx mode\n");
    }

    /*
     * TODO: _CSD Package should be checked here.
     */
}

static void
acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_GENERIC_ADDRESS         gas;
    struct acpi_cx              *cx_ptr;

    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cpu_prev_sleep = 1000000;

    /* C1 has been required since just after ACPI 1.0 */
    cx_ptr->type = ACPI_STATE_C1;
    cx_ptr->trans_lat = 0;
    cx_ptr++;
    sc->cpu_cx_count++;

    /*
     * The spec says P_BLK must be 6 bytes long.  However, some systems
     * use it to indicate a fractional set of features present, so we
     * accept a length of 5 and treat it as supporting C2 only.  Some may
     * also have a value of 7 to indicate another C3, but most use _CST
     * for this (as required) and having "only" C1-C3 is not a hardship.
     */
    if (sc->cpu_p_blk_len < 5)
        return;

    /* Validate and allocate resources for C2 (P_LVL2). */
    gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
    gas.BitWidth = 8;
    if (AcpiGbl_FADT.C2Latency <= 100) {
        gas.Address = sc->cpu_p_blk + 4;

        cx_ptr->rid = sc->cpu_parent->cpux_next_rid;
        acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->type, &cx_ptr->rid, &gas,
                                            &cx_ptr->p_lvlx, RF_SHAREABLE);
        if (cx_ptr->p_lvlx != NULL) {
            sc->cpu_parent->cpux_next_rid++;
            cx_ptr->type = ACPI_STATE_C2;
            cx_ptr->trans_lat = AcpiGbl_FADT.C2Latency;
            cx_ptr++;
            sc->cpu_cx_count++;
        }
    }
    if (sc->cpu_p_blk_len < 6)
        return;

    /* Validate and allocate resources for C3 (P_LVL3). */
    if (AcpiGbl_FADT.C3Latency <= 1000 && !(cpu_quirks & CPU_QUIRK_NO_C3)) {
        gas.Address = sc->cpu_p_blk + 5;

        cx_ptr->rid = sc->cpu_parent->cpux_next_rid;
        acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->type, &cx_ptr->rid, &gas,
                                            &cx_ptr->p_lvlx, RF_SHAREABLE);
        if (cx_ptr->p_lvlx != NULL) {
            sc->cpu_parent->cpux_next_rid++;
            cx_ptr->type = ACPI_STATE_C3;
            cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency;
            cx_ptr++;
            sc->cpu_cx_count++;
        }
    }
}

/*
 * Parse a _CST package and set up its Cx states.  Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
 */
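/*
 * For reference (ACPI 2.0, as assumed by the parsing below), _CST evaluates
 * to a package of the form:
 *
 *     Package {
 *         Count,                                       // Integer
 *         Package { Register, Type, Latency, Power },  // one per Cx state
 *         ...
 *     }
 *
 * where Register is a Generic Address Structure naming the register read to
 * enter the state, Type is 1-3 for C1-C3, Latency is the worst-case
 * transition latency in microseconds, and Power is the average power
 * consumption in milliwatts.
 */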
static int
acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
{
    struct       acpi_cx *cx_ptr;
    ACPI_STATUS  status;
    ACPI_BUFFER  buf;
    ACPI_OBJECT *top;
    ACPI_OBJECT *pkg;
    uint32_t     count;
    int          i;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, "_CST", NULL, &buf);
    if (ACPI_FAILURE(status))
        return (ENXIO);

    /* _CST is a package with a count and at least one Cx package. */
    top = (ACPI_OBJECT *)buf.Pointer;
    if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) {
        device_printf(sc->cpu_dev, "invalid _CST package\n");
        AcpiOsFree(buf.Pointer);
        return (ENXIO);
    }
    if (count != top->Package.Count - 1) {
        device_printf(sc->cpu_dev, "invalid _CST state count (%d != %d)\n",
               count, top->Package.Count - 1);
        count = top->Package.Count - 1;
    }
    if (count > MAX_CX_STATES) {
        device_printf(sc->cpu_dev, "_CST has too many states (%d)\n", count);
        count = MAX_CX_STATES;
    }

    /* Set up all valid states. */
    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;
    for (i = 0; i < count; i++) {
        pkg = &top->Package.Elements[i + 1];
        if (!ACPI_PKG_VALID(pkg, 4) ||
            acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 ||
            acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 ||
            acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) {
            device_printf(sc->cpu_dev, "skipping invalid Cx state package\n");
            continue;
        }

        /* Validate the state to see if we should use it. */
        switch (cx_ptr->type) {
        case ACPI_STATE_C1:
            sc->cpu_non_c3 = i;
            cx_ptr++;
            sc->cpu_cx_count++;
            continue;
        case ACPI_STATE_C2:
            sc->cpu_non_c3 = i;
            break;
        case ACPI_STATE_C3:
        default:
            if ((cpu_quirks & CPU_QUIRK_NO_C3) != 0) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                 "acpi_cpu%d: C3[%d] not available.\n",
                                 device_get_unit(sc->cpu_dev), i));
                continue;
            }
            break;
        }

#ifdef notyet
        /* Free up any previous register. */
        if (cx_ptr->p_lvlx != NULL) {
            bus_release_resource(sc->cpu_dev, 0, 0, cx_ptr->p_lvlx);
            cx_ptr->p_lvlx = NULL;
        }
#endif

        /* Allocate the control register for C2 or C3. */
        cx_ptr->rid = sc->cpu_parent->cpux_next_rid;
        acpi_PkgGas(sc->cpu_dev, pkg, 0, &cx_ptr->res_type, &cx_ptr->rid,
                    &cx_ptr->p_lvlx, RF_SHAREABLE);
        if (cx_ptr->p_lvlx) {
            sc->cpu_parent->cpux_next_rid++;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                             "acpi_cpu%d: Got C%d - %d latency\n",
                             device_get_unit(sc->cpu_dev), cx_ptr->type,
                             cx_ptr->trans_lat));
            cx_ptr++;
            sc->cpu_cx_count++;
        }
    }
    AcpiOsFree(buf.Pointer);

    return (0);
}

/*
 * Call this *after* all CPUs have been attached.
 */
static void
acpi_cpu_startup(void *arg)
{
    struct acpi_cpu_softc *sc;
    int i;

    /* Get set of CPU devices */
    devclass_get_devices(acpi_cpu_cst_devclass, &cpu_devices, &cpu_ndevices);

    /*
     * Set up any quirks that might be necessary now that we have probed
     * all the CPUs.
     */
    acpi_cpu_quirks();

    cpu_cx_count = 0;
    if (cpu_cx_generic) {
        /*
         * We are using generic Cx mode, probe for available Cx states
         * for all processors.
         */
        for (i = 0; i < cpu_ndevices; i++) {
            sc = device_get_softc(cpu_devices[i]);
            acpi_cpu_generic_cx_probe(sc);
            if (sc->cpu_cx_count > cpu_cx_count)
                cpu_cx_count = sc->cpu_cx_count;
        }

        /*
         * Find the highest Cx state common to all CPUs
         * in the system, taking quirks into account.
         */
        for (i = 0; i < cpu_ndevices; i++) {
            sc = device_get_softc(cpu_devices[i]);
            if (sc->cpu_cx_count < cpu_cx_count)
                cpu_cx_count = sc->cpu_cx_count;
        }
    } else {
        /*
         * We are using _CST mode, so remove the C3 state if necessary.
         * Update the largest Cx state supported in the global cpu_cx_count;
         * it will be used by the global Cx sysctl handler.  Now that we
         * know for sure that we will be using _CST mode, install our
         * notify handler.
         */
        for (i = 0; i < cpu_ndevices; i++) {
            sc = device_get_softc(cpu_devices[i]);
            if (cpu_quirks & CPU_QUIRK_NO_C3) {
                sc->cpu_cx_count = sc->cpu_non_c3 + 1;
            }
            if (sc->cpu_cx_count > cpu_cx_count)
                cpu_cx_count = sc->cpu_cx_count;
            sc->cpu_parent->cpux_cst_notify = acpi_cpu_cst_notify;
        }
    }

    /* Perform Cx final initialization. */
    for (i = 0; i < cpu_ndevices; i++) {
        sc = device_get_softc(cpu_devices[i]);
        acpi_cpu_startup_cx(sc);

        if (sc->cpu_parent->glob_sysctl_tree != NULL) {
            struct acpi_cpux_softc *cpux = sc->cpu_parent;

            /* Add a sysctl handler to handle global Cx lowest setting */
            SYSCTL_ADD_PROC(&cpux->glob_sysctl_ctx,
                            SYSCTL_CHILDREN(cpux->glob_sysctl_tree),
                            OID_AUTO, "cx_lowest",
                            CTLTYPE_STRING | CTLFLAG_RW, NULL, 0,
                            acpi_cpu_global_cx_lowest_sysctl, "A",
                            "Global lowest Cx sleep state to use");
        }
    }

    /* Take over idling from cpu_idle_default(). */
    cpu_cx_lowest = 0;
    cpu_disable_idle = FALSE;
    cpu_idle_hook = acpi_cpu_idle;
}

static void
acpi_cpu_cx_list(struct acpi_cpu_softc *sc)
{
    struct sbuf sb;
    int i;

    /*
     * Set up the list of Cx states.
     */
    sc->cpu_non_c3 = 0;
    sbuf_new(&sb, sc->cpu_cx_supported, sizeof(sc->cpu_cx_supported),
        SBUF_FIXEDLEN);
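    /*
     * Each entry is formatted as "C<n>/<latency>", e.g. "C2/100" for a C2
     * state with a 100 usec transition latency; the resulting string is
     * published read-only through the cx_supported sysctl added in
     * acpi_cpu_startup_cx().
     */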
    for (i = 0; i < sc->cpu_cx_count; i++) {
        sbuf_printf(&sb, "C%d/%d ", i + 1, sc->cpu_cx_states[i].trans_lat);
        if (sc->cpu_cx_states[i].type < ACPI_STATE_C3)
            sc->cpu_non_c3 = i;
    }
    sbuf_trim(&sb);
    sbuf_finish(&sb);
}

static void
acpi_cpu_startup_cx(struct acpi_cpu_softc *sc)
{
    struct acpi_cpux_softc *cpux = sc->cpu_parent;

    acpi_cpu_cx_list(sc);

    SYSCTL_ADD_STRING(&cpux->pcpu_sysctl_ctx,
                      SYSCTL_CHILDREN(cpux->pcpu_sysctl_tree),
                      OID_AUTO, "cx_supported", CTLFLAG_RD,
                      sc->cpu_cx_supported, 0,
                      "Cx/microsecond values for supported Cx states");
    SYSCTL_ADD_PROC(&cpux->pcpu_sysctl_ctx,
                    SYSCTL_CHILDREN(cpux->pcpu_sysctl_tree),
                    OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
                    (void *)sc, 0, acpi_cpu_cx_lowest_sysctl, "A",
                    "lowest Cx sleep state to use");
    SYSCTL_ADD_PROC(&cpux->pcpu_sysctl_ctx,
                    SYSCTL_CHILDREN(cpux->pcpu_sysctl_tree),
                    OID_AUTO, "cx_usage", CTLTYPE_STRING | CTLFLAG_RD,
                    (void *)sc, 0, acpi_cpu_usage_sysctl, "A",
                    "percent usage for each Cx state");

#ifdef notyet
    /* Signal platform that we can handle _CST notification. */
    if (!cpu_cx_generic && cpu_cst_cnt != 0) {
        ACPI_LOCK(acpi);
        AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8);
        ACPI_UNLOCK(acpi);
    }
#endif
}

/*
 * Idle the CPU in the lowest state possible.  This function is called with
 * interrupts disabled.  Note that once it re-enables interrupts, a task
 * switch can occur so do not access shared data (i.e. the softc) after
 * interrupts are re-enabled.
 */
static void
acpi_cpu_idle(void)
{
    struct      acpi_cpu_softc *sc;
    struct      acpi_cx *cx_next;
    uint64_t    start_time, end_time;
    int         bm_active, cx_next_idx, i;

    /* If disabled, return immediately. */
    if (cpu_disable_idle) {
        ACPI_ENABLE_IRQS();
        return;
    }

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no ACPI processor object for this CPU.  This occurs
     * for logical CPUs in the HTT case.
     */
    sc = cpu_softc[mdcpu->mi.gd_cpuid];
    if (sc == NULL) {
        acpi_cpu_c1();
        return;
    }

    /* Find the lowest state that has a small enough latency. */
    cx_next_idx = 0;
    for (i = sc->cpu_cx_lowest; i >= 0; i--) {
        if (sc->cpu_cx_states[i].trans_lat * 3 <= sc->cpu_prev_sleep) {
            cx_next_idx = i;
            break;
        }
    }
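
    /*
     * The 3x factor above means a state is only chosen when its transition
     * latency is at most one third of the recent average sleep time, so the
     * entry/exit overhead stays small relative to the time actually spent
     * asleep.
     */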

    /*
     * Check for bus master activity.  If there was activity, clear
     * the bit and use the lowest non-C3 state.  Note that the USB
     * driver polling for new devices keeps this bit set all the
     * time if USB is loaded.
     */
    if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
        AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active);
        if (bm_active != 0) {
            AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);
            cx_next_idx = min(cx_next_idx, sc->cpu_non_c3);
        }
    }

    /* Select the next state and update statistics. */
    cx_next = &sc->cpu_cx_states[cx_next_idx];
    sc->cpu_cx_stats[cx_next_idx]++;
    KASSERT(cx_next->type != ACPI_STATE_C0, ("acpi_cpu_idle: C0 sleep"));

    /*
     * Execute HLT (or equivalent) and wait for an interrupt.  We can't
     * calculate the time spent in C1 since the place we wake up is an
     * ISR.  Assume we slept half of a quantum and return.
     */
    if (cx_next->type == ACPI_STATE_C1) {
        sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + 500000 / hz) / 4;
        acpi_cpu_c1();
        return;
    }

    /*
     * For C3, disable bus master arbitration and enable bus master wake
     * if BM control is available, otherwise flush the CPU cache.
     */
    if (cx_next->type == ACPI_STATE_C3) {
        if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
            AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);
            AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
        } else
            ACPI_FLUSH_CPU_CACHE();
    }

    /*
     * Read from P_LVLx to enter C2(+), checking time spent asleep.
     * Use the ACPI timer for measuring sleep time.  Since we need to
     * get the time very close to the CPU start/stop clock logic, this
     * is the only reliable time source.
     */
    AcpiRead(&start_time, &AcpiGbl_FADT.XPmTimerBlock);
    CPU_GET_REG(cx_next->p_lvlx, 1);

    /*
     * Read the end time twice.  Since it may take an arbitrary time
     * to enter the idle state, the first read may be executed before
     * the processor has stopped.  Doing it again provides enough
     * margin that we are certain to have a correct value.
     */
    AcpiRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock);
    AcpiRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock);

    /* Enable bus master arbitration and disable bus master wakeup. */
    if (cx_next->type == ACPI_STATE_C3) {
        if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
            AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);
            AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
        }
    }
    ACPI_ENABLE_IRQS();

    /* Find the actual time asleep in microseconds. */
    end_time = acpi_TimerDelta(end_time, start_time);
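    /*
     * Fold the measurement into a 3/4-weighted exponential moving average;
     * this smoothed estimate of recent sleep time is what the latency check
     * at the top of this function compares against.
     */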
    sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + PM_USEC(end_time)) / 4;
}

/*
 * Re-evaluate the _CST object when we are notified that it changed.
 *
 * XXX Re-evaluation disabled until locking is done.
 */
static void
acpi_cpu_cst_notify(device_t dev)
{
    struct acpi_cpu_softc *sc = device_get_softc(dev);
    struct acpi_cpu_softc *isc;
    int i;

    /* Update the list of Cx states. */
    acpi_cpu_cx_cst(sc);
    acpi_cpu_cx_list(sc);

    /* Update the new lowest usable Cx state for all CPUs. */
    crit_enter();
    cpu_cx_count = 0;
    for (i = 0; i < cpu_ndevices; i++) {
        isc = device_get_softc(cpu_devices[i]);
        if (isc->cpu_cx_count > cpu_cx_count)
            cpu_cx_count = isc->cpu_cx_count;
    }
    crit_exit();
}

static int
acpi_cpu_quirks(void)
{
    device_t acpi_dev;
    uint32_t val;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /*
     * Bus mastering arbitration control is needed to keep caches coherent
     * while sleeping in C3.  If it's not present but a working flush cache
     * instruction is present, flush the caches before entering C3 instead.
     * Otherwise, just disable C3 completely.
     */
    if (AcpiGbl_FADT.Pm2ControlBlock == 0 ||
        AcpiGbl_FADT.Pm2ControlLength == 0) {
        if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) &&
            (AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0) {
            cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu: no BM control, using flush cache method\n"));
        } else {
            cpu_quirks |= CPU_QUIRK_NO_C3;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu: no BM control, C3 not available\n"));
        }
    }

    /*
     * If we are using generic Cx mode, C3 on multiple CPUs requires using
     * the expensive flush cache instruction.
     */
    if (cpu_cx_generic && ncpus > 1) {
        cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
            "acpi_cpu: SMP, using flush cache mode for C3\n"));
    }

    /* Look for various quirks of the PIIX4 part. */
    acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
    if (acpi_dev != NULL) {
        switch (pci_get_revid(acpi_dev)) {
        /*
         * Disable C3 support for all PIIX4 chipsets.  Some of these parts
         * do not report the BMIDE status to the BM status register and
         * others have a livelock bug if Type-F DMA is enabled.  Linux
         * works around the BMIDE bug by reading the BM status directly
         * but we take the simpler approach of disabling C3 for these
         * parts.
         *
         * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
         * Livelock") from the January 2002 PIIX4 specification update.
         * Applies to all PIIX4 models.
         *
         * Also, make sure that all interrupts cause a "Stop Break"
         * event to exit from C2 state.
         * Also, BRLD_EN_BM (ACPI_BITREG_BUS_MASTER_RLD in ACPI-speak)
         * should be set to zero, otherwise it causes C2 to short-sleep.
         * PIIX4 doesn't properly support C3, and bus master activity
         * need not break the CPU out of C2.
         */
        case PCI_REVISION_A_STEP:
        case PCI_REVISION_B_STEP:
        case PCI_REVISION_4E:
        case PCI_REVISION_4M:
            cpu_quirks |= CPU_QUIRK_NO_C3;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu: working around PIIX4 bug, disabling C3\n"));

            val = pci_read_config(acpi_dev, PIIX4_DEVACTB_REG, 4);
            if ((val & PIIX4_STOP_BREAK_MASK) != PIIX4_STOP_BREAK_MASK) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu: PIIX4: enabling IRQs to generate Stop Break\n"));
                val |= PIIX4_STOP_BREAK_MASK;
                pci_write_config(acpi_dev, PIIX4_DEVACTB_REG, val, 4);
            }
            AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_RLD, &val);
            if (val) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu: PIIX4: reset BRLD_EN_BM\n"));
                AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
            }
            break;
        default:
            break;
        }
    }

    return (0);
}

static int
acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    struct sbuf  sb;
    char         buf[128];
    int          i;
    uintmax_t    fract, sum, whole;

    sc = (struct acpi_cpu_softc *) arg1;
    sum = 0;
    for (i = 0; i < sc->cpu_cx_count; i++)
        sum += sc->cpu_cx_stats[i];
    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
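    /*
     * Print each state's share of the total as a fixed-point percentage:
     * whole / sum yields the integer percent, and ((whole % sum) * 100) /
     * sum the two digits after the decimal point, avoiding floating point
     * in the kernel.
     */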
    for (i = 0; i < sc->cpu_cx_count; i++) {
        if (sum > 0) {
            whole = (uintmax_t)sc->cpu_cx_stats[i] * 100;
            fract = (whole % sum) * 100;
            sbuf_printf(&sb, "%u.%02u%% ", (u_int)(whole / sum),
                (u_int)(fract / sum));
        } else
            sbuf_printf(&sb, "0.00%% ");
    }
    sbuf_printf(&sb, "last %dus", sc->cpu_prev_sleep);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
    sbuf_delete(&sb);

    return (0);
}

static int
acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc, int val)
{
    int i, old_lowest, error = 0;
    uint32_t old_type, type;

    get_mplock();

    old_lowest = atomic_swap_int(&sc->cpu_cx_lowest, val);

    old_type = sc->cpu_cx_states[old_lowest].type;
    type = sc->cpu_cx_states[val].type;
    if (old_type == ACPI_STATE_C3 && type != ACPI_STATE_C3) {
        KKASSERT(cpu_c3_ncpus > 0);
        if (atomic_fetchadd_int(&cpu_c3_ncpus, -1) == 1) {
            /*
             * All CPUs have exited C3 state, so we can go back
             * to a better one-shot timer (no special capability
             * required).
             */
            error = cputimer_intr_select_caps(CPUTIMER_INTR_CAP_NONE);
            KKASSERT(!error);
            cputimer_intr_restart();
        }
    } else if (type == ACPI_STATE_C3 && old_type != ACPI_STATE_C3) {
        if (atomic_fetchadd_int(&cpu_c3_ncpus, 1) == 0) {
            /*
             * When the first CPU enters C3 state, switch to a
             * one-shot timer that can handle the C3 state, i.e.
             * one that will not hang.
             */
            error = cputimer_intr_select_caps(CPUTIMER_INTR_CAP_PS);
            if (!error) {
                cputimer_intr_restart();
            } else {
                kprintf("no suitable intr cputimer found\n");

                /* Restore */
                sc->cpu_cx_lowest = old_lowest;
                atomic_fetchadd_int(&cpu_c3_ncpus, -1);
            }
        }
    }
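
    /*
     * The capability dance above is needed because on many processors the
     * per-CPU tick source stops while in C3; once any CPU may enter C3, the
     * interrupt cputimer must be one that keeps running in that state
     * (CPUTIMER_INTR_CAP_PS), and the restriction is lifted again when no
     * CPU can enter C3 anymore.
     */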

    rel_mplock();

    if (error)
        return error;

    /* If not disabling, cache the new lowest non-C3 state. */
    sc->cpu_non_c3 = 0;
    for (i = sc->cpu_cx_lowest; i >= 0; i--) {
        if (sc->cpu_cx_states[i].type < ACPI_STATE_C3) {
            sc->cpu_non_c3 = i;
            break;
        }
    }

    /* Reset the statistics counters. */
    bzero(sc->cpu_cx_stats, sizeof(sc->cpu_cx_stats));
    return (0);
}

static int
acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct       acpi_cpu_softc *sc;
    char         state[8];
    int          val, error;

    sc = (struct acpi_cpu_softc *) arg1;
    ksnprintf(state, sizeof(state), "C%d", sc->cpu_cx_lowest + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
        return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
        return (EINVAL);
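    /*
     * Map the "C<n>" string to a zero-based index into cpu_cx_states[],
     * e.g. writing "C2" selects index 1.
     */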
    val = (int) strtol(state + 1, NULL, 10) - 1;
    if (val < 0 || val > sc->cpu_cx_count - 1)
        return (EINVAL);

    crit_enter();
    error = acpi_cpu_set_cx_lowest(sc, val);
    crit_exit();

    return error;
}

static int
acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct      acpi_cpu_softc *sc;
    char        state[8];
    int         val, error, i;

    ksnprintf(state, sizeof(state), "C%d", cpu_cx_lowest + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
        return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
        return (EINVAL);
    val = (int) strtol(state + 1, NULL, 10) - 1;
    if (val < 0 || val > cpu_cx_count - 1)
        return (EINVAL);
    cpu_cx_lowest = val;

    /* Update the new lowest usable Cx state for all CPUs. */
    crit_enter();
    for (i = 0; i < cpu_ndevices; i++) {
        sc = device_get_softc(cpu_devices[i]);
        error = acpi_cpu_set_cx_lowest(sc, val);
        if (error) {
            KKASSERT(i == 0);
            break;
        }
    }
    crit_exit();

    return error;
}

/*
 * Put the CPU in C1 in a machine-dependent way.
 * XXX: shouldn't be here!
 */
static void
acpi_cpu_c1(void)
{
#ifdef __ia64__
    ia64_call_pal_static(PAL_HALT_LIGHT, 0, 0, 0);
#else
    splz();
    if ((mycpu->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0)
        __asm __volatile("sti; hlt");
    else
        __asm __volatile("sti; pause");
#endif /* !__ia64__ */
}
