FreeBSD/Linux Kernel Cross Reference
sys/dev/acpica/acpi_cpu.c


/*-
 * Copyright (c) 2003 Nate Lawson (SDG)
 * Copyright (c) 2001 Michael Smith
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/5.2/sys/dev/acpica/acpi_cpu.c 123132 2003-12-03 20:27:48Z jhb $");

#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/power.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smp.h>

#include <dev/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#ifdef __ia64__
#include <machine/pal.h>
#endif
#include <sys/rman.h>

#include "acpi.h"
#include <dev/acpica/acpivar.h>

/*
 * Support for ACPI Processor devices, including ACPI 2.0 throttling
 * and C[1-3] sleep states.
 *
 * TODO: implement scans of all CPUs to be sure all Cx states are
 * equivalent.
 */

/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT      ACPI_PROCESSOR
ACPI_MODULE_NAME("PROCESSOR")

struct acpi_cx {
    struct resource     *p_lvlx;        /* Register to read to enter state. */
    uint32_t             type;          /* C1-3 (C4 and up treated as C3). */
    uint32_t             trans_lat;     /* Transition latency (usec). */
    uint32_t             power;         /* Power consumed (mW). */
};
#define MAX_CX_STATES    8

struct acpi_cx_stats {
    int                  long_slp;      /* Count of sleeps >= trans_lat. */
    int                  short_slp;     /* Count of sleeps < trans_lat. */
};

struct acpi_cpu_softc {
    device_t             cpu_dev;
    ACPI_HANDLE          cpu_handle;
    uint32_t             acpi_id;       /* ACPI processor id */
    uint32_t             cpu_p_blk;     /* ACPI P_BLK location */
    uint32_t             cpu_p_blk_len; /* P_BLK length (must be 6). */
    struct resource     *cpu_p_cnt;     /* Throttling control register */
    struct acpi_cx       cpu_cx_states[MAX_CX_STATES];
    int                  cpu_cx_count;  /* Number of valid Cx states. */
};

#define CPU_GET_REG(reg, width)                                         \
    (bus_space_read_ ## width(rman_get_bustag((reg)),                   \
                      rman_get_bushandle((reg)), 0))
#define CPU_SET_REG(reg, width, val)                                    \
    (bus_space_write_ ## width(rman_get_bustag((reg)),                  \
                       rman_get_bushandle((reg)), 0, (val)))
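
/*
 * For example, CPU_GET_REG(sc->cpu_p_cnt, 4) expands to a 32-bit
 * bus_space_read_4() of the P_CNT register at offset 0 in the
 * resource, and CPU_SET_REG() is the matching bus_space_write.
 */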

/*
 * Speeds are stored in counts, from 1 to CPU_MAX_SPEED, and
 * reported to the user in tenths of a percent.
 */
static uint32_t          cpu_duty_offset;
static uint32_t          cpu_duty_width;
#define CPU_MAX_SPEED           (1 << cpu_duty_width)
#define CPU_SPEED_PERCENT(x)    ((1000 * (x)) / CPU_MAX_SPEED)
#define CPU_SPEED_PRINTABLE(x)  (CPU_SPEED_PERCENT(x) / 10),    \
                                (CPU_SPEED_PERCENT(x) % 10)
#define CPU_P_CNT_THT_EN (1<<4)
#define PM_USEC(x)       ((x) >> 2)     /* ~4 clocks per usec (3.57955 MHz) */
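
/*
 * For example, with a 3-bit duty width CPU_MAX_SPEED is 8, so a count
 * of 3 gives CPU_SPEED_PERCENT(3) == 375, printed as "37.5%".
 * PM_USEC() converts PM timer ticks to approximate microseconds:
 * PM_USEC(1000) == 250.
 */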

#define ACPI_CPU_NOTIFY_PERF_STATES     0x80    /* _PSS changed. */
#define ACPI_CPU_NOTIFY_CX_STATES       0x81    /* _CST changed. */

#define CPU_QUIRK_NO_C3         0x0001  /* C3-type states are not usable. */
#define CPU_QUIRK_NO_THROTTLE   0x0002  /* Throttling is not usable. */

#define PCI_VENDOR_INTEL        0x8086
#define PCI_DEVICE_82371AB_3    0x7113  /* PIIX4 chipset for quirks. */
#define PCI_REVISION_A_STEP     0
#define PCI_REVISION_B_STEP     1
#define PCI_REVISION_4E         2
#define PCI_REVISION_4M         3

/* Platform hardware resource information. */
static uint32_t          cpu_smi_cmd;   /* SMI command port for handoffs. */
static uint8_t           cpu_pstate_cnt;/* Value written to take over throttling. */
static uint8_t           cpu_cst_cnt;   /* Value written to indicate we are _CST aware. */
static uint32_t          cpu_rid;       /* Driver-wide resource id. */
static uint32_t          cpu_quirks;    /* Indicate any hardware bugs. */

/* Runtime state. */
static int               cpu_cx_count;  /* Number of valid states */
static uint32_t          cpu_cx_next;   /* State to use for next sleep. */
static uint32_t          cpu_non_c3;    /* Index of lowest non-C3 state. */
static struct acpi_cx_stats cpu_cx_stats[MAX_CX_STATES];
static int               cpu_idle_busy; /* Count of CPUs in acpi_cpu_idle. */

/* Values for sysctl. */
static uint32_t          cpu_current_state;
static uint32_t          cpu_performance_state;
static uint32_t          cpu_economy_state;
static uint32_t          cpu_max_state;
static int               cpu_cx_lowest;
static char              cpu_cx_supported[64];

static device_t         *cpu_devices;
static int               cpu_ndevices;
static struct acpi_cpu_softc **cpu_softc;

static struct sysctl_ctx_list   acpi_cpu_sysctl_ctx;
static struct sysctl_oid        *acpi_cpu_sysctl_tree;

static int      acpi_cpu_probe(device_t dev);
static int      acpi_cpu_attach(device_t dev);
static int      acpi_pcpu_get_id(uint32_t idx, uint32_t *acpi_id,
                                 uint32_t *cpu_id);
static int      acpi_cpu_shutdown(device_t dev);
static int      acpi_cpu_throttle_probe(struct acpi_cpu_softc *sc);
static int      acpi_cpu_cx_probe(struct acpi_cpu_softc *sc);
static int      acpi_cpu_cx_cst(struct acpi_cpu_softc *sc);
static void     acpi_cpu_startup(void *arg);
static void     acpi_cpu_startup_throttling(void);
static void     acpi_cpu_startup_cx(void);
static void     acpi_cpu_throttle_set(uint32_t speed);
static void     acpi_cpu_idle(void);
static void     acpi_cpu_c1(void);
static void     acpi_pm_ticksub(uint32_t *end, const uint32_t *start);
static void     acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context);
static int      acpi_cpu_quirks(struct acpi_cpu_softc *sc);
static void     acpi_cpu_power_profile(void *arg);
static int      acpi_cpu_throttle_sysctl(SYSCTL_HANDLER_ARGS);
static int      acpi_cpu_history_sysctl(SYSCTL_HANDLER_ARGS);
static int      acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);

static device_method_t acpi_cpu_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,     acpi_cpu_probe),
    DEVMETHOD(device_attach,    acpi_cpu_attach),
    DEVMETHOD(device_shutdown,  acpi_cpu_shutdown),

    {0, 0}
};

static driver_t acpi_cpu_driver = {
    "acpi_cpu",
    acpi_cpu_methods,
    sizeof(struct acpi_cpu_softc),
};

static devclass_t acpi_cpu_devclass;
DRIVER_MODULE(acpi_cpu, acpi, acpi_cpu_driver, acpi_cpu_devclass, 0, 0);

static int
acpi_cpu_probe(device_t dev)
{
    if (!acpi_disabled("cpu") && acpi_get_type(dev) == ACPI_TYPE_PROCESSOR) {
        device_set_desc(dev, "CPU");
        if (cpu_softc == NULL)
                cpu_softc = malloc(sizeof(struct acpi_cpu_softc *) *
                    (mp_maxid + 1), M_TEMP /* XXX */, M_WAITOK | M_ZERO);
        return (0);
    }

    return (ENXIO);
}

static int
acpi_cpu_attach(device_t dev)
{
    struct acpi_cpu_softc *sc;
    struct acpi_softc     *acpi_sc;
    ACPI_OBJECT            pobj;
    ACPI_BUFFER            buf;
    ACPI_STATUS            status;
    int                    thr_ret, cx_ret, cpu_id;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    ACPI_ASSERTLOCK;

    sc = device_get_softc(dev);
    sc->cpu_dev = dev;
    sc->cpu_handle = acpi_get_handle(dev);

    /* Get our Processor object. */
    buf.Pointer = &pobj;
    buf.Length = sizeof(pobj);
    status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
        device_printf(dev, "Couldn't get Processor object - %s\n",
                      AcpiFormatException(status));
        return_VALUE (ENXIO);
    }
    if (pobj.Type != ACPI_TYPE_PROCESSOR) {
        device_printf(dev, "Processor object has bad type %d\n", pobj.Type);
        return_VALUE (ENXIO);
    }

    /*
     * Find the processor associated with our unit.  We could use the
     * ProcId as a key; however, some boxes do not have the same values
     * in their Processor object as the ProcId values in the MADT.
     */
    sc->acpi_id = pobj.Processor.ProcId;
    if (acpi_pcpu_get_id(device_get_unit(dev), &sc->acpi_id, &cpu_id) != 0)
        return_VALUE (ENXIO);

    /*
     * Check if we already probed this processor.  We scan the bus twice
     * so it's possible we've already seen this one.
     */
    if (cpu_softc[cpu_id] != NULL)
        return (ENXIO);
    cpu_softc[cpu_id] = sc;

    /*
     * XXX Temporarily call any _INI function under the processor.
     * ACPI-CA will do this after Nov. 2003.  The spec doesn't
     * suggest processors have _INI methods but my Thinkpad T23 does.
     */
    AcpiEvaluateObject(sc->cpu_handle, "_INI", NULL, NULL);

    /* Get various global values from the Processor object. */
    sc->cpu_p_blk = pobj.Processor.PblkAddress;
    sc->cpu_p_blk_len = pobj.Processor.PblkLength;
    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_BLK at %#x/%d\n",
                     device_get_unit(dev), sc->cpu_p_blk, sc->cpu_p_blk_len));

    acpi_sc = acpi_device_get_parent_softc(dev);
    sysctl_ctx_init(&acpi_cpu_sysctl_ctx);
    acpi_cpu_sysctl_tree = SYSCTL_ADD_NODE(&acpi_cpu_sysctl_ctx,
                                SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree),
                                OID_AUTO, "cpu", CTLFLAG_RD, 0, "");

    /* If this is the first device probed, check for quirks. */
    if (device_get_unit(dev) == 0)
        acpi_cpu_quirks(sc);

    /*
     * Probe for throttling and Cx state support.
     * If none of these is present, free up unused resources.
     */
    thr_ret = acpi_cpu_throttle_probe(sc);
    cx_ret = acpi_cpu_cx_probe(sc);
    if (thr_ret == 0 || cx_ret == 0) {
        status = AcpiInstallNotifyHandler(sc->cpu_handle, ACPI_DEVICE_NOTIFY,
                                          acpi_cpu_notify, sc);
        if (device_get_unit(dev) == 0)
            AcpiOsQueueForExecution(OSD_PRIORITY_LO, acpi_cpu_startup, NULL);
    } else {
        sysctl_ctx_free(&acpi_cpu_sysctl_ctx);
    }

    return_VALUE (0);
}

/*
 * Find the nth present CPU, return its pc_cpuid, and set pc_acpi_id
 * from the most reliable source.
 */
static int
acpi_pcpu_get_id(uint32_t idx, uint32_t *acpi_id, uint32_t *cpu_id)
{
    struct pcpu *pcpu_data;
    uint32_t     i;

    KASSERT(acpi_id != NULL, ("Null acpi_id"));
    KASSERT(cpu_id != NULL, ("Null cpu_id"));
    for (i = 0; i <= mp_maxid; i++) {
        if (CPU_ABSENT(i))
            continue;
        pcpu_data = pcpu_find(i);
        KASSERT(pcpu_data != NULL, ("no pcpu data for %d", i));
        if (idx-- == 0) {
            /*
             * If pc_acpi_id was not initialized (e.g., a non-APIC UP box)
             * override it with the value from the ASL.  Otherwise, if the
             * two don't match, prefer the MADT-derived value.  Finally,
             * return the pc_cpuid to reference this processor.
             */
            if (pcpu_data->pc_acpi_id == 0xffffffff)
                 pcpu_data->pc_acpi_id = *acpi_id;
            else if (pcpu_data->pc_acpi_id != *acpi_id)
                *acpi_id = pcpu_data->pc_acpi_id;
            *cpu_id = pcpu_data->pc_cpuid;
            return (0);
        }
    }

    return (ESRCH);
}

static int
acpi_cpu_shutdown(device_t dev)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Disable any entry to the idle function. */
    cpu_cx_count = 0;

    /* Wait for all processors to exit acpi_cpu_idle(). */
    smp_rendezvous(NULL, NULL, NULL, NULL);
    while (cpu_idle_busy > 0)
        DELAY(1);

    return_VALUE (0);
}

static int
acpi_cpu_throttle_probe(struct acpi_cpu_softc *sc)
{
    uint32_t             duty_end;
    ACPI_BUFFER          buf;
    ACPI_OBJECT          obj;
    ACPI_GENERIC_ADDRESS gas;
    ACPI_STATUS          status;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    ACPI_ASSERTLOCK;

    /* Get throttling parameters from the FADT.  0 means not supported. */
    if (device_get_unit(sc->cpu_dev) == 0) {
        cpu_smi_cmd = AcpiGbl_FADT->SmiCmd;
        cpu_pstate_cnt = AcpiGbl_FADT->PstateCnt;
        cpu_cst_cnt = AcpiGbl_FADT->CstCnt;
        cpu_duty_offset = AcpiGbl_FADT->DutyOffset;
        cpu_duty_width = AcpiGbl_FADT->DutyWidth;
    }
    if (cpu_duty_width == 0 || (cpu_quirks & CPU_QUIRK_NO_THROTTLE) != 0)
        return (ENXIO);

    /* Validate the duty offset/width. */
    duty_end = cpu_duty_offset + cpu_duty_width - 1;
    if (duty_end > 31) {
        device_printf(sc->cpu_dev, "CLK_VAL field overflows P_CNT register\n");
        return (ENXIO);
    }
    if (cpu_duty_offset <= 4 && duty_end >= 4) {
        device_printf(sc->cpu_dev, "CLK_VAL field overlaps THT_EN bit\n");
        return (ENXIO);
    }
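
    /*
     * For example, a duty offset of 1 and width of 3 put CLK_VAL in
     * bits 1-3 of P_CNT, clear of both the THT_EN bit (bit 4) and the
     * top of the 32-bit register, so it passes both checks above.
     */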

    /*
     * Look for the ACPI 2.0 _PTC package to find the P_CNT register.
     * If it is not present, fall back to using the processor's P_BLK.
     *
     * Note that some systems seem to duplicate the P_BLK pointer
     * across multiple CPUs, so not getting the resource is not fatal.
     */
    buf.Pointer = &obj;
    buf.Length = sizeof(obj);
    status = AcpiEvaluateObject(sc->cpu_handle, "_PTC", NULL, &buf);
    if (ACPI_SUCCESS(status)) {
        if (obj.Buffer.Length < sizeof(ACPI_GENERIC_ADDRESS) + 3) {
            device_printf(sc->cpu_dev, "_PTC buffer too small\n");
            return (ENXIO);
        }
        memcpy(&gas, obj.Buffer.Pointer + 3, sizeof(gas));
        sc->cpu_p_cnt = acpi_bus_alloc_gas(sc->cpu_dev, &cpu_rid, &gas);
        if (sc->cpu_p_cnt != NULL) {
            ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_CNT from _PTC\n",
                             device_get_unit(sc->cpu_dev)));
        }
    }

    /* If _PTC not present or other failure, try the P_BLK. */
    if (sc->cpu_p_cnt == NULL) {
        /* The spec says P_BLK must be at least 6 bytes long. */
        if (sc->cpu_p_blk_len != 6)
            return (ENXIO);
        gas.Address = sc->cpu_p_blk;
        gas.AddressSpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
        gas.RegisterBitWidth = 32;
        sc->cpu_p_cnt = acpi_bus_alloc_gas(sc->cpu_dev, &cpu_rid, &gas);
        if (sc->cpu_p_cnt != NULL) {
            ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_CNT from P_BLK\n",
                             device_get_unit(sc->cpu_dev)));
        } else {
            device_printf(sc->cpu_dev, "Failed to attach throttling P_CNT\n");
            return (ENXIO);
        }
    }
    cpu_rid++;

    return (0);
}

static int
acpi_cpu_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_GENERIC_ADDRESS gas;
    struct acpi_cx      *cx_ptr;
    int                  error;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Bus mastering arbitration control is needed for C3. */
    if (AcpiGbl_FADT->V1_Pm2CntBlk == 0 || AcpiGbl_FADT->Pm2CntLen == 0) {
        cpu_quirks |= CPU_QUIRK_NO_C3;
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                         "acpi_cpu%d: No BM control, C3 disabled\n",
                         device_get_unit(sc->cpu_dev)));
    }

    /*
     * First, check for the ACPI 2.0 _CST sleep states object.
     * If not usable, fall back to the P_BLK's P_LVL2 and P_LVL3.
     */
    sc->cpu_cx_count = 0;
    error = acpi_cpu_cx_cst(sc);
    if (error != 0) {
        cx_ptr = sc->cpu_cx_states;

        /* C1 has been required since just after ACPI 1.0 */
        cx_ptr->type = ACPI_STATE_C1;
        cx_ptr->trans_lat = 0;
        cpu_non_c3 = 0;
        cx_ptr++;
        sc->cpu_cx_count++;

        if (sc->cpu_p_blk_len != 6)
            goto done;

        /* Validate and allocate resources for C2 (P_LVL2). */
        gas.AddressSpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
        gas.RegisterBitWidth = 8;
        if (AcpiGbl_FADT->Plvl2Lat < 100) {
            gas.Address = sc->cpu_p_blk + 4;
            cx_ptr->p_lvlx = acpi_bus_alloc_gas(sc->cpu_dev, &cpu_rid, &gas);
            if (cx_ptr->p_lvlx != NULL) {
                cpu_rid++;
                cx_ptr->type = ACPI_STATE_C2;
                cx_ptr->trans_lat = AcpiGbl_FADT->Plvl2Lat;
                cpu_non_c3 = 1;
                cx_ptr++;
                sc->cpu_cx_count++;
            }
        }

        /* Validate and allocate resources for C3 (P_LVL3). */
        if (AcpiGbl_FADT->Plvl3Lat < 1000 &&
            (cpu_quirks & CPU_QUIRK_NO_C3) == 0) {

            gas.Address = sc->cpu_p_blk + 5;
            cx_ptr->p_lvlx = acpi_bus_alloc_gas(sc->cpu_dev, &cpu_rid, &gas);
            if (cx_ptr->p_lvlx != NULL) {
                cpu_rid++;
                cx_ptr->type = ACPI_STATE_C3;
                cx_ptr->trans_lat = AcpiGbl_FADT->Plvl3Lat;
                cx_ptr++;
                sc->cpu_cx_count++;
            }
        }
    }

done:
    /* If no valid registers were found, don't attach. */
    if (sc->cpu_cx_count == 0)
        return (ENXIO);

    return (0);
}

/*
 * Parse a _CST package and set up its Cx states.  Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
 */
static int
acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
{
    struct       acpi_cx *cx_ptr;
    ACPI_STATUS  status;
    ACPI_BUFFER  buf;
    ACPI_OBJECT *top;
    ACPI_OBJECT *pkg;
    uint32_t     count;
    int          i;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, "_CST", NULL, &buf);
    if (ACPI_FAILURE(status))
        return (ENXIO);

    /* _CST is a package with a count and at least one Cx package. */
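    /*
     * Expected layout, from which each field is pulled below:
     *   Package { Count, Package { Register, Type, Latency, Power }, ... }
     */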
    top = (ACPI_OBJECT *)buf.Pointer;
    if (!ACPI_PKG_VALID(top, 2)) {
        device_printf(sc->cpu_dev, "Invalid _CST package\n");
        AcpiOsFree(buf.Pointer);
        return (ENXIO);
    }
    acpi_PkgInt32(sc->cpu_dev, top, 0, &count);
    if (count != top->Package.Count - 1) {
        device_printf(sc->cpu_dev, "Invalid _CST state count (%d != %d)\n",
               count, top->Package.Count - 1);
        count = top->Package.Count - 1;
    }
    if (count > MAX_CX_STATES) {
        device_printf(sc->cpu_dev, "_CST has too many states (%d)\n", count);
        count = MAX_CX_STATES;
    }

    /* Set up all valid states. */
    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;
    for (i = 0; i < count; i++) {
        pkg = &top->Package.Elements[i + 1];
        if (!ACPI_PKG_VALID(pkg, 4)) {
            device_printf(sc->cpu_dev, "Skipping invalid Cx state package\n");
            continue;
        }

        /* Cx type, transition latency, power consumed. */
        acpi_PkgInt32(sc->cpu_dev, pkg, 1, &cx_ptr->type);
        acpi_PkgInt32(sc->cpu_dev, pkg, 2, &cx_ptr->trans_lat);
        acpi_PkgInt32(sc->cpu_dev, pkg, 3, &cx_ptr->power);

        /* Validate the state to see if we should use it. */
        switch (cx_ptr->type) {
        case ACPI_STATE_C1:
            cpu_non_c3 = i;
            cx_ptr++;
            sc->cpu_cx_count++;
            continue;
        case ACPI_STATE_C2:
            if (cx_ptr->trans_lat > 100) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                 "acpi_cpu%d: C2[%d] not available.\n",
                                 device_get_unit(sc->cpu_dev), i));
                continue;
            }
            cpu_non_c3 = i;
            break;
        case ACPI_STATE_C3:
        default:
            if (cx_ptr->trans_lat > 1000 ||
                (cpu_quirks & CPU_QUIRK_NO_C3) != 0) {

                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                 "acpi_cpu%d: C3[%d] not available.\n",
                                 device_get_unit(sc->cpu_dev), i));
                continue;
            }
            break;
        }

#ifdef notyet
        /* Free up any previous register. */
        if (cx_ptr->p_lvlx != NULL) {
            bus_release_resource(sc->cpu_dev, 0, 0, cx_ptr->p_lvlx);
            cx_ptr->p_lvlx = NULL;
        }
#endif

        /* Allocate the control register for C2 or C3. */
        acpi_PkgGas(sc->cpu_dev, pkg, 0, &cpu_rid, &cx_ptr->p_lvlx);
        if (cx_ptr->p_lvlx != NULL) {
            cpu_rid++;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                             "acpi_cpu%d: Got C%d - %d latency\n",
                             device_get_unit(sc->cpu_dev), cx_ptr->type,
                             cx_ptr->trans_lat));
            cx_ptr++;
            sc->cpu_cx_count++;
        }
    }
    AcpiOsFree(buf.Pointer);

    return (0);
}

/*
 * Call this *after* all CPUs have been attached.
 */
static void
acpi_cpu_startup(void *arg)
{
    struct acpi_cpu_softc *sc;
    int count, i;

    /* Get set of CPU devices */
    devclass_get_devices(acpi_cpu_devclass, &cpu_devices, &cpu_ndevices);

    /* Register performance profile change handler */
    EVENTHANDLER_REGISTER(power_profile_change, acpi_cpu_power_profile,
                          NULL, 0);

    /*
     * Make sure all the processors' Cx counts match.  We should probably
     * also check the contents of each.  However, no known systems have
     * non-matching Cx counts so we'll deal with this later.
     */
    count = MAX_CX_STATES;
    for (i = 0; i < cpu_ndevices; i++) {
        sc = device_get_softc(cpu_devices[i]);
        count = min(sc->cpu_cx_count, count);
    }
    cpu_cx_count = count;

    /* Perform throttling and Cx final initialization. */
    sc = device_get_softc(cpu_devices[0]);
    if (sc->cpu_p_cnt != NULL)
        acpi_cpu_startup_throttling();
    if (cpu_cx_count > 0)
        acpi_cpu_startup_cx();
}

/*
 * Takes the ACPI lock to avoid fighting anyone over the SMI command
 * port.
 */
static void
acpi_cpu_startup_throttling(void)
{
    int cpu_temp_speed;
    ACPI_LOCK_DECL;

    /* Initialise throttling states */
    cpu_max_state = CPU_MAX_SPEED;
    cpu_performance_state = cpu_max_state;
    cpu_economy_state = cpu_performance_state / 2;

    /* 0 is 'reserved' */
    if (cpu_economy_state == 0)
        cpu_economy_state++;
    if (TUNABLE_INT_FETCH("hw.acpi.cpu.performance_speed", &cpu_temp_speed) &&
        cpu_temp_speed > 0 && cpu_temp_speed <= cpu_max_state) {

        cpu_performance_state = cpu_temp_speed;
    }
    if (TUNABLE_INT_FETCH("hw.acpi.cpu.economy_speed", &cpu_temp_speed) &&
        cpu_temp_speed > 0 && cpu_temp_speed <= cpu_max_state) {

        cpu_economy_state = cpu_temp_speed;
    }

    SYSCTL_ADD_INT(&acpi_cpu_sysctl_ctx,
                   SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
                   OID_AUTO, "max_speed", CTLFLAG_RD,
                   &cpu_max_state, 0, "maximum CPU speed");
    SYSCTL_ADD_INT(&acpi_cpu_sysctl_ctx,
                   SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
                   OID_AUTO, "current_speed", CTLFLAG_RD,
                   &cpu_current_state, 0, "current CPU speed");
    SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx,
                    SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
                    OID_AUTO, "performance_speed",
                    CTLTYPE_INT | CTLFLAG_RW, &cpu_performance_state,
                    0, acpi_cpu_throttle_sysctl, "I", "");
    SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx,
                    SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
                    OID_AUTO, "economy_speed",
                    CTLTYPE_INT | CTLFLAG_RW, &cpu_economy_state,
                    0, acpi_cpu_throttle_sysctl, "I", "");

    /* If ACPI 2.0+, signal platform that we are taking over throttling. */
    if (cpu_pstate_cnt != 0) {
        ACPI_LOCK;
        AcpiOsWritePort(cpu_smi_cmd, cpu_pstate_cnt, 8);
        ACPI_UNLOCK;
    }

    /* Set initial speed */
    acpi_cpu_power_profile(NULL);

    printf("acpi_cpu: throttling enabled, %d steps (100%% to %d.%d%%), "
           "currently %d.%d%%\n", CPU_MAX_SPEED, CPU_SPEED_PRINTABLE(1),
           CPU_SPEED_PRINTABLE(cpu_current_state));
}

static void
acpi_cpu_startup_cx(void)
{
    struct acpi_cpu_softc *sc;
    struct sbuf          sb;
    int i;
    ACPI_LOCK_DECL;

    sc = device_get_softc(cpu_devices[0]);
    sbuf_new(&sb, cpu_cx_supported, sizeof(cpu_cx_supported), SBUF_FIXEDLEN);
    for (i = 0; i < cpu_cx_count; i++) {
        sbuf_printf(&sb, "C%d/%d ", sc->cpu_cx_states[i].type,
                    sc->cpu_cx_states[i].trans_lat);
    }
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    SYSCTL_ADD_STRING(&acpi_cpu_sysctl_ctx,
                      SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
                      OID_AUTO, "cx_supported", CTLFLAG_RD, cpu_cx_supported,
                      0, "Cx/microsecond values for supported Cx states");
    SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx,
                    SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
                    OID_AUTO, "cx_lowest", CTLTYPE_INT | CTLFLAG_RW,
                    NULL, 0, acpi_cpu_cx_lowest_sysctl, "I",
                    "lowest Cx sleep state to use");
    SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx,
                    SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
                    OID_AUTO, "cx_history", CTLTYPE_STRING | CTLFLAG_RD,
                    NULL, 0, acpi_cpu_history_sysctl, "A", "");
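
    /*
     * These appear under hw.acpi.cpu; e.g., "sysctl
     * hw.acpi.cpu.cx_lowest=1" makes the second-listed Cx state the
     * deepest one used on the next idle pass.
     */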

#ifdef notyet
    /* Signal platform that we can handle _CST notification. */
    if (cpu_cst_cnt != 0) {
        ACPI_LOCK;
        AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8);
        ACPI_UNLOCK;
    }
#endif

    /* Take over idling from cpu_idle_default(). */
    cpu_cx_next = cpu_cx_lowest;
    cpu_idle_hook = acpi_cpu_idle;
}

/*
 * Set CPUs to the new state.
 *
 * Must be called with the ACPI lock held.
 */
static void
acpi_cpu_throttle_set(uint32_t speed)
{
    struct acpi_cpu_softc       *sc;
    int                         i;
    uint32_t                    p_cnt, clk_val;

    ACPI_ASSERTLOCK;

    /* Iterate over processors */
    for (i = 0; i < cpu_ndevices; i++) {
        sc = device_get_softc(cpu_devices[i]);
        if (sc->cpu_p_cnt == NULL)
            continue;

        /* Get the current P_CNT value and disable throttling */
        p_cnt = CPU_GET_REG(sc->cpu_p_cnt, 4);
        p_cnt &= ~CPU_P_CNT_THT_EN;
        CPU_SET_REG(sc->cpu_p_cnt, 4, p_cnt);

        /* If we're at maximum speed, that's all */
        if (speed < CPU_MAX_SPEED) {
            /* Mask the old CLK_VAL field off and or-in the new value */
            clk_val = (CPU_MAX_SPEED - 1) << cpu_duty_offset;
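            /*
             * e.g., with duty offset 1 and width 3, the field mask is
             * 0x0e and a speed count of 4 writes CLK_VAL bits 0x08.
             */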
            p_cnt &= ~clk_val;
            p_cnt |= (speed << cpu_duty_offset);

            /* Write the new P_CNT value and then enable throttling */
            CPU_SET_REG(sc->cpu_p_cnt, 4, p_cnt);
            p_cnt |= CPU_P_CNT_THT_EN;
            CPU_SET_REG(sc->cpu_p_cnt, 4, p_cnt);
        }
        ACPI_VPRINT(sc->cpu_dev, acpi_device_get_parent_softc(sc->cpu_dev),
                    "set speed to %d.%d%%\n", CPU_SPEED_PRINTABLE(speed));
    }
    cpu_current_state = speed;
}

/*
 * Idle the CPU in the lowest state possible.
 * This function is called with interrupts disabled.
 */
static void
acpi_cpu_idle(void)
{
    struct      acpi_cpu_softc *sc;
    struct      acpi_cx *cx_next;
    uint32_t    start_time, end_time;
    int         bm_active, i, asleep;

    /* If disabled, return immediately. */
    if (cpu_cx_count == 0) {
        ACPI_ENABLE_IRQS();
        return;
    }

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no ACPI processor object for this CPU.  This occurs
     * for logical CPUs in the HTT case.
     */
    sc = cpu_softc[PCPU_GET(cpuid)];
    if (sc == NULL) {
        acpi_cpu_c1();
        return;
    }

    /* Record that a CPU is in the idle function. */
    atomic_add_int(&cpu_idle_busy, 1);

    /*
     * Check for bus master activity.  If there was activity, clear
     * the bit and use the lowest non-C3 state.  Note that the USB
     * driver polling for new devices keeps this bit set all the
     * time if USB is enabled.
     */
    AcpiGetRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active,
                    ACPI_MTX_DO_NOT_LOCK);
    if (bm_active != 0) {
        AcpiSetRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1,
                        ACPI_MTX_DO_NOT_LOCK);
        cpu_cx_next = min(cpu_cx_next, cpu_non_c3);
    }

    /* Perform the actual sleep based on the Cx-specific semantics. */
    cx_next = &sc->cpu_cx_states[cpu_cx_next];
    switch (cx_next->type) {
    case ACPI_STATE_C0:
        panic("acpi_cpu_idle: attempting to sleep in C0");
        /* NOTREACHED */
    case ACPI_STATE_C1:
        /* Execute HLT (or equivalent) and wait for an interrupt. */
        acpi_cpu_c1();

        /*
         * We can't calculate the time spent in C1 since we wake up in
         * an ISR.  Assume a constant 1000 PM timer ticks (roughly
         * 250 us after the PM_USEC() conversion).
         */
        start_time = 0;
        end_time = 1000;
        break;
    case ACPI_STATE_C2:
        /*
         * Read from P_LVLx to enter C2, checking time spent asleep.
         * Use the ACPI timer for measuring sleep time.  Since we need to
         * get the time very close to the CPU start/stop clock logic, this
         * is the only reliable time source.
         */
        AcpiHwLowLevelRead(32, &start_time, &AcpiGbl_FADT->XPmTmrBlk);
        CPU_GET_REG(cx_next->p_lvlx, 1);

        /*
         * Read the end time twice.  Since it may take an arbitrary time
         * to enter the idle state, the first read may be executed before
         * the processor has stopped.  Doing it again provides enough
         * margin that we are certain to have a correct value.
         */
        AcpiHwLowLevelRead(32, &end_time, &AcpiGbl_FADT->XPmTmrBlk);
        AcpiHwLowLevelRead(32, &end_time, &AcpiGbl_FADT->XPmTmrBlk);
        ACPI_ENABLE_IRQS();
        break;
    case ACPI_STATE_C3:
    default:
        /* Disable bus master arbitration and enable bus master wakeup. */
        AcpiSetRegister(ACPI_BITREG_ARB_DISABLE, 1, ACPI_MTX_DO_NOT_LOCK);
        AcpiSetRegister(ACPI_BITREG_BUS_MASTER_RLD, 1, ACPI_MTX_DO_NOT_LOCK);

        /* Read from P_LVLx to enter C3, checking time spent asleep. */
        AcpiHwLowLevelRead(32, &start_time, &AcpiGbl_FADT->XPmTmrBlk);
        CPU_GET_REG(cx_next->p_lvlx, 1);

        /* Read the end time twice.  See comment for C2 above. */
        AcpiHwLowLevelRead(32, &end_time, &AcpiGbl_FADT->XPmTmrBlk);
        AcpiHwLowLevelRead(32, &end_time, &AcpiGbl_FADT->XPmTmrBlk);

        /* Enable bus master arbitration and disable bus master wakeup. */
        AcpiSetRegister(ACPI_BITREG_ARB_DISABLE, 0, ACPI_MTX_DO_NOT_LOCK);
        AcpiSetRegister(ACPI_BITREG_BUS_MASTER_RLD, 0, ACPI_MTX_DO_NOT_LOCK);
        ACPI_ENABLE_IRQS();
        break;
    }

    /* Find the actual time asleep in microseconds, minus overhead. */
    acpi_pm_ticksub(&end_time, &start_time);
    asleep = PM_USEC(end_time) - cx_next->trans_lat;

    /* Record statistics */
    if (asleep < cx_next->trans_lat)
        cpu_cx_stats[cpu_cx_next].short_slp++;
    else
        cpu_cx_stats[cpu_cx_next].long_slp++;

    /*
     * If we slept 100 us or more, use the lowest Cx state.
     * Otherwise, find the lowest state that has a latency less than
     * or equal to the length of our last sleep.
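     * For example, with cpu_cx_lowest == 2 and latencies of 0, 90,
     * and 900 us, a 95 us sleep selects state 1 for the next pass.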
     */
    if (asleep >= 100)
        cpu_cx_next = cpu_cx_lowest;
    else {
        for (i = cpu_cx_lowest; i >= 0; i--) {
            if (sc->cpu_cx_states[i].trans_lat <= asleep) {
                cpu_cx_next = i;
                break;
            }
        }
    }

    /* Decrement reference count checked by acpi_cpu_shutdown(). */
    atomic_subtract_int(&cpu_idle_busy, 1);
}

/* Put the CPU in C1 in a machine-dependent way. */
static void
acpi_cpu_c1(void)
{
#ifdef __ia64__
    ia64_call_pal_static(PAL_HALT_LIGHT, 0, 0, 0);
#else
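    /*
     * "sti" only takes effect after the following instruction, so no
     * interrupt can slip in between "sti" and "hlt"; a pending
     * interrupt wakes the halt immediately.
     */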
    __asm __volatile("sti; hlt");
#endif
}

/* Find the difference between two PM tick counts. */
static void
acpi_pm_ticksub(uint32_t *end, const uint32_t *start)
{
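    /*
     * e.g., with a 24-bit timer (TmrValExt == 0), start 0x00fffff0 and
     * end 0x00000010 give (0x00ffffff - 0x00fffff0) + 0x10 + 1 == 0x20
     * ticks across the wraparound.
     */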
    if (*end >= *start)
        *end = *end - *start;
    else if (AcpiGbl_FADT->TmrValExt == 0)
        *end = (((0x00FFFFFF - *start) + *end + 1) & 0x00FFFFFF);
    else
        *end = ((0xFFFFFFFF - *start) + *end + 1);
}

/*
 * Re-evaluate the _PSS and _CST objects when we are notified that they
 * have changed.
 *
 * XXX Re-evaluation disabled until locking is done.
 */
static void
acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context)
{
    struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)context;

    switch (notify) {
    case ACPI_CPU_NOTIFY_PERF_STATES:
        device_printf(sc->cpu_dev, "Performance states changed\n");
        /* acpi_cpu_px_available(sc); */
        break;
    case ACPI_CPU_NOTIFY_CX_STATES:
        device_printf(sc->cpu_dev, "Cx states changed\n");
        /* acpi_cpu_cx_cst(sc); */
        break;
    default:
        device_printf(sc->cpu_dev, "Unknown notify %#x\n", notify);
        break;
    }
}

static int
acpi_cpu_quirks(struct acpi_cpu_softc *sc)
{

    /*
     * C3 is not supported on multiple CPUs since this would require
     * flushing all caches which is currently too expensive.
     */
    if (mp_ncpus > 1)
        cpu_quirks |= CPU_QUIRK_NO_C3;

#ifdef notyet
    /* Look for various quirks of the PIIX4 part. */
    acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
    if (acpi_dev != NULL) {
        switch (pci_get_revid(acpi_dev)) {
        /*
         * Disable throttling control on PIIX4 A and B-step.
         * See specification changes #13 ("Manual Throttle Duty Cycle")
         * and #14 ("Enabling and Disabling Manual Throttle"), plus
         * erratum #5 ("STPCLK# Deassertion Time") from the January
         * 2002 PIIX4 specification update.  Note that few (if any)
         * mobile systems ever used this part.
         */
        case PCI_REVISION_A_STEP:
        case PCI_REVISION_B_STEP:
            cpu_quirks |= CPU_QUIRK_NO_THROTTLE;
            /* FALLTHROUGH */
        /*
         * Disable C3 support for all PIIX4 chipsets.  Some of these parts
         * do not report the BMIDE status to the BM status register and
         * others have a livelock bug if Type-F DMA is enabled.  Linux
         * works around the BMIDE bug by reading the BM status directly
         * but we take the simpler approach of disabling C3 for these
         * parts.
         *
         * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
         * Livelock") from the January 2002 PIIX4 specification update.
         * Applies to all PIIX4 models.
         */
        case PCI_REVISION_4E:
        case PCI_REVISION_4M:
            cpu_quirks |= CPU_QUIRK_NO_C3;
            break;
        default:
            break;
        }
    }
#endif

    return (0);
}

/*
 * Power profile change hook.
 *
 * Uses the ACPI lock to avoid reentrancy.
 */
static void
acpi_cpu_power_profile(void *arg)
{
    int         state;
    uint32_t    new;
    ACPI_LOCK_DECL;

    state = power_profile_get_state();
    if (state != POWER_PROFILE_PERFORMANCE && state != POWER_PROFILE_ECONOMY)
        return;

    ACPI_LOCK;

    switch (state) {
    case POWER_PROFILE_PERFORMANCE:
        new = cpu_performance_state;
        break;
    case POWER_PROFILE_ECONOMY:
        new = cpu_economy_state;
        break;
    default:
        new = cpu_current_state;
        break;
    }

    if (cpu_current_state != new)
        acpi_cpu_throttle_set(new);

    ACPI_UNLOCK;
}

/*
 * Handle changes in the performance/economy CPU settings.
 *
 * Does not need the ACPI lock (although setting *argp should
 * probably be atomic).
 */
static int
acpi_cpu_throttle_sysctl(SYSCTL_HANDLER_ARGS)
{
    uint32_t    *argp;
    uint32_t     arg;
    int          error;

    argp = (uint32_t *)oidp->oid_arg1;
    arg = *argp;
    error = sysctl_handle_int(oidp, &arg, 0, req);

    /* Error or no new value */
    if (error != 0 || req->newptr == NULL)
        return (error);
    if (arg < 1 || arg > cpu_max_state)
        return (EINVAL);

    /* Set new value and possibly switch */
    *argp = arg;
    acpi_cpu_power_profile(NULL);

    return (0);
}

static int
acpi_cpu_history_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct sbuf  sb;
    char         buf[128];
    int          i;

    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
    for (i = 0; i < cpu_cx_count; i++) {
        sbuf_printf(&sb, "%u/%u ", cpu_cx_stats[i].long_slp,
                    cpu_cx_stats[i].short_slp);
    }
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    sysctl_handle_string(oidp, sbuf_data(&sb), 0, req);

    return (0);
}

static int
acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct       acpi_cpu_softc *sc;
    int          val, error, i;

    sc = device_get_softc(cpu_devices[0]);
    val = cpu_cx_lowest;
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error != 0 || req->newptr == NULL)
        return (error);
    if (val < 0 || val > cpu_cx_count - 1)
        return (EINVAL);

    /* Use the new value for the next idle slice. */
    cpu_cx_lowest = val;
    cpu_cx_next = val;

    /* Cache the new lowest non-C3 state. */
    cpu_non_c3 = 0;
    for (i = cpu_cx_lowest; i >= 0; i--) {
        if (sc->cpu_cx_states[i].type < ACPI_STATE_C3) {
            cpu_non_c3 = i;
            break;
        }
    }

    /* Reset the statistics counters. */
    memset(cpu_cx_stats, 0, sizeof(cpu_cx_stats));

    return (0);
}
