/*-
 * Copyright (c) 2003-2005 Nate Lawson (SDG)
 * Copyright (c) 2001 Michael Smith
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/5.4/sys/dev/acpica/acpi_cpu.c 142509 2005-02-25 21:43:38Z njl $");

#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/pcpu.h>
#include <sys/power.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smp.h>

#include <dev/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <sys/rman.h>

#include "acpi.h"
#include <dev/acpica/acpivar.h>

/*
 * Support for ACPI Processor devices, including C[1-3] sleep states.
 *
 * TODO: implement scans of all CPUs to be sure all Cx states are
 * equivalent.
 */

/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT    ACPI_PROCESSOR
ACPI_MODULE_NAME("PROCESSOR")

struct acpi_cx {
    struct resource *p_lvlx;    /* Register to read to enter state. */
    uint32_t         type;      /* C1-3 (C4 and up treated as C3). */
    uint32_t         trans_lat; /* Transition latency (usec). */
    uint32_t         power;     /* Power consumed (mW). */
    int              res_type;  /* Resource type for p_lvlx. */
};
#define MAX_CX_STATES    8

struct acpi_cpu_softc {
    device_t         cpu_dev;
    ACPI_HANDLE      cpu_handle;
    struct pcpu     *cpu_pcpu;
    uint32_t         cpu_acpi_id;   /* ACPI processor id */
    uint32_t         cpu_p_blk;     /* ACPI P_BLK location */
    uint32_t         cpu_p_blk_len; /* P_BLK length (must be 6). */
    struct acpi_cx   cpu_cx_states[MAX_CX_STATES];
    int              cpu_cx_count;  /* Number of valid Cx states. */
    int              cpu_prev_sleep;/* Last idle sleep duration. */
};

struct acpi_cpu_device {
    struct resource_list ad_rl;
};

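/*
 * A note on the helpers below: they read or write offset 0 of the bus
 * resource mapped for a P_LVLx register.  A one-byte CPU_GET_REG() of
 * P_LVL2 or P_LVL3 is the operation that actually requests entry into
 * the corresponding Cx state (see acpi_cpu_idle()).
 */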
#define CPU_GET_REG(reg, width) \
    (bus_space_read_ ## width(rman_get_bustag((reg)), \
                              rman_get_bushandle((reg)), 0))
#define CPU_SET_REG(reg, width, val) \
    (bus_space_write_ ## width(rman_get_bustag((reg)), \
                               rman_get_bushandle((reg)), 0, (val)))

#define PM_USEC(x)    ((x) >> 2)    /* ~4 clocks per usec (3.57955 MHz) */
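/*
 * Example: the ACPI PM timer ticks at about 3.58 MHz, so 3580 ticks is
 * almost exactly one millisecond; PM_USEC(3580) yields 895, underestimating
 * the true 1000 usec by roughly 11%.  The shift trades accuracy for
 * avoiding a divide in the idle path.
 */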

#define ACPI_NOTIFY_CX_STATES    0x81    /* _CST changed. */

#define CPU_QUIRK_NO_C3      (1<<0)    /* C3-type states are not usable. */
#define CPU_QUIRK_NO_BM_CTRL (1<<2)    /* No bus mastering control. */

#define PCI_VENDOR_INTEL     0x8086
#define PCI_DEVICE_82371AB_3 0x7113    /* PIIX4 chipset for quirks. */
#define PCI_REVISION_A_STEP  0
#define PCI_REVISION_B_STEP  1
#define PCI_REVISION_4E      2
#define PCI_REVISION_4M      3

/* Platform hardware resource information. */
static uint32_t  cpu_smi_cmd;    /* Value to write to SMI_CMD. */
static uint8_t   cpu_cst_cnt;    /* Indicate we are _CST aware. */
static int       cpu_rid;        /* Driver-wide resource id. */
static int       cpu_quirks;     /* Indicate any hardware bugs. */

/* Runtime state. */
static int       cpu_cx_count;   /* Number of valid states */
static int       cpu_non_c3;     /* Index of lowest non-C3 state. */
static u_int     cpu_cx_stats[MAX_CX_STATES];    /* Cx usage history. */

/* Values for sysctl. */
static struct sysctl_ctx_list acpi_cpu_sysctl_ctx;
static struct sysctl_oid *acpi_cpu_sysctl_tree;
static int       cpu_cx_lowest;
static char      cpu_cx_supported[64];

static device_t *cpu_devices;
static int       cpu_ndevices;
static struct acpi_cpu_softc **cpu_softc;
ACPI_SERIAL_DECL(cpu, "ACPI CPU");

static int  acpi_cpu_probe(device_t dev);
static int  acpi_cpu_attach(device_t dev);
static int  acpi_pcpu_get_id(uint32_t idx, uint32_t *acpi_id,
                uint32_t *cpu_id);
static struct resource_list *acpi_cpu_get_rlist(device_t dev, device_t child);
static device_t acpi_cpu_add_child(device_t dev, int order, const char *name,
                int unit);
static int  acpi_cpu_read_ivar(device_t dev, device_t child, int index,
                uintptr_t *result);
static int  acpi_cpu_shutdown(device_t dev);
static int  acpi_cpu_cx_probe(struct acpi_cpu_softc *sc);
static int  acpi_cpu_cx_cst(struct acpi_cpu_softc *sc);
static void acpi_cpu_startup(void *arg);
static void acpi_cpu_startup_cx(void);
static void acpi_cpu_idle(void);
static void acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context);
static int  acpi_cpu_quirks(struct acpi_cpu_softc *sc);
static int  acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS);
static int  acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);

static device_method_t acpi_cpu_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,     acpi_cpu_probe),
    DEVMETHOD(device_attach,    acpi_cpu_attach),
    DEVMETHOD(device_detach,    bus_generic_detach),
    DEVMETHOD(device_shutdown,  acpi_cpu_shutdown),
    DEVMETHOD(device_suspend,   bus_generic_suspend),
    DEVMETHOD(device_resume,    bus_generic_resume),

    /* Bus interface */
    DEVMETHOD(bus_add_child,    acpi_cpu_add_child),
    DEVMETHOD(bus_read_ivar,    acpi_cpu_read_ivar),
    DEVMETHOD(bus_get_resource_list, acpi_cpu_get_rlist),
    DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
    DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
    DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource),
    DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
    DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
    DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
    DEVMETHOD(bus_setup_intr,   bus_generic_setup_intr),
    DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),

    {0, 0}
};

static driver_t acpi_cpu_driver = {
    "cpu",
    acpi_cpu_methods,
    sizeof(struct acpi_cpu_softc),
};

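/*
 * Register the "cpu" driver on the acpi bus; MODULE_DEPEND() ensures the
 * acpi module is loaded and initialized before this driver is used.
 */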
static devclass_t acpi_cpu_devclass;
DRIVER_MODULE(cpu, acpi, acpi_cpu_driver, acpi_cpu_devclass, 0, 0);
MODULE_DEPEND(cpu, acpi, 1, 1, 1);

static int
acpi_cpu_probe(device_t dev)
{
    int    acpi_id, cpu_id, cx_count;
    ACPI_BUFFER buf;
    ACPI_HANDLE handle;
    char   msg[32];
    ACPI_OBJECT *obj;
    ACPI_STATUS status;

    if (acpi_disabled("cpu") || acpi_get_type(dev) != ACPI_TYPE_PROCESSOR)
        return (ENXIO);

    handle = acpi_get_handle(dev);
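    /*
     * Allocate the global softc pointer array on first probe.  It is
     * indexed by cpuid, hence the mp_maxid + 1 sizing; entries are filled
     * in as each processor device is probed and attached.
     */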
    if (cpu_softc == NULL)
        cpu_softc = malloc(sizeof(struct acpi_cpu_softc *) *
            (mp_maxid + 1), M_TEMP /* XXX */, M_WAITOK | M_ZERO);

    /* Get our Processor object. */
    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
        device_printf(dev, "probe failed to get Processor obj - %s\n",
                      AcpiFormatException(status));
        return (ENXIO);
    }
    obj = (ACPI_OBJECT *)buf.Pointer;
    if (obj->Type != ACPI_TYPE_PROCESSOR) {
        device_printf(dev, "Processor object has bad type %d\n", obj->Type);
        AcpiOsFree(obj);
        return (ENXIO);
    }

    /*
     * Find the processor associated with our unit.  We could use the
     * ProcId as a key; however, some boxes do not have the same values
     * in their Processor object as the ProcId values in the MADT.
     */
    acpi_id = obj->Processor.ProcId;
    AcpiOsFree(obj);
    if (acpi_pcpu_get_id(device_get_unit(dev), &acpi_id, &cpu_id) != 0)
        return (ENXIO);

    /*
     * Check if we already probed this processor.  We scan the bus twice
     * so it's possible we've already seen this one.
     */
    if (cpu_softc[cpu_id] != NULL)
        return (ENXIO);

    /* Get a count of Cx states for our device string. */
    cx_count = 0;
    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(handle, "_CST", NULL, &buf);
    if (ACPI_SUCCESS(status)) {
        obj = (ACPI_OBJECT *)buf.Pointer;
        if (ACPI_PKG_VALID(obj, 2))
            acpi_PkgInt32(obj, 0, &cx_count);
        AcpiOsFree(obj);
    } else {
        if (AcpiGbl_FADT->Plvl2Lat <= 100)
            cx_count++;
        if (AcpiGbl_FADT->Plvl3Lat <= 1000)
            cx_count++;
        if (cx_count > 0)
            cx_count++;
    }
    if (cx_count > 0)
        snprintf(msg, sizeof(msg), "ACPI CPU (%d Cx states)", cx_count);
    else
        strlcpy(msg, "ACPI CPU", sizeof(msg));
    device_set_desc_copy(dev, msg);

    /* Mark this processor as in-use and save our derived id for attach. */
    cpu_softc[cpu_id] = (void *)1;
    acpi_set_magic(dev, cpu_id);

    return (0);
}

static int
acpi_cpu_attach(device_t dev)
{
    ACPI_BUFFER buf;
    ACPI_OBJECT *obj;
    struct pcpu *pcpu_data;
    struct acpi_cpu_softc *sc;
    struct acpi_softc *acpi_sc;
    ACPI_STATUS status;
    int    cpu_id;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = device_get_softc(dev);
    sc->cpu_dev = dev;
    sc->cpu_handle = acpi_get_handle(dev);
    cpu_id = acpi_get_magic(dev);
    cpu_softc[cpu_id] = sc;
    pcpu_data = pcpu_find(cpu_id);
    pcpu_data->pc_device = dev;
    sc->cpu_pcpu = pcpu_data;
    cpu_smi_cmd = AcpiGbl_FADT->SmiCmd;
    cpu_cst_cnt = AcpiGbl_FADT->CstCnt;

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
        device_printf(dev, "attach failed to get Processor obj - %s\n",
                      AcpiFormatException(status));
        return (ENXIO);
    }
    obj = (ACPI_OBJECT *)buf.Pointer;
    sc->cpu_p_blk = obj->Processor.PblkAddress;
    sc->cpu_p_blk_len = obj->Processor.PblkLength;
    sc->cpu_acpi_id = obj->Processor.ProcId;
    AcpiOsFree(obj);
    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_BLK at %#x/%d\n",
                     device_get_unit(dev), sc->cpu_p_blk, sc->cpu_p_blk_len));

    acpi_sc = acpi_device_get_parent_softc(dev);
    sysctl_ctx_init(&acpi_cpu_sysctl_ctx);
    acpi_cpu_sysctl_tree = SYSCTL_ADD_NODE(&acpi_cpu_sysctl_ctx,
        SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree), OID_AUTO, "cpu",
        CTLFLAG_RD, 0, "");

    /*
     * Probe for Cx state support.  If it isn't present, free up unused
     * resources.
     */
    if (acpi_cpu_cx_probe(sc) == 0) {
        status = AcpiInstallNotifyHandler(sc->cpu_handle, ACPI_DEVICE_NOTIFY,
            acpi_cpu_notify, sc);
        if (device_get_unit(dev) == 0)
            AcpiOsQueueForExecution(OSD_PRIORITY_LO, acpi_cpu_startup, NULL);
    } else
        sysctl_ctx_free(&acpi_cpu_sysctl_ctx);

    /* Call identify and then probe/attach for cpu child drivers. */
    bus_generic_probe(dev);
    bus_generic_attach(dev);

    return (0);
}

/*
 * Find the nth present CPU, return its pc_cpuid, and set pc_acpi_id
 * from the most reliable source.
 */
static int
acpi_pcpu_get_id(uint32_t idx, uint32_t *acpi_id, uint32_t *cpu_id)
{
    struct pcpu *pcpu_data;
    uint32_t i;

    KASSERT(acpi_id != NULL, ("Null acpi_id"));
    KASSERT(cpu_id != NULL, ("Null cpu_id"));
    for (i = 0; i <= mp_maxid; i++) {
        if (CPU_ABSENT(i))
            continue;
        pcpu_data = pcpu_find(i);
        KASSERT(pcpu_data != NULL, ("no pcpu data for %d", i));
        if (idx-- == 0) {
            /*
             * If pc_acpi_id was not initialized (e.g., a non-APIC UP box)
             * override it with the value from the ASL.  Otherwise, if the
             * two don't match, prefer the MADT-derived value.  Finally,
             * return the pc_cpuid to reference this processor.
             */
            if (pcpu_data->pc_acpi_id == 0xffffffff)
                pcpu_data->pc_acpi_id = *acpi_id;
            else if (pcpu_data->pc_acpi_id != *acpi_id)
                *acpi_id = pcpu_data->pc_acpi_id;
            *cpu_id = pcpu_data->pc_cpuid;
            return (0);
        }
    }

    return (ESRCH);
}

static struct resource_list *
acpi_cpu_get_rlist(device_t dev, device_t child)
{
    struct acpi_cpu_device *ad;

    ad = device_get_ivars(child);
    if (ad == NULL)
        return (NULL);
    return (&ad->ad_rl);
}

static device_t
acpi_cpu_add_child(device_t dev, int order, const char *name, int unit)
{
    struct acpi_cpu_device *ad;
    device_t child;

    if ((ad = malloc(sizeof(*ad), M_TEMP, M_NOWAIT | M_ZERO)) == NULL)
        return (NULL);

    resource_list_init(&ad->ad_rl);

    child = device_add_child_ordered(dev, order, name, unit);
    if (child != NULL)
        device_set_ivars(child, ad);
    return (child);
}

static int
acpi_cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
    struct acpi_cpu_softc *sc;

    sc = device_get_softc(dev);
    switch (index) {
    case ACPI_IVAR_HANDLE:
        *result = (uintptr_t)sc->cpu_handle;
        break;
    case CPU_IVAR_PCPU:
        *result = (uintptr_t)sc->cpu_pcpu;
        break;
    default:
        return (ENOENT);
    }
    return (0);
}

static int
acpi_cpu_shutdown(device_t dev)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Allow children to shutdown first. */
    bus_generic_shutdown(dev);

    /* Disable any entry to the idle function. */
    cpu_cx_count = 0;

    /* Signal and wait for all processors to exit acpi_cpu_idle(). */
    smp_rendezvous(NULL, NULL, NULL, NULL);
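    /*
     * The empty rendezvous serves as a barrier: every CPU must take the
     * rendezvous IPI, so any CPU that was halted in acpi_cpu_idle() has
     * woken and returned by the time it completes, and the zeroed
     * cpu_cx_count above keeps it from re-entering a sleep state.
     */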

    return_VALUE (0);
}

static int
acpi_cpu_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_GENERIC_ADDRESS gas;
    struct acpi_cx *cx_ptr;
    int error;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /*
     * Bus mastering arbitration control is needed to keep caches coherent
     * while sleeping in C3.  If it's not present but a working flush cache
     * instruction is present, flush the caches before entering C3 instead.
     * Otherwise, just disable C3 completely.
     */
    if (AcpiGbl_FADT->V1_Pm2CntBlk == 0 || AcpiGbl_FADT->Pm2CntLen == 0) {
        if (AcpiGbl_FADT->WbInvd && AcpiGbl_FADT->WbInvdFlush == 0) {
            cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu%d: no BM control, using flush cache method\n",
                device_get_unit(sc->cpu_dev)));
        } else {
            cpu_quirks |= CPU_QUIRK_NO_C3;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu%d: no BM control, C3 not available\n",
                device_get_unit(sc->cpu_dev)));
        }
    }

    /*
     * First, check for the ACPI 2.0 _CST sleep states object.
     * If not usable, fall back to the P_BLK's P_LVL2 and P_LVL3.
     */
    sc->cpu_cx_count = 0;
    error = acpi_cpu_cx_cst(sc);
    if (error != 0) {
        cx_ptr = sc->cpu_cx_states;

        /* C1 has been required since just after ACPI 1.0 */
        cx_ptr->type = ACPI_STATE_C1;
        cx_ptr->trans_lat = 0;
        cpu_non_c3 = 0;
        cx_ptr++;
        sc->cpu_cx_count++;

        /*
         * The spec says P_BLK must be 6 bytes long.  However, some systems
         * use its length to indicate which features are present, so we
         * treat a length of 5 as meaning C2 is supported.  Some may also
         * use a value of 7 to indicate another C3 state, but most use _CST
         * for this (as required) and having "only" C1-C3 is not a hardship.
         */
        if (sc->cpu_p_blk_len < 5)
            goto done;

        /* Validate and allocate resources for C2 (P_LVL2). */
        gas.AddressSpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
        gas.RegisterBitWidth = 8;
        if (AcpiGbl_FADT->Plvl2Lat <= 100) {
            gas.Address = sc->cpu_p_blk + 4;
            acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &cpu_rid, &gas,
                &cx_ptr->p_lvlx);
            if (cx_ptr->p_lvlx != NULL) {
                cpu_rid++;
                cx_ptr->type = ACPI_STATE_C2;
                cx_ptr->trans_lat = AcpiGbl_FADT->Plvl2Lat;
                cpu_non_c3 = 1;
                cx_ptr++;
                sc->cpu_cx_count++;
            }
        }
        if (sc->cpu_p_blk_len < 6)
            goto done;

        /* Validate and allocate resources for C3 (P_LVL3). */
        if (AcpiGbl_FADT->Plvl3Lat <= 1000 &&
            (cpu_quirks & CPU_QUIRK_NO_C3) == 0) {
            gas.Address = sc->cpu_p_blk + 5;
            acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &cpu_rid, &gas,
                &cx_ptr->p_lvlx);
            if (cx_ptr->p_lvlx != NULL) {
                cpu_rid++;
                cx_ptr->type = ACPI_STATE_C3;
                cx_ptr->trans_lat = AcpiGbl_FADT->Plvl3Lat;
                cx_ptr++;
                sc->cpu_cx_count++;
            }
        }
    }

done:
    /* If no valid registers were found, don't attach. */
    if (sc->cpu_cx_count == 0)
        return (ENXIO);

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cpu_prev_sleep = 1000000;

    return (0);
}

/*
 * Parse a _CST package and set up its Cx states.  Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
 */
static int
acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
{
    struct acpi_cx *cx_ptr;
    ACPI_STATUS status;
    ACPI_BUFFER buf;
    ACPI_OBJECT *top;
    ACPI_OBJECT *pkg;
    uint32_t count;
    int i;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, "_CST", NULL, &buf);
    if (ACPI_FAILURE(status))
        return (ENXIO);

    /* _CST is a package with a count and at least one Cx package. */
    top = (ACPI_OBJECT *)buf.Pointer;
    if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) {
        device_printf(sc->cpu_dev, "Invalid _CST package\n");
        AcpiOsFree(buf.Pointer);
        return (ENXIO);
    }
    if (count != top->Package.Count - 1) {
        device_printf(sc->cpu_dev, "Invalid _CST state count (%d != %d)\n",
                      count, top->Package.Count - 1);
        count = top->Package.Count - 1;
    }
    if (count > MAX_CX_STATES) {
        device_printf(sc->cpu_dev, "_CST has too many states (%d)\n", count);
        count = MAX_CX_STATES;
    }

    /* Set up all valid states. */
    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;
    for (i = 0; i < count; i++) {
        pkg = &top->Package.Elements[i + 1];
        if (!ACPI_PKG_VALID(pkg, 4) ||
            acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 ||
            acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 ||
            acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) {

            device_printf(sc->cpu_dev, "Skipping invalid Cx state package\n");
            continue;
        }

        /* Validate the state to see if we should use it. */
        switch (cx_ptr->type) {
        case ACPI_STATE_C1:
            cpu_non_c3 = i;
            cx_ptr++;
            sc->cpu_cx_count++;
            continue;
        case ACPI_STATE_C2:
            if (cx_ptr->trans_lat > 100) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu%d: C2[%d] not available.\n",
                    device_get_unit(sc->cpu_dev), i));
                continue;
            }
            cpu_non_c3 = i;
            break;
        case ACPI_STATE_C3:
        default:
            if (cx_ptr->trans_lat > 1000 ||
                (cpu_quirks & CPU_QUIRK_NO_C3) != 0) {

                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu%d: C3[%d] not available.\n",
                    device_get_unit(sc->cpu_dev), i));
                continue;
            }
            break;
        }

#ifdef notyet
        /* Free up any previous register. */
        if (cx_ptr->p_lvlx != NULL) {
            bus_release_resource(sc->cpu_dev, 0, 0, cx_ptr->p_lvlx);
            cx_ptr->p_lvlx = NULL;
        }
#endif

        /* Allocate the control register for C2 or C3. */
        acpi_PkgGas(sc->cpu_dev, pkg, 0, &cx_ptr->res_type, &cpu_rid,
            &cx_ptr->p_lvlx);
        if (cx_ptr->p_lvlx) {
            cpu_rid++;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu%d: Got C%d - %d latency\n",
                device_get_unit(sc->cpu_dev), cx_ptr->type,
                cx_ptr->trans_lat));
            cx_ptr++;
            sc->cpu_cx_count++;
        }
    }
    AcpiOsFree(buf.Pointer);

    return (0);
}

/*
 * Call this *after* all CPUs have been attached.
 */
static void
acpi_cpu_startup(void *arg)
{
    struct acpi_cpu_softc *sc;
    int count, i;

    /* Get set of CPU devices */
    devclass_get_devices(acpi_cpu_devclass, &cpu_devices, &cpu_ndevices);

    /* Check for quirks via the first CPU device. */
    sc = device_get_softc(cpu_devices[0]);
    acpi_cpu_quirks(sc);

    /*
     * Make sure all the processors' Cx counts match.  We should probably
     * also check the contents of each.  However, no known systems have
     * non-matching Cx counts so we'll deal with this later.
     */
    count = MAX_CX_STATES;
    for (i = 0; i < cpu_ndevices; i++) {
        sc = device_get_softc(cpu_devices[i]);
        count = min(sc->cpu_cx_count, count);
    }
    cpu_cx_count = count;

    /* Perform Cx final initialization. */
    sc = device_get_softc(cpu_devices[0]);
    if (cpu_cx_count > 0)
        acpi_cpu_startup_cx();
}

static void
acpi_cpu_startup_cx(void)
{
    struct acpi_cpu_softc *sc;
    struct sbuf sb;
    int i;

    /*
     * Set up the list of Cx states, eliminating C3 states by truncating
     * cpu_cx_count if quirks indicate C3 is not usable.
     */
    sc = device_get_softc(cpu_devices[0]);
    sbuf_new(&sb, cpu_cx_supported, sizeof(cpu_cx_supported), SBUF_FIXEDLEN);
    for (i = 0; i < cpu_cx_count; i++) {
        if ((cpu_quirks & CPU_QUIRK_NO_C3) == 0 ||
            sc->cpu_cx_states[i].type != ACPI_STATE_C3)
            sbuf_printf(&sb, "C%d/%d ", i + 1, sc->cpu_cx_states[i].trans_lat);
        else
            cpu_cx_count = i;
    }
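    /*
     * Note: truncating at the first C3-type state assumes the states are
     * ordered from shallowest to deepest, so every state at or beyond that
     * index is also a C3 type.
     */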
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    SYSCTL_ADD_STRING(&acpi_cpu_sysctl_ctx,
                      SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
                      OID_AUTO, "cx_supported", CTLFLAG_RD, cpu_cx_supported,
                      0, "Cx/microsecond values for supported Cx states");
    SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx,
                    SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
                    OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
                    NULL, 0, acpi_cpu_cx_lowest_sysctl, "A",
                    "lowest Cx sleep state to use");
    SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx,
                    SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
                    OID_AUTO, "cx_usage", CTLTYPE_STRING | CTLFLAG_RD,
                    NULL, 0, acpi_cpu_usage_sysctl, "A",
                    "percent usage for each Cx state");

#ifdef notyet
    /* Signal platform that we can handle _CST notification. */
    if (cpu_cst_cnt != 0) {
        ACPI_LOCK(acpi);
        AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8);
        ACPI_UNLOCK(acpi);
    }
#endif

    /* Take over idling from cpu_idle_default(). */
    cpu_idle_hook = acpi_cpu_idle;
}

/*
 * Idle the CPU in the lowest state possible.  This function is called with
 * interrupts disabled.  Note that once it re-enables interrupts, a task
 * switch can occur so do not access shared data (i.e. the softc) after
 * interrupts are re-enabled.
 */
static void
acpi_cpu_idle(void)
{
    struct acpi_cpu_softc *sc;
    struct acpi_cx *cx_next;
    uint32_t start_time, end_time;
    int bm_active, cx_next_idx, i;

    /* If disabled, return immediately. */
    if (cpu_cx_count == 0) {
        ACPI_ENABLE_IRQS();
        return;
    }

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no ACPI processor object for this CPU.  This occurs
     * for logical CPUs in the HTT case.
     */
    sc = cpu_softc[PCPU_GET(cpuid)];
    if (sc == NULL) {
        acpi_cpu_c1();
        return;
    }

    /*
     * If we slept 100 us or more, use the lowest Cx state.  Otherwise,
     * find the lowest state that has a latency less than or equal to
     * the length of our last sleep.
     */
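    /*
     * For example, with C1/C2/C3 latencies of 0/90/900 usec and a previous
     * sleep of 85 usec, the search below rejects C3 (900 > 85) and
     * C2 (90 > 85) and falls back to C1.
     */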
    cx_next_idx = cpu_cx_lowest;
    if (sc->cpu_prev_sleep < 100)
        for (i = cpu_cx_lowest; i >= 0; i--)
            if (sc->cpu_cx_states[i].trans_lat <= sc->cpu_prev_sleep) {
                cx_next_idx = i;
                break;
            }

    /*
     * Check for bus master activity.  If there was activity, clear
     * the bit and use the lowest non-C3 state.  Note that the USB
     * driver polling for new devices keeps this bit set all the
     * time if USB is loaded.
     */
    if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
        AcpiGetRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active,
                        ACPI_MTX_DO_NOT_LOCK);
        if (bm_active != 0) {
            AcpiSetRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1,
                            ACPI_MTX_DO_NOT_LOCK);
            cx_next_idx = min(cx_next_idx, cpu_non_c3);
        }
    }

    /* Select the next state and update statistics. */
    cx_next = &sc->cpu_cx_states[cx_next_idx];
    cpu_cx_stats[cx_next_idx]++;
    KASSERT(cx_next->type != ACPI_STATE_C0, ("acpi_cpu_idle: C0 sleep"));

    /*
     * Execute HLT (or equivalent) and wait for an interrupt.  We can't
     * calculate the time spent in C1 since the place we wake up is an
     * ISR.  Assume we slept one quantum and return.
     */
    if (cx_next->type == ACPI_STATE_C1) {
        sc->cpu_prev_sleep = 1000000 / hz;
        acpi_cpu_c1();
        return;
    }

    /*
     * For C3, disable bus master arbitration and enable bus master wake
     * if BM control is available, otherwise flush the CPU cache.
     */
    if (cx_next->type == ACPI_STATE_C3) {
        if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
            AcpiSetRegister(ACPI_BITREG_ARB_DISABLE, 1, ACPI_MTX_DO_NOT_LOCK);
            AcpiSetRegister(ACPI_BITREG_BUS_MASTER_RLD, 1,
                            ACPI_MTX_DO_NOT_LOCK);
        } else
            ACPI_FLUSH_CPU_CACHE();
    }

    /*
     * Read from P_LVLx to enter C2(+), checking time spent asleep.
     * Use the ACPI timer for measuring sleep time.  Since we need to
     * get the time very close to the CPU start/stop clock logic, this
     * is the only reliable time source.
     */
    AcpiHwLowLevelRead(32, &start_time, &AcpiGbl_FADT->XPmTmrBlk);
    CPU_GET_REG(cx_next->p_lvlx, 1);

    /*
     * Read the end time twice.  Since it may take an arbitrary time
     * to enter the idle state, the first read may be executed before
     * the processor has stopped.  Doing it again provides enough
     * margin that we are certain to have a correct value.
     */
    AcpiHwLowLevelRead(32, &end_time, &AcpiGbl_FADT->XPmTmrBlk);
    AcpiHwLowLevelRead(32, &end_time, &AcpiGbl_FADT->XPmTmrBlk);

    /* Enable bus master arbitration and disable bus master wakeup. */
    if (cx_next->type == ACPI_STATE_C3 &&
        (cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
        AcpiSetRegister(ACPI_BITREG_ARB_DISABLE, 0, ACPI_MTX_DO_NOT_LOCK);
        AcpiSetRegister(ACPI_BITREG_BUS_MASTER_RLD, 0, ACPI_MTX_DO_NOT_LOCK);
    }

    /* Find the actual time asleep in microseconds, minus overhead. */
    end_time = acpi_TimerDelta(end_time, start_time);
    sc->cpu_prev_sleep = PM_USEC(end_time) - cx_next->trans_lat;
    ACPI_ENABLE_IRQS();
}

/*
 * Re-evaluate the _CST object when we are notified that it changed.
 *
 * XXX Re-evaluation disabled until locking is done.
 */
static void
acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context)
{
    struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)context;

    if (notify != ACPI_NOTIFY_CX_STATES)
        return;

    device_printf(sc->cpu_dev, "Cx states changed\n");
    /* acpi_cpu_cx_cst(sc); */
}

static int
acpi_cpu_quirks(struct acpi_cpu_softc *sc)
{
    device_t acpi_dev;

    /*
     * C3 on multiple CPUs requires using the expensive flush cache
     * instruction.
     */
    if (mp_ncpus > 1)
        cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;

    /* Look for various quirks of the PIIX4 part. */
    acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
    if (acpi_dev != NULL) {
        switch (pci_get_revid(acpi_dev)) {
        /*
         * Disable C3 support for all PIIX4 chipsets.  Some of these parts
         * do not report the BMIDE status to the BM status register and
         * others have a livelock bug if Type-F DMA is enabled.  Linux
         * works around the BMIDE bug by reading the BM status directly
         * but we take the simpler approach of disabling C3 for these
         * parts.
         *
         * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
         * Livelock") from the January 2002 PIIX4 specification update.
         * Applies to all PIIX4 models.
         */
        case PCI_REVISION_4E:
        case PCI_REVISION_4M:
            cpu_quirks |= CPU_QUIRK_NO_C3;
            break;
        default:
            break;
        }
    }

    return (0);
}

static int
acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct sbuf sb;
    char buf[128];
    int i;
    uintmax_t fract, sum, whole;

    sum = 0;
    for (i = 0; i < cpu_cx_count; i++)
        sum += cpu_cx_stats[i];
    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
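    /*
     * Print each counter as a percentage of the total with two decimal
     * places, using integer math only.  For example, 1 sleep out of 3
     * total: whole = 100, 100 / 3 = 33, fract = (100 % 3) * 100 = 100,
     * and 100 / 3 = 33, printing "33.33%".
     */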
    for (i = 0; i < cpu_cx_count; i++) {
        if (sum > 0) {
            whole = (uintmax_t)cpu_cx_stats[i] * 100;
            fract = (whole % sum) * 100;
            sbuf_printf(&sb, "%u.%02u%% ", (u_int)(whole / sum),
                        (u_int)(fract / sum));
        } else
            sbuf_printf(&sb, "0%% ");
    }
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
    sbuf_delete(&sb);

    return (0);
}

static int
acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    char state[8];
    int val, error, i;

    sc = device_get_softc(cpu_devices[0]);
    snprintf(state, sizeof(state), "C%d", cpu_cx_lowest + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
        return (error);
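    /* Validate and parse a state name of the form "Cn" into index n - 1. */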
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
        return (EINVAL);
    val = (int) strtol(state + 1, NULL, 10) - 1;
    if (val < 0 || val > cpu_cx_count - 1)
        return (EINVAL);

    ACPI_SERIAL_BEGIN(cpu);
    cpu_cx_lowest = val;

    /* If not disabling, cache the new lowest non-C3 state. */
    cpu_non_c3 = 0;
    for (i = cpu_cx_lowest; i >= 0; i--) {
        if (sc->cpu_cx_states[i].type < ACPI_STATE_C3) {
            cpu_non_c3 = i;
            break;
        }
    }

    /* Reset the statistics counters. */
    bzero(cpu_cx_stats, sizeof(cpu_cx_stats));
    ACPI_SERIAL_END(cpu);

    return (0);
}