/*-
 * Copyright (c) 2003-2005 Nate Lawson (SDG)
 * Copyright (c) 2001 Michael Smith
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/pcpu.h>
#include <sys/power.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smp.h>

#include <dev/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <sys/rman.h>

#include "acpi.h"
#include <dev/acpica/acpivar.h>

/*
 * Support for ACPI Processor devices, including C[1-3] sleep states.
 *
 * TODO: implement scans of all CPUs to be sure all Cx states are
 * equivalent.
 */
58
59 /* Hooks for the ACPI CA debugging infrastructure */
60 #define _COMPONENT ACPI_PROCESSOR
61 ACPI_MODULE_NAME("PROCESSOR")
62
63 struct acpi_cx {
64 struct resource *p_lvlx; /* Register to read to enter state. */
65 uint32_t type; /* C1-3 (C4 and up treated as C3). */
66 uint32_t trans_lat; /* Transition latency (usec). */
67 uint32_t power; /* Power consumed (mW). */
68 int res_type; /* Resource type for p_lvlx. */
69 };
70 #define MAX_CX_STATES 8
71
72 struct acpi_cpu_softc {
73 device_t cpu_dev;
74 ACPI_HANDLE cpu_handle;
75 struct pcpu *cpu_pcpu;
76 uint32_t cpu_acpi_id; /* ACPI processor id */
77 uint32_t cpu_p_blk; /* ACPI P_BLK location */
78 uint32_t cpu_p_blk_len; /* P_BLK length (must be 6). */
79 struct acpi_cx cpu_cx_states[MAX_CX_STATES];
80 int cpu_cx_count; /* Number of valid Cx states. */
81 int cpu_prev_sleep;/* Last idle sleep duration. */
82 int cpu_features; /* Child driver supported features. */
83 };
84
85 struct acpi_cpu_device {
86 struct resource_list ad_rl;
87 };
88
89 #define CPU_GET_REG(reg, width) \
90 (bus_space_read_ ## width(rman_get_bustag((reg)), \
91 rman_get_bushandle((reg)), 0))
92 #define CPU_SET_REG(reg, width, val) \
93 (bus_space_write_ ## width(rman_get_bustag((reg)), \
94 rman_get_bushandle((reg)), 0, (val)))
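
/*
 * Usage note: these macros issue a single bus_space access at offset 0 of
 * an allocated P_LVLx resource.  For example, acpi_cpu_idle() below uses
 * CPU_GET_REG(cx_next->p_lvlx, 1) to do the one-byte read that enters a
 * C2 or C3 state.
 */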

#define PM_USEC(x)      ((x) >> 2)      /* ~4 clocks per usec (3.57955 MHz) */
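/*
 * The ACPI PM timer runs at 3.579545 MHz, i.e. roughly 3.58 ticks per usec,
 * so the shift above approximates dividing the tick delta by four.  It
 * slightly underestimates the true duration (e.g. 3580 ticks, about 1000
 * usec, converts to 895), which is close enough for picking a Cx state.
 */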

#define ACPI_NOTIFY_CX_STATES   0x81    /* _CST changed. */

#define CPU_QUIRK_NO_C3         (1<<0)  /* C3-type states are not usable. */
#define CPU_QUIRK_NO_BM_CTRL    (1<<2)  /* No bus mastering control. */

#define PCI_VENDOR_INTEL        0x8086
#define PCI_DEVICE_82371AB_3    0x7113  /* PIIX4 chipset for quirks. */
#define PCI_REVISION_A_STEP     0
#define PCI_REVISION_B_STEP     1
#define PCI_REVISION_4E         2
#define PCI_REVISION_4M         3

/* Platform hardware resource information. */
static uint32_t          cpu_smi_cmd;   /* Value to write to SMI_CMD. */
static uint8_t           cpu_cst_cnt;   /* Indicate we are _CST aware. */
static int               cpu_rid;       /* Driver-wide resource id. */
static int               cpu_quirks;    /* Indicate any hardware bugs. */

/* Runtime state. */
static int               cpu_cx_count;  /* Number of valid states */
static int               cpu_non_c3;    /* Index of lowest non-C3 state. */
static u_int             cpu_cx_stats[MAX_CX_STATES]; /* Cx usage history. */

/* Values for sysctl. */
static struct sysctl_ctx_list acpi_cpu_sysctl_ctx;
static struct sysctl_oid *acpi_cpu_sysctl_tree;
static int               cpu_cx_lowest;
static char              cpu_cx_supported[64];

static device_t         *cpu_devices;
static int               cpu_ndevices;
static struct acpi_cpu_softc **cpu_softc;
ACPI_SERIAL_DECL(cpu, "ACPI CPU");

static int      acpi_cpu_probe(device_t dev);
static int      acpi_cpu_attach(device_t dev);
static int      acpi_pcpu_get_id(uint32_t idx, uint32_t *acpi_id,
                    uint32_t *cpu_id);
static struct resource_list *acpi_cpu_get_rlist(device_t dev, device_t child);
static device_t acpi_cpu_add_child(device_t dev, int order, const char *name,
                    int unit);
static int      acpi_cpu_read_ivar(device_t dev, device_t child, int index,
                    uintptr_t *result);
static int      acpi_cpu_shutdown(device_t dev);
static int      acpi_cpu_cx_probe(struct acpi_cpu_softc *sc);
static int      acpi_cpu_cx_cst(struct acpi_cpu_softc *sc);
static void     acpi_cpu_startup(void *arg);
static void     acpi_cpu_startup_cx(void);
static void     acpi_cpu_idle(void);
static void     acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context);
static int      acpi_cpu_quirks(struct acpi_cpu_softc *sc);
static int      acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS);
static int      acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);

static device_method_t acpi_cpu_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,     acpi_cpu_probe),
    DEVMETHOD(device_attach,    acpi_cpu_attach),
    DEVMETHOD(device_detach,    bus_generic_detach),
    DEVMETHOD(device_shutdown,  acpi_cpu_shutdown),
    DEVMETHOD(device_suspend,   bus_generic_suspend),
    DEVMETHOD(device_resume,    bus_generic_resume),

    /* Bus interface */
    DEVMETHOD(bus_add_child,    acpi_cpu_add_child),
    DEVMETHOD(bus_read_ivar,    acpi_cpu_read_ivar),
    DEVMETHOD(bus_get_resource_list, acpi_cpu_get_rlist),
    DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
    DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
    DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource),
    DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
    DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
    DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
    DEVMETHOD(bus_setup_intr,   bus_generic_setup_intr),
    DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),

    {0, 0}
};

static driver_t acpi_cpu_driver = {
    "cpu",
    acpi_cpu_methods,
    sizeof(struct acpi_cpu_softc),
};

static devclass_t acpi_cpu_devclass;
DRIVER_MODULE(cpu, acpi, acpi_cpu_driver, acpi_cpu_devclass, 0, 0);
MODULE_DEPEND(cpu, acpi, 1, 1, 1);

static int
acpi_cpu_probe(device_t dev)
{
    int                  acpi_id, cpu_id, cx_count;
    ACPI_BUFFER          buf;
    ACPI_HANDLE          handle;
    char                 msg[32];
    ACPI_OBJECT         *obj;
    ACPI_STATUS          status;

    if (acpi_disabled("cpu") || acpi_get_type(dev) != ACPI_TYPE_PROCESSOR)
        return (ENXIO);

    handle = acpi_get_handle(dev);
    if (cpu_softc == NULL)
        cpu_softc = malloc(sizeof(struct acpi_cpu_softc *) *
            (mp_maxid + 1), M_TEMP /* XXX */, M_WAITOK | M_ZERO);

    /* Get our Processor object. */
    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
        device_printf(dev, "probe failed to get Processor obj - %s\n",
                      AcpiFormatException(status));
        return (ENXIO);
    }
    obj = (ACPI_OBJECT *)buf.Pointer;
    if (obj->Type != ACPI_TYPE_PROCESSOR) {
        device_printf(dev, "Processor object has bad type %d\n", obj->Type);
        AcpiOsFree(obj);
        return (ENXIO);
    }

    /*
     * Find the processor associated with our unit.  We could use the
     * ProcId as a key; however, some boxes do not have the same values
     * in their Processor object as the ProcId values in the MADT.
     */
    acpi_id = obj->Processor.ProcId;
    AcpiOsFree(obj);
    if (acpi_pcpu_get_id(device_get_unit(dev), &acpi_id, &cpu_id) != 0)
        return (ENXIO);

    /*
     * Check if we already probed this processor.  We scan the bus twice
     * so it's possible we've already seen this one.
     */
    if (cpu_softc[cpu_id] != NULL)
        return (ENXIO);

    /* Get a count of Cx states for our device string. */
    cx_count = 0;
    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(handle, "_CST", NULL, &buf);
    if (ACPI_SUCCESS(status)) {
        obj = (ACPI_OBJECT *)buf.Pointer;
        if (ACPI_PKG_VALID(obj, 2))
            acpi_PkgInt32(obj, 0, &cx_count);
        AcpiOsFree(obj);
    } else {
        if (AcpiGbl_FADT->Plvl2Lat <= 100)
            cx_count++;
        if (AcpiGbl_FADT->Plvl3Lat <= 1000)
            cx_count++;
        if (cx_count > 0)
            cx_count++;
    }
    if (cx_count > 0)
        snprintf(msg, sizeof(msg), "ACPI CPU (%d Cx states)", cx_count);
    else
        strlcpy(msg, "ACPI CPU", sizeof(msg));
    device_set_desc_copy(dev, msg);

    /* Mark this processor as in-use and save our derived id for attach. */
    cpu_softc[cpu_id] = (void *)1;
    acpi_set_magic(dev, cpu_id);

    return (0);
}

static int
acpi_cpu_attach(device_t dev)
{
    ACPI_BUFFER          buf;
    ACPI_OBJECT          arg, *obj;
    ACPI_OBJECT_LIST     arglist;
    struct pcpu         *pcpu_data;
    struct acpi_cpu_softc *sc;
    struct acpi_softc   *acpi_sc;
    ACPI_STATUS          status;
    u_int                features;
    int                  cpu_id, drv_count, i;
    driver_t           **drivers;
    uint32_t             cap_set[3];

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = device_get_softc(dev);
    sc->cpu_dev = dev;
    sc->cpu_handle = acpi_get_handle(dev);
    cpu_id = acpi_get_magic(dev);
    cpu_softc[cpu_id] = sc;
    pcpu_data = pcpu_find(cpu_id);
    pcpu_data->pc_device = dev;
    sc->cpu_pcpu = pcpu_data;
    cpu_smi_cmd = AcpiGbl_FADT->SmiCmd;
    cpu_cst_cnt = AcpiGbl_FADT->CstCnt;

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
        device_printf(dev, "attach failed to get Processor obj - %s\n",
                      AcpiFormatException(status));
        return (ENXIO);
    }
    obj = (ACPI_OBJECT *)buf.Pointer;
    sc->cpu_p_blk = obj->Processor.PblkAddress;
    sc->cpu_p_blk_len = obj->Processor.PblkLength;
    sc->cpu_acpi_id = obj->Processor.ProcId;
    AcpiOsFree(obj);
    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_BLK at %#x/%d\n",
                     device_get_unit(dev), sc->cpu_p_blk, sc->cpu_p_blk_len));

    acpi_sc = acpi_device_get_parent_softc(dev);
    sysctl_ctx_init(&acpi_cpu_sysctl_ctx);
    acpi_cpu_sysctl_tree = SYSCTL_ADD_NODE(&acpi_cpu_sysctl_ctx,
        SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree), OID_AUTO, "cpu",
        CTLFLAG_RD, 0, "");

    /*
     * Before calling any CPU methods, collect child driver feature hints
     * and notify ACPI of them.
     */
    sc->cpu_features = 0;
    if (devclass_get_drivers(acpi_cpu_devclass, &drivers, &drv_count) == 0) {
        for (i = 0; i < drv_count; i++) {
            if (ACPI_GET_FEATURES(drivers[i], &features) == 0)
                sc->cpu_features |= features;
        }
        free(drivers, M_TEMP);
    }

    /*
     * CPU capabilities are specified as a buffer of 32-bit integers:
     * revision, count, and one or more capabilities.  The revision of
     * "1" is not specified anywhere but seems to match Linux.  We should
     * also support _OSC here.
     */
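    /*
     * For example, a single hinted feature word F from the child drivers
     * results in the three-integer buffer { 1, 1, F } being handed to
     * _PDC below.
     */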
    if (sc->cpu_features) {
        arglist.Pointer = &arg;
        arglist.Count = 1;
        arg.Type = ACPI_TYPE_BUFFER;
        arg.Buffer.Length = sizeof(cap_set);
        arg.Buffer.Pointer = (uint8_t *)cap_set;
        cap_set[0] = 1; /* revision */
        cap_set[1] = 1; /* number of capabilities integers */
        cap_set[2] = sc->cpu_features;
        AcpiEvaluateObject(sc->cpu_handle, "_PDC", &arglist, NULL);
    }

    /*
     * Probe for Cx state support.  If it isn't present, free up unused
     * resources.
     */
    if (acpi_cpu_cx_probe(sc) == 0) {
        status = AcpiInstallNotifyHandler(sc->cpu_handle, ACPI_DEVICE_NOTIFY,
                                          acpi_cpu_notify, sc);
        if (device_get_unit(dev) == 0)
            AcpiOsQueueForExecution(OSD_PRIORITY_LO, acpi_cpu_startup, NULL);
    } else
        sysctl_ctx_free(&acpi_cpu_sysctl_ctx);

    /* Finally, call identify and probe/attach for child devices. */
    bus_generic_probe(dev);
    bus_generic_attach(dev);

    return (0);
}

/*
 * Find the nth present CPU and return its pc_cpuid as well as set the
 * pc_acpi_id from the most reliable source.
 */
static int
acpi_pcpu_get_id(uint32_t idx, uint32_t *acpi_id, uint32_t *cpu_id)
{
    struct pcpu *pcpu_data;
    uint32_t     i;

    KASSERT(acpi_id != NULL, ("Null acpi_id"));
    KASSERT(cpu_id != NULL, ("Null cpu_id"));
    for (i = 0; i <= mp_maxid; i++) {
        if (CPU_ABSENT(i))
            continue;
        pcpu_data = pcpu_find(i);
        KASSERT(pcpu_data != NULL, ("no pcpu data for %d", i));
        if (idx-- == 0) {
            /*
             * If pc_acpi_id was not initialized (e.g., a non-APIC UP box),
             * override it with the value from the ASL.  Otherwise, if the
             * two don't match, prefer the MADT-derived value.  Finally,
             * return the pc_cpuid to reference this processor.
             */
            if (pcpu_data->pc_acpi_id == 0xffffffff)
                pcpu_data->pc_acpi_id = *acpi_id;
            else if (pcpu_data->pc_acpi_id != *acpi_id)
                *acpi_id = pcpu_data->pc_acpi_id;
            *cpu_id = pcpu_data->pc_cpuid;
            return (0);
        }
    }

    return (ESRCH);
}

static struct resource_list *
acpi_cpu_get_rlist(device_t dev, device_t child)
{
    struct acpi_cpu_device *ad;

    ad = device_get_ivars(child);
    if (ad == NULL)
        return (NULL);
    return (&ad->ad_rl);
}

static device_t
acpi_cpu_add_child(device_t dev, int order, const char *name, int unit)
{
    struct acpi_cpu_device *ad;
    device_t child;

    if ((ad = malloc(sizeof(*ad), M_TEMP, M_NOWAIT | M_ZERO)) == NULL)
        return (NULL);

    resource_list_init(&ad->ad_rl);

    child = device_add_child_ordered(dev, order, name, unit);
    if (child != NULL)
        device_set_ivars(child, ad);
    return (child);
}

static int
acpi_cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
    struct acpi_cpu_softc *sc;

    sc = device_get_softc(dev);
    switch (index) {
    case ACPI_IVAR_HANDLE:
        *result = (uintptr_t)sc->cpu_handle;
        break;
    case CPU_IVAR_PCPU:
        *result = (uintptr_t)sc->cpu_pcpu;
        break;
    default:
        return (ENOENT);
    }
    return (0);
}

static int
acpi_cpu_shutdown(device_t dev)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Allow children to shutdown first. */
    bus_generic_shutdown(dev);

    /* Disable any entry to the idle function. */
    cpu_cx_count = 0;

    /* Signal and wait for all processors to exit acpi_cpu_idle(). */
    smp_rendezvous(NULL, NULL, NULL, NULL);

    return_VALUE (0);
}

static int
acpi_cpu_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_GENERIC_ADDRESS gas;
    struct acpi_cx      *cx_ptr;
    int                  error;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /*
     * Bus mastering arbitration control is needed to keep caches coherent
     * while sleeping in C3.  If it's not present but a working flush cache
     * instruction is present, flush the caches before entering C3 instead.
     * Otherwise, just disable C3 completely.
     */
    if (AcpiGbl_FADT->V1_Pm2CntBlk == 0 || AcpiGbl_FADT->Pm2CntLen == 0) {
        if (AcpiGbl_FADT->WbInvd && AcpiGbl_FADT->WbInvdFlush == 0) {
            cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu%d: no BM control, using flush cache method\n",
                device_get_unit(sc->cpu_dev)));
        } else {
            cpu_quirks |= CPU_QUIRK_NO_C3;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu%d: no BM control, C3 not available\n",
                device_get_unit(sc->cpu_dev)));
        }
    }

    /*
     * First, check for the ACPI 2.0 _CST sleep states object.
     * If not usable, fall back to the P_BLK's P_LVL2 and P_LVL3.
     */
    sc->cpu_cx_count = 0;
    error = acpi_cpu_cx_cst(sc);
    if (error != 0) {
        cx_ptr = sc->cpu_cx_states;

        /* C1 has been required since just after ACPI 1.0 */
        cx_ptr->type = ACPI_STATE_C1;
        cx_ptr->trans_lat = 0;
        cpu_non_c3 = 0;
        cx_ptr++;
        sc->cpu_cx_count++;

        /*
         * The spec says P_BLK must be 6 bytes long.  However, some systems
         * use the length to indicate a partial set of features, so we
         * accept a length of 5 as meaning C2 is present.  Some may also
         * report 7 to indicate an additional C3, but most use _CST for
         * that (as required) and having "only" C1-C3 is not a hardship.
         */
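        /*
         * For reference, the 6-byte P_BLK layout defined by ACPI is:
         *   offset 0-3  P_CNT  (clock throttling control)
         *   offset 4    P_LVL2 (read to enter C2)
         *   offset 5    P_LVL3 (read to enter C3)
         * which is why C2 and C3 are probed at cpu_p_blk + 4 and + 5 below.
         */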
        if (sc->cpu_p_blk_len < 5)
            goto done;

        /* Validate and allocate resources for C2 (P_LVL2). */
        gas.AddressSpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
        gas.RegisterBitWidth = 8;
        if (AcpiGbl_FADT->Plvl2Lat <= 100) {
            gas.Address = sc->cpu_p_blk + 4;
            acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &cpu_rid, &gas,
                               &cx_ptr->p_lvlx);
            if (cx_ptr->p_lvlx != NULL) {
                cpu_rid++;
                cx_ptr->type = ACPI_STATE_C2;
                cx_ptr->trans_lat = AcpiGbl_FADT->Plvl2Lat;
                cpu_non_c3 = 1;
                cx_ptr++;
                sc->cpu_cx_count++;
            }
        }
        if (sc->cpu_p_blk_len < 6)
            goto done;

        /* Validate and allocate resources for C3 (P_LVL3). */
        if (AcpiGbl_FADT->Plvl3Lat <= 1000 &&
            (cpu_quirks & CPU_QUIRK_NO_C3) == 0) {
            gas.Address = sc->cpu_p_blk + 5;
            acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &cpu_rid, &gas,
                               &cx_ptr->p_lvlx);
            if (cx_ptr->p_lvlx != NULL) {
                cpu_rid++;
                cx_ptr->type = ACPI_STATE_C3;
                cx_ptr->trans_lat = AcpiGbl_FADT->Plvl3Lat;
                cx_ptr++;
                sc->cpu_cx_count++;
            }
        }
    }

done:
    /* If no valid registers were found, don't attach. */
    if (sc->cpu_cx_count == 0)
        return (ENXIO);

    /* Use an initial sleep of 1 sec. so we start in the lowest idle state. */
    sc->cpu_prev_sleep = 1000000;

    return (0);
}

/*
 * Parse a _CST package and set up its Cx states.  Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
 */
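/*
 * For reference, _CST evaluates to a package of this shape (the values
 * below are illustrative only, not from any particular machine):
 *
 *     Package () {
 *         N,                      // number of Cx sub-packages
 *         Package () {            // one sub-package per state
 *             Register (...),     //  [0] entry method, read via acpi_PkgGas
 *             2,                  //  [1] type (here C2)
 *             17,                 //  [2] worst-case latency in usec
 *             750                 //  [3] average power in mW
 *         },
 *         ...
 *     }
 */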
static int
acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
{
    struct acpi_cx      *cx_ptr;
    ACPI_STATUS          status;
    ACPI_BUFFER          buf;
    ACPI_OBJECT         *top;
    ACPI_OBJECT         *pkg;
    uint32_t             count;
    int                  i;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, "_CST", NULL, &buf);
    if (ACPI_FAILURE(status))
        return (ENXIO);

    /* _CST is a package with a count and at least one Cx package. */
    top = (ACPI_OBJECT *)buf.Pointer;
    if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) {
        device_printf(sc->cpu_dev, "Invalid _CST package\n");
        AcpiOsFree(buf.Pointer);
        return (ENXIO);
    }
    if (count != top->Package.Count - 1) {
        device_printf(sc->cpu_dev, "Invalid _CST state count (%d != %d)\n",
                      count, top->Package.Count - 1);
        count = top->Package.Count - 1;
    }
    if (count > MAX_CX_STATES) {
        device_printf(sc->cpu_dev, "_CST has too many states (%d)\n", count);
        count = MAX_CX_STATES;
    }

    /* Set up all valid states. */
    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;
    for (i = 0; i < count; i++) {
        pkg = &top->Package.Elements[i + 1];
        if (!ACPI_PKG_VALID(pkg, 4) ||
            acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 ||
            acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 ||
            acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) {

            device_printf(sc->cpu_dev, "Skipping invalid Cx state package\n");
            continue;
        }

        /* Validate the state to see if we should use it. */
        switch (cx_ptr->type) {
        case ACPI_STATE_C1:
            cpu_non_c3 = i;
            cx_ptr++;
            sc->cpu_cx_count++;
            continue;
        case ACPI_STATE_C2:
            if (cx_ptr->trans_lat > 100) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu%d: C2[%d] not available.\n",
                    device_get_unit(sc->cpu_dev), i));
                continue;
            }
            cpu_non_c3 = i;
            break;
        case ACPI_STATE_C3:
        default:
            if (cx_ptr->trans_lat > 1000 ||
                (cpu_quirks & CPU_QUIRK_NO_C3) != 0) {

                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu%d: C3[%d] not available.\n",
                    device_get_unit(sc->cpu_dev), i));
                continue;
            }
            break;
        }

#ifdef notyet
        /* Free up any previous register. */
        if (cx_ptr->p_lvlx != NULL) {
            bus_release_resource(sc->cpu_dev, 0, 0, cx_ptr->p_lvlx);
            cx_ptr->p_lvlx = NULL;
        }
#endif

        /* Allocate the control register for C2 or C3. */
        acpi_PkgGas(sc->cpu_dev, pkg, 0, &cx_ptr->res_type, &cpu_rid,
                    &cx_ptr->p_lvlx);
        if (cx_ptr->p_lvlx) {
            cpu_rid++;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu%d: Got C%d - %d latency\n",
                device_get_unit(sc->cpu_dev), cx_ptr->type,
                cx_ptr->trans_lat));
            cx_ptr++;
            sc->cpu_cx_count++;
        }
    }
    AcpiOsFree(buf.Pointer);

    return (0);
}

/*
 * Call this *after* all CPUs have been attached.
 */
static void
acpi_cpu_startup(void *arg)
{
    struct acpi_cpu_softc *sc;
    int count, i;

    /* Get set of CPU devices */
    devclass_get_devices(acpi_cpu_devclass, &cpu_devices, &cpu_ndevices);

    /* Check for quirks via the first CPU device. */
    sc = device_get_softc(cpu_devices[0]);
    acpi_cpu_quirks(sc);

    /*
     * Make sure all the processors' Cx counts match.  We should probably
     * also check the contents of each.  However, no known systems have
     * non-matching Cx counts so we'll deal with this later.
     */
    count = MAX_CX_STATES;
    for (i = 0; i < cpu_ndevices; i++) {
        sc = device_get_softc(cpu_devices[i]);
        count = min(sc->cpu_cx_count, count);
    }
    cpu_cx_count = count;

    /* Perform Cx final initialization. */
    sc = device_get_softc(cpu_devices[0]);
    if (cpu_cx_count > 0)
        acpi_cpu_startup_cx();
}

static void
acpi_cpu_startup_cx()
{
    struct acpi_cpu_softc *sc;
    struct sbuf sb;
    int i;

    /*
     * Set up the list of Cx states, eliminating C3 states by truncating
     * cpu_cx_count if quirks indicate C3 is not usable.
     */
    sc = device_get_softc(cpu_devices[0]);
    sbuf_new(&sb, cpu_cx_supported, sizeof(cpu_cx_supported), SBUF_FIXEDLEN);
    for (i = 0; i < cpu_cx_count; i++) {
        if ((cpu_quirks & CPU_QUIRK_NO_C3) == 0 ||
            sc->cpu_cx_states[i].type != ACPI_STATE_C3)
            sbuf_printf(&sb, "C%d/%d ", i + 1, sc->cpu_cx_states[i].trans_lat);
        else
            cpu_cx_count = i;
    }
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    SYSCTL_ADD_STRING(&acpi_cpu_sysctl_ctx,
                      SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
                      OID_AUTO, "cx_supported", CTLFLAG_RD, cpu_cx_supported,
                      0, "Cx/microsecond values for supported Cx states");
    SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx,
                    SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
                    OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
                    NULL, 0, acpi_cpu_cx_lowest_sysctl, "A",
                    "lowest Cx sleep state to use");
    SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx,
                    SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
                    OID_AUTO, "cx_usage", CTLTYPE_STRING | CTLFLAG_RD,
                    NULL, 0, acpi_cpu_usage_sysctl, "A",
                    "percent usage for each Cx state");

#ifdef notyet
    /* Signal platform that we can handle _CST notification. */
    if (cpu_cst_cnt != 0) {
        ACPI_LOCK(acpi);
        AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8);
        ACPI_UNLOCK(acpi);
    }
#endif

    /* Take over idling from cpu_idle_default(). */
    cpu_idle_hook = acpi_cpu_idle;
}

/*
 * Idle the CPU in the lowest state possible.  This function is called with
 * interrupts disabled.  Note that once it re-enables interrupts, a task
 * switch can occur, so do not access shared data (i.e., the softc) after
 * interrupts are re-enabled.
 */
static void
acpi_cpu_idle()
{
    struct acpi_cpu_softc *sc;
    struct acpi_cx *cx_next;
    uint32_t start_time, end_time;
    int bm_active, cx_next_idx, i;

    /* If disabled, return immediately. */
    if (cpu_cx_count == 0) {
        ACPI_ENABLE_IRQS();
        return;
    }

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no ACPI processor object for this CPU.  This occurs
     * for logical CPUs in the HTT case.
     */
    sc = cpu_softc[PCPU_GET(cpuid)];
    if (sc == NULL) {
        acpi_cpu_c1();
        return;
    }

    /*
     * If we slept 100 us or more, use the lowest Cx state.  Otherwise,
     * find the lowest state that has a latency less than or equal to
     * the length of our last sleep.
     */
    cx_next_idx = cpu_cx_lowest;
    if (sc->cpu_prev_sleep < 100)
        for (i = cpu_cx_lowest; i >= 0; i--)
            if (sc->cpu_cx_states[i].trans_lat <= sc->cpu_prev_sleep) {
                cx_next_idx = i;
                break;
            }

    /*
     * Check for bus master activity.  If there was activity, clear
     * the bit and use the lowest non-C3 state.  Note that the USB
     * driver polling for new devices keeps this bit set all the
     * time if USB is loaded.
     */
    if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
        AcpiGetRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active,
                        ACPI_MTX_DO_NOT_LOCK);
        if (bm_active != 0) {
            AcpiSetRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1,
                            ACPI_MTX_DO_NOT_LOCK);
            cx_next_idx = min(cx_next_idx, cpu_non_c3);
        }
    }

    /* Select the next state and update statistics. */
    cx_next = &sc->cpu_cx_states[cx_next_idx];
    cpu_cx_stats[cx_next_idx]++;
    KASSERT(cx_next->type != ACPI_STATE_C0, ("acpi_cpu_idle: C0 sleep"));

    /*
     * Execute HLT (or equivalent) and wait for an interrupt.  We can't
     * calculate the time spent in C1 since the place we wake up is an
     * ISR.  Assume we slept one quantum and return.
     */
    if (cx_next->type == ACPI_STATE_C1) {
        sc->cpu_prev_sleep = 1000000 / hz;
        acpi_cpu_c1();
        return;
    }

    /*
     * For C3, disable bus master arbitration and enable bus master wake
     * if BM control is available, otherwise flush the CPU cache.
     */
    if (cx_next->type == ACPI_STATE_C3) {
        if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
            AcpiSetRegister(ACPI_BITREG_ARB_DISABLE, 1, ACPI_MTX_DO_NOT_LOCK);
            AcpiSetRegister(ACPI_BITREG_BUS_MASTER_RLD, 1,
                            ACPI_MTX_DO_NOT_LOCK);
        } else
            ACPI_FLUSH_CPU_CACHE();
    }

    /*
     * Read from P_LVLx to enter C2(+), checking time spent asleep.
     * Use the ACPI timer for measuring sleep time.  Since we need to
     * get the time very close to the CPU start/stop clock logic, this
     * is the only reliable time source.
     */
    AcpiHwLowLevelRead(32, &start_time, &AcpiGbl_FADT->XPmTmrBlk);
    CPU_GET_REG(cx_next->p_lvlx, 1);

    /*
     * Read the end time twice.  Since it may take an arbitrary time
     * to enter the idle state, the first read may be executed before
     * the processor has stopped.  Doing it again provides enough
     * margin that we are certain to have a correct value.
     */
    AcpiHwLowLevelRead(32, &end_time, &AcpiGbl_FADT->XPmTmrBlk);
    AcpiHwLowLevelRead(32, &end_time, &AcpiGbl_FADT->XPmTmrBlk);

    /* Enable bus master arbitration and disable bus master wakeup. */
    if (cx_next->type == ACPI_STATE_C3 &&
        (cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
        AcpiSetRegister(ACPI_BITREG_ARB_DISABLE, 0, ACPI_MTX_DO_NOT_LOCK);
        AcpiSetRegister(ACPI_BITREG_BUS_MASTER_RLD, 0, ACPI_MTX_DO_NOT_LOCK);
    }

    /* Find the actual time asleep in microseconds, minus overhead. */
    end_time = acpi_TimerDelta(end_time, start_time);
    sc->cpu_prev_sleep = PM_USEC(end_time) - cx_next->trans_lat;
    ACPI_ENABLE_IRQS();
}

/*
 * Re-evaluate the _CST object when we are notified that it changed.
 *
 * XXX Re-evaluation disabled until locking is done.
 */
static void
acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context)
{
    struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)context;

    if (notify != ACPI_NOTIFY_CX_STATES)
        return;

    device_printf(sc->cpu_dev, "Cx states changed\n");
    /* acpi_cpu_cx_cst(sc); */
}

static int
acpi_cpu_quirks(struct acpi_cpu_softc *sc)
{
    device_t acpi_dev;

    /*
     * C3 on multiple CPUs requires using the expensive flush cache
     * instruction.
     */
    if (mp_ncpus > 1)
        cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;

    /* Look for various quirks of the PIIX4 part. */
    acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
    if (acpi_dev != NULL) {
        switch (pci_get_revid(acpi_dev)) {
        /*
         * Disable C3 support for all PIIX4 chipsets.  Some of these parts
         * do not report the BMIDE status to the BM status register and
         * others have a livelock bug if Type-F DMA is enabled.  Linux
         * works around the BMIDE bug by reading the BM status directly
         * but we take the simpler approach of disabling C3 for these
         * parts.
         *
         * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
         * Livelock") from the January 2002 PIIX4 specification update.
         * Applies to all PIIX4 models.
         */
        case PCI_REVISION_4E:
        case PCI_REVISION_4M:
            cpu_quirks |= CPU_QUIRK_NO_C3;
            break;
        default:
            break;
        }
    }

    return (0);
}

static int
acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct sbuf sb;
    char buf[128];
    int i;
    uintmax_t fract, sum, whole;

    sum = 0;
    for (i = 0; i < cpu_cx_count; i++)
        sum += cpu_cx_stats[i];
    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
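    /*
     * Render each counter as a percentage of the total with two decimal
     * places using integer math only: whole / sum gives the integer
     * percent and (whole % sum) * 100 / sum the fraction, so 1 of 3
     * sleeps prints as "33.33%".
     */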
    for (i = 0; i < cpu_cx_count; i++) {
        if (sum > 0) {
            whole = (uintmax_t)cpu_cx_stats[i] * 100;
            fract = (whole % sum) * 100;
            sbuf_printf(&sb, "%u.%02u%% ", (u_int)(whole / sum),
                        (u_int)(fract / sum));
        } else
            sbuf_printf(&sb, "0%% ");
    }
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
    sbuf_delete(&sb);

    return (0);
}

static int
acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    char state[8];
    int val, error, i;

    sc = device_get_softc(cpu_devices[0]);
    snprintf(state, sizeof(state), "C%d", cpu_cx_lowest + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
        return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
        return (EINVAL);
    val = (int) strtol(state + 1, NULL, 10) - 1;
    if (val < 0 || val > cpu_cx_count - 1)
        return (EINVAL);

    ACPI_SERIAL_BEGIN(cpu);
    cpu_cx_lowest = val;

    /* If not disabling, cache the new lowest non-C3 state. */
    cpu_non_c3 = 0;
    for (i = cpu_cx_lowest; i >= 0; i--) {
        if (sc->cpu_cx_states[i].type < ACPI_STATE_C3) {
            cpu_non_c3 = i;
            break;
        }
    }

    /* Reset the statistics counters. */
    bzero(cpu_cx_stats, sizeof(cpu_cx_stats));
    ACPI_SERIAL_END(cpu);

    return (0);
}