/*-
 * Copyright (c) 2003-2005 Nate Lawson (SDG)
 * Copyright (c) 2001 Michael Smith
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/pcpu.h>
#include <sys/power.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/smp.h>

#include <dev/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <sys/rman.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>

#include <dev/acpica/acpivar.h>

/*
 * Support for ACPI Processor devices, including C[1-3] sleep states.
 */

/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT    ACPI_PROCESSOR
ACPI_MODULE_NAME("PROCESSOR")

struct acpi_cx {
    struct resource *p_lvlx;    /* Register to read to enter state. */
    uint32_t         type;      /* C1-3 (C4 and up treated as C3). */
    uint32_t         trans_lat; /* Transition latency (usec). */
    uint32_t         power;     /* Power consumed (mW). */
    int              res_type;  /* Resource type for p_lvlx. */
    int              res_rid;   /* Resource ID for p_lvlx. */
};
#define MAX_CX_STATES    8

struct acpi_cpu_softc {
    device_t         cpu_dev;
    ACPI_HANDLE      cpu_handle;
    struct pcpu     *cpu_pcpu;
    uint32_t         cpu_acpi_id;    /* ACPI processor id */
    uint32_t         cpu_p_blk;      /* ACPI P_BLK location */
    uint32_t         cpu_p_blk_len;  /* P_BLK length (must be 6). */
    struct acpi_cx   cpu_cx_states[MAX_CX_STATES];
    int              cpu_cx_count;   /* Number of valid Cx states. */
    int              cpu_prev_sleep; /* Last idle sleep duration. */
    int              cpu_features;   /* Child driver supported features. */
    /* Runtime state. */
    int              cpu_non_c3;     /* Index of lowest non-C3 state. */
    u_int            cpu_cx_stats[MAX_CX_STATES]; /* Cx usage history. */
    /* Values for sysctl. */
    struct sysctl_ctx_list cpu_sysctl_ctx;
    struct sysctl_oid *cpu_sysctl_tree;
    int              cpu_cx_lowest;
    int              cpu_cx_lowest_lim;
    int              cpu_disable_idle; /* Disable entry to idle function */
    char             cpu_cx_supported[64];
};

struct acpi_cpu_device {
    struct resource_list ad_rl;
};

#define CPU_GET_REG(reg, width)                                 \
    (bus_space_read_ ## width(rman_get_bustag((reg)),           \
        rman_get_bushandle((reg)), 0))
#define CPU_SET_REG(reg, width, val)                            \
    (bus_space_write_ ## width(rman_get_bustag((reg)),          \
        rman_get_bushandle((reg)), 0, (val)))

#define PM_USEC(x)    ((x) >> 2)    /* ~4 clocks per usec (3.579545 MHz) */

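/*
 * The ACPI PM timer runs at 3.579545 MHz, so dividing by 4 rather than
 * by ~3.58 underestimates the elapsed time by roughly 10% (e.g. 14318
 * timer ticks is ~4000 us, but PM_USEC yields 3579).  This is close
 * enough for the sleep-time feedback used in acpi_cpu_idle().
 */
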
#define ACPI_NOTIFY_CX_STATES    0x81    /* _CST changed. */

#define CPU_QUIRK_NO_C3      (1<<0)    /* C3-type states are not usable. */
#define CPU_QUIRK_NO_BM_CTRL (1<<2)    /* No bus mastering control. */

#define PCI_VENDOR_INTEL     0x8086
#define PCI_DEVICE_82371AB_3 0x7113    /* PIIX4 chipset for quirks. */
#define PCI_REVISION_A_STEP  0
#define PCI_REVISION_B_STEP  1
#define PCI_REVISION_4E      2
#define PCI_REVISION_4M      3
#define PIIX4_DEVACTB_REG    0x58
#define PIIX4_BRLD_EN_IRQ0   (1<<0)
#define PIIX4_BRLD_EN_IRQ    (1<<1)
#define PIIX4_BRLD_EN_IRQ8   (1<<5)
#define PIIX4_STOP_BREAK_MASK (PIIX4_BRLD_EN_IRQ0 | PIIX4_BRLD_EN_IRQ | PIIX4_BRLD_EN_IRQ8)
#define PIIX4_PCNTRL_BST_EN  (1<<10)

/* Allow users to ignore processor orders in MADT. */
static int cpu_unordered;
TUNABLE_INT("debug.acpi.cpu_unordered", &cpu_unordered);
SYSCTL_INT(_debug_acpi, OID_AUTO, cpu_unordered, CTLFLAG_RDTUN,
    &cpu_unordered, 0,
    "Do not use the MADT to match ACPI Processor objects to CPUs.");

/* Platform hardware resource information. */
static uint32_t cpu_smi_cmd;    /* Value to write to SMI_CMD. */
static uint8_t  cpu_cst_cnt;    /* Indicate we are _CST aware. */
static int      cpu_quirks;     /* Indicate any hardware bugs. */

/* Values for sysctl. */
static struct sysctl_ctx_list cpu_sysctl_ctx;
static struct sysctl_oid *cpu_sysctl_tree;
static int      cpu_cx_generic;
static int      cpu_cx_lowest_lim;

static device_t *cpu_devices;
static int       cpu_ndevices;
static struct acpi_cpu_softc **cpu_softc;
ACPI_SERIAL_DECL(cpu, "ACPI CPU");

static int    acpi_cpu_probe(device_t dev);
static int    acpi_cpu_attach(device_t dev);
static int    acpi_cpu_suspend(device_t dev);
static int    acpi_cpu_resume(device_t dev);
static int    acpi_pcpu_get_id(device_t dev, uint32_t *acpi_id,
    uint32_t *cpu_id);
static struct resource_list *acpi_cpu_get_rlist(device_t dev, device_t child);
static device_t acpi_cpu_add_child(device_t dev, u_int order, const char *name,
    int unit);
static int    acpi_cpu_read_ivar(device_t dev, device_t child, int index,
    uintptr_t *result);
static int    acpi_cpu_shutdown(device_t dev);
static void   acpi_cpu_cx_probe(struct acpi_cpu_softc *sc);
static void   acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc);
static int    acpi_cpu_cx_cst(struct acpi_cpu_softc *sc);
static void   acpi_cpu_startup(void *arg);
static void   acpi_cpu_startup_cx(struct acpi_cpu_softc *sc);
static void   acpi_cpu_cx_list(struct acpi_cpu_softc *sc);
static void   acpi_cpu_idle(void);
static void   acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context);
static void   acpi_cpu_quirks(void);
static void   acpi_cpu_quirks_piix4(void);
static int    acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS);
static int    acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc);
static int    acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int    acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);

static device_method_t acpi_cpu_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,     acpi_cpu_probe),
    DEVMETHOD(device_attach,    acpi_cpu_attach),
    DEVMETHOD(device_detach,    bus_generic_detach),
    DEVMETHOD(device_shutdown,  acpi_cpu_shutdown),
    DEVMETHOD(device_suspend,   acpi_cpu_suspend),
    DEVMETHOD(device_resume,    acpi_cpu_resume),

    /* Bus interface */
    DEVMETHOD(bus_add_child,    acpi_cpu_add_child),
    DEVMETHOD(bus_read_ivar,    acpi_cpu_read_ivar),
    DEVMETHOD(bus_get_resource_list, acpi_cpu_get_rlist),
    DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
    DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
    DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource),
    DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
    DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
    DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
    DEVMETHOD(bus_setup_intr,   bus_generic_setup_intr),
    DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),

    DEVMETHOD_END
};

static driver_t acpi_cpu_driver = {
    "cpu",
    acpi_cpu_methods,
    sizeof(struct acpi_cpu_softc),
};

static devclass_t acpi_cpu_devclass;
DRIVER_MODULE(cpu, acpi, acpi_cpu_driver, acpi_cpu_devclass, 0, 0);
MODULE_DEPEND(cpu, acpi, 1, 1, 1);

static int
acpi_cpu_probe(device_t dev)
{
    int acpi_id, cpu_id;
    ACPI_BUFFER buf;
    ACPI_HANDLE handle;
    ACPI_OBJECT *obj;
    ACPI_STATUS status;

    if (acpi_disabled("cpu") || acpi_get_type(dev) != ACPI_TYPE_PROCESSOR)
        return (ENXIO);

    handle = acpi_get_handle(dev);
    if (cpu_softc == NULL)
        cpu_softc = malloc(sizeof(struct acpi_cpu_softc *) *
            (mp_maxid + 1), M_TEMP /* XXX */, M_WAITOK | M_ZERO);

    /* Get our Processor object. */
    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
        device_printf(dev, "probe failed to get Processor obj - %s\n",
            AcpiFormatException(status));
        return (ENXIO);
    }
    obj = (ACPI_OBJECT *)buf.Pointer;
    if (obj->Type != ACPI_TYPE_PROCESSOR) {
        device_printf(dev, "Processor object has bad type %d\n", obj->Type);
        AcpiOsFree(obj);
        return (ENXIO);
    }

    /*
     * Find the processor associated with our unit.  We could use the
     * ProcId as a key; however, some boxes do not have the same values
     * in their Processor object as the ProcId values in the MADT.
     */
    acpi_id = obj->Processor.ProcId;
    AcpiOsFree(obj);
    if (acpi_pcpu_get_id(dev, &acpi_id, &cpu_id) != 0)
        return (ENXIO);

    /*
     * Check if we already probed this processor.  We scan the bus twice
     * so it's possible we've already seen this one.
     */
    if (cpu_softc[cpu_id] != NULL)
        return (ENXIO);

    /* Mark this processor as in-use and save our derived id for attach. */
    cpu_softc[cpu_id] = (void *)1;
    acpi_set_private(dev, (void *)(intptr_t)cpu_id);
    device_set_desc(dev, "ACPI CPU");

    return (0);
}

static int
acpi_cpu_attach(device_t dev)
{
    ACPI_BUFFER buf;
    ACPI_OBJECT arg[4], *obj;
    ACPI_OBJECT_LIST arglist;
    struct pcpu *pcpu_data;
    struct acpi_cpu_softc *sc;
    struct acpi_softc *acpi_sc;
    ACPI_STATUS status;
    u_int features;
    int cpu_id, drv_count, i;
    driver_t **drivers;
    uint32_t cap_set[3];

    /* UUID needed by _OSC evaluation */
    static uint8_t cpu_oscuuid[16] = { 0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29,
                                       0xBE, 0x47, 0x9E, 0xBD, 0xD8, 0x70,
                                       0x58, 0x71, 0x39, 0x53 };

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = device_get_softc(dev);
    sc->cpu_dev = dev;
    sc->cpu_handle = acpi_get_handle(dev);
    cpu_id = (int)(intptr_t)acpi_get_private(dev);
    cpu_softc[cpu_id] = sc;
    pcpu_data = pcpu_find(cpu_id);
    pcpu_data->pc_device = dev;
    sc->cpu_pcpu = pcpu_data;
    cpu_smi_cmd = AcpiGbl_FADT.SmiCommand;
    cpu_cst_cnt = AcpiGbl_FADT.CstControl;

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
        device_printf(dev, "attach failed to get Processor obj - %s\n",
            AcpiFormatException(status));
        return (ENXIO);
    }
    obj = (ACPI_OBJECT *)buf.Pointer;
    sc->cpu_p_blk = obj->Processor.PblkAddress;
    sc->cpu_p_blk_len = obj->Processor.PblkLength;
    sc->cpu_acpi_id = obj->Processor.ProcId;
    AcpiOsFree(obj);
    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_BLK at %#x/%d\n",
        device_get_unit(dev), sc->cpu_p_blk, sc->cpu_p_blk_len));

    /*
     * If this is the first CPU we attach, create and initialize the generic
     * resources that will be used by all ACPI CPU devices.
     */
    if (device_get_unit(dev) == 0) {
        /* Assume we won't be using generic Cx mode by default. */
        cpu_cx_generic = FALSE;

        /* Install the hw.acpi.cpu sysctl tree. */
        acpi_sc = acpi_device_get_parent_softc(dev);
        sysctl_ctx_init(&cpu_sysctl_ctx);
        cpu_sysctl_tree = SYSCTL_ADD_NODE(&cpu_sysctl_ctx,
            SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree), OID_AUTO, "cpu",
            CTLFLAG_RD, 0, "node for CPU children");

        /* Queue the post cpu-probing task handler. */
        AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cpu_startup, NULL);
    }

    /*
     * Before calling any CPU methods, collect child driver feature hints
     * and notify ACPI of them.  We support unified SMP power control
     * so advertise this ourselves.  Note this is not the same as independent
     * SMP control, where each CPU can have different settings.
     */
    sc->cpu_features = ACPI_CAP_SMP_SAME | ACPI_CAP_SMP_SAME_C3;
    if (devclass_get_drivers(acpi_cpu_devclass, &drivers, &drv_count) == 0) {
        for (i = 0; i < drv_count; i++) {
            if (ACPI_GET_FEATURES(drivers[i], &features) == 0)
                sc->cpu_features |= features;
        }
        free(drivers, M_TEMP);
    }

    /*
     * CPU capabilities are specified in the Intel Processor
     * Vendor-Specific ACPI Interface Specification.
     */
    if (sc->cpu_features) {
        arglist.Pointer = arg;
        arglist.Count = 4;
        arg[0].Type = ACPI_TYPE_BUFFER;
        arg[0].Buffer.Length = sizeof(cpu_oscuuid);
        arg[0].Buffer.Pointer = cpu_oscuuid;    /* UUID */
        arg[1].Type = ACPI_TYPE_INTEGER;
        arg[1].Integer.Value = 1;        /* revision */
        arg[2].Type = ACPI_TYPE_INTEGER;
        arg[2].Integer.Value = 1;        /* count */
        arg[3].Type = ACPI_TYPE_BUFFER;
        arg[3].Buffer.Length = sizeof(cap_set);    /* Capabilities buffer */
        arg[3].Buffer.Pointer = (uint8_t *)cap_set;
        cap_set[0] = 0;                  /* status */
        cap_set[1] = sc->cpu_features;
        status = AcpiEvaluateObject(sc->cpu_handle, "_OSC", &arglist, NULL);
        if (ACPI_SUCCESS(status)) {
            if (cap_set[0] != 0)
                device_printf(dev, "_OSC returned status %#x\n", cap_set[0]);
        } else {
            arglist.Pointer = arg;
            arglist.Count = 1;
            arg[0].Type = ACPI_TYPE_BUFFER;
            arg[0].Buffer.Length = sizeof(cap_set);
            arg[0].Buffer.Pointer = (uint8_t *)cap_set;
            cap_set[0] = 1;    /* revision */
            cap_set[1] = 1;    /* number of capabilities integers */
            cap_set[2] = sc->cpu_features;
            AcpiEvaluateObject(sc->cpu_handle, "_PDC", &arglist, NULL);
        }
    }
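
    /*
     * The _PDC fallback above is the older Processor Driver Capabilities
     * interface; it conveys the same capability bits as _OSC and is what
     * firmware predating _OSC support expects.
     */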

    /* Probe for Cx state support. */
    acpi_cpu_cx_probe(sc);

    return (0);
}

static void
acpi_cpu_postattach(void *unused __unused)
{
    device_t *devices;
    int err;
    int i, n;

    err = devclass_get_devices(acpi_cpu_devclass, &devices, &n);
    if (err != 0) {
        printf("devclass_get_devices(acpi_cpu_devclass) failed\n");
        return;
    }
    for (i = 0; i < n; i++)
        bus_generic_probe(devices[i]);
    for (i = 0; i < n; i++)
        bus_generic_attach(devices[i]);
    free(devices, M_TEMP);
}

SYSINIT(acpi_cpu, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE,
    acpi_cpu_postattach, NULL);

static void
disable_idle(struct acpi_cpu_softc *sc)
{
    cpumask_t cpuset;

    cpuset = sc->cpu_pcpu->pc_cpumask;
    sc->cpu_disable_idle = TRUE;

    /*
     * Ensure that the CPU is not in an idle state or in acpi_cpu_idle().
     * This relies on the fact that the rendezvous IPI cannot be delivered
     * to a context where interrupts are disabled: acpi_cpu_idle() runs in
     * exactly such a context and only re-enables interrupts right before
     * returning.
     */
    smp_rendezvous_cpus(cpuset, smp_no_rendevous_barrier, NULL,
        smp_no_rendevous_barrier, NULL);
}

static void
enable_idle(struct acpi_cpu_softc *sc)
{

    sc->cpu_disable_idle = FALSE;
}

static int
is_idle_disabled(struct acpi_cpu_softc *sc)
{

    return (sc->cpu_disable_idle);
}

/*
 * Disable any entry to the idle function during suspend and re-enable it
 * during resume.
 */
static int
acpi_cpu_suspend(device_t dev)
{
    int error;

    error = bus_generic_suspend(dev);
    if (error)
        return (error);
    disable_idle(device_get_softc(dev));
    return (0);
}

static int
acpi_cpu_resume(device_t dev)
{

    enable_idle(device_get_softc(dev));
    return (bus_generic_resume(dev));
}

/*
 * Find the processor associated with a given ACPI ID.  By default,
 * use the MADT to map ACPI IDs to APIC IDs and use that to locate a
 * processor.  Some systems, however, have inconsistent ASL and MADT.
 * For these systems the cpu_unordered tunable can be set, in which
 * case we assume that Processor objects are listed in the same order
 * in both the MADT and the ASL.
 */
static int
acpi_pcpu_get_id(device_t dev, uint32_t *acpi_id, uint32_t *cpu_id)
{
    struct pcpu *pc;
    uint32_t i, idx;

    KASSERT(acpi_id != NULL, ("Null acpi_id"));
    KASSERT(cpu_id != NULL, ("Null cpu_id"));
    idx = device_get_unit(dev);

    /*
     * If pc_acpi_id for CPU 0 is not initialized (e.g. on a non-APIC
     * UP box), use the ACPI ID from the first processor we find.
     */
    if (idx == 0 && mp_ncpus == 1) {
        pc = pcpu_find(0);
        if (pc->pc_acpi_id == 0xffffffff)
            pc->pc_acpi_id = *acpi_id;
        *cpu_id = 0;
        return (0);
    }

    CPU_FOREACH(i) {
        pc = pcpu_find(i);
        KASSERT(pc != NULL, ("no pcpu data for %d", i));
        if (cpu_unordered) {
            if (idx-- == 0) {
                /*
                 * If pc_acpi_id doesn't match the ACPI ID from the
                 * ASL, prefer the MADT-derived value.
                 */
                if (pc->pc_acpi_id != *acpi_id)
                    *acpi_id = pc->pc_acpi_id;
                *cpu_id = pc->pc_cpuid;
                return (0);
            }
        } else {
            if (pc->pc_acpi_id == *acpi_id) {
                if (bootverbose)
                    device_printf(dev,
                        "Processor %s (ACPI ID %u) -> APIC ID %d\n",
                        acpi_name(acpi_get_handle(dev)), *acpi_id,
                        pc->pc_cpuid);
                *cpu_id = pc->pc_cpuid;
                return (0);
            }
        }
    }

    if (bootverbose)
        printf("ACPI: Processor %s (ACPI ID %u) ignored\n",
            acpi_name(acpi_get_handle(dev)), *acpi_id);

    return (ESRCH);
}

static struct resource_list *
acpi_cpu_get_rlist(device_t dev, device_t child)
{
    struct acpi_cpu_device *ad;

    ad = device_get_ivars(child);
    if (ad == NULL)
        return (NULL);
    return (&ad->ad_rl);
}

static device_t
acpi_cpu_add_child(device_t dev, u_int order, const char *name, int unit)
{
    struct acpi_cpu_device *ad;
    device_t child;

    if ((ad = malloc(sizeof(*ad), M_TEMP, M_NOWAIT | M_ZERO)) == NULL)
        return (NULL);

    resource_list_init(&ad->ad_rl);

    child = device_add_child_ordered(dev, order, name, unit);
    if (child != NULL)
        device_set_ivars(child, ad);
    else
        free(ad, M_TEMP);
    return (child);
}

static int
acpi_cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
    struct acpi_cpu_softc *sc;

    sc = device_get_softc(dev);
    switch (index) {
    case ACPI_IVAR_HANDLE:
        *result = (uintptr_t)sc->cpu_handle;
        break;
    case CPU_IVAR_PCPU:
        *result = (uintptr_t)sc->cpu_pcpu;
        break;
    default:
        return (ENOENT);
    }
    return (0);
}

static int
acpi_cpu_shutdown(device_t dev)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Allow children to shutdown first. */
    bus_generic_shutdown(dev);

    /*
     * Disable any entry to the idle function.
     */
    disable_idle(device_get_softc(dev));

    /*
     * CPU devices are not truly detached and remain referenced,
     * so their resources are not freed.
     */

    return_VALUE (0);
}

static void
acpi_cpu_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cpu_prev_sleep = 1000000;
    sc->cpu_cx_lowest = 0;
    sc->cpu_cx_lowest_lim = 0;

    /*
     * Check for the ACPI 2.0 _CST sleep states object.  If we can't find
     * any, we'll revert to the generic FADT/P_BLK Cx control method, which
     * is handled by acpi_cpu_startup().  Probing for generic Cx states must
     * be deferred until all CPUs in the system have been probed, since we
     * may already have found CPUs with valid _CST packages.
     */
    if (!cpu_cx_generic && acpi_cpu_cx_cst(sc) != 0) {
        /*
         * We were unable to find a _CST package for this CPU or there
         * was an error parsing it.  Switch back to generic mode.
         */
        cpu_cx_generic = TRUE;
        if (bootverbose)
            device_printf(sc->cpu_dev, "switching to generic Cx mode\n");
    }

    /*
     * TODO: The _CSD package should be checked here.
     */
}

static void
acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_GENERIC_ADDRESS gas;
    struct acpi_cx *cx_ptr;

    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cpu_prev_sleep = 1000000;

    /* C1 has been required since just after ACPI 1.0. */
    cx_ptr->type = ACPI_STATE_C1;
    cx_ptr->trans_lat = 0;
    cx_ptr++;
    sc->cpu_non_c3 = sc->cpu_cx_count;
    sc->cpu_cx_count++;

    /*
     * The spec says P_BLK must be 6 bytes long.  However, some systems
     * use a shorter length to indicate that only a subset of features is
     * present, so treat a length of 5 as meaning C2 is available.  Some
     * may also report 7 to indicate another C3, but most use _CST for
     * that (as required), and having "only" C1-C3 is not a hardship.
     */
    if (sc->cpu_p_blk_len < 5)
        return;

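    /*
     * Per the ACPI spec, a FADT C2 latency greater than 100 us (or a C3
     * latency greater than 1000 us) indicates that the corresponding
     * state is not supported, hence the bounds checked below.
     */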
    /* Validate and allocate resources for C2 (P_LVL2). */
    gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
    gas.BitWidth = 8;
    if (AcpiGbl_FADT.C2Latency <= 100) {
        gas.Address = sc->cpu_p_blk + 4;
        cx_ptr->res_rid = 0;
        acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &cx_ptr->res_rid,
            &gas, &cx_ptr->p_lvlx, RF_SHAREABLE);
        if (cx_ptr->p_lvlx != NULL) {
            cx_ptr->type = ACPI_STATE_C2;
            cx_ptr->trans_lat = AcpiGbl_FADT.C2Latency;
            cx_ptr++;
            sc->cpu_non_c3 = sc->cpu_cx_count;
            sc->cpu_cx_count++;
        }
    }
    if (sc->cpu_p_blk_len < 6)
        return;

    /* Validate and allocate resources for C3 (P_LVL3). */
    if (AcpiGbl_FADT.C3Latency <= 1000 && !(cpu_quirks & CPU_QUIRK_NO_C3)) {
        gas.Address = sc->cpu_p_blk + 5;
        cx_ptr->res_rid = 1;
        acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &cx_ptr->res_rid,
            &gas, &cx_ptr->p_lvlx, RF_SHAREABLE);
        if (cx_ptr->p_lvlx != NULL) {
            cx_ptr->type = ACPI_STATE_C3;
            cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency;
            cx_ptr++;
            sc->cpu_cx_count++;
        }
    }
}

/*
 * Parse a _CST package and set up its Cx states.  Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
 */
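/*
 * Per the ACPI spec, a _CST object has the form:
 *     Package { Count, Package { Register, Type, Latency, Power }, ... }
 * where Register is a buffer describing how to enter the state, Type is
 * the C-state number, Latency is the worst-case entry/exit latency in
 * usec, and Power is the average consumption in mW.
 */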
static int
acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
{
    struct acpi_cx *cx_ptr;
    ACPI_STATUS status;
    ACPI_BUFFER buf;
    ACPI_OBJECT *top;
    ACPI_OBJECT *pkg;
    uint32_t count;
    int i;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, "_CST", NULL, &buf);
    if (ACPI_FAILURE(status))
        return (ENXIO);

    /* _CST is a package with a count and at least one Cx package. */
    top = (ACPI_OBJECT *)buf.Pointer;
    if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) {
        device_printf(sc->cpu_dev, "invalid _CST package\n");
        AcpiOsFree(buf.Pointer);
        return (ENXIO);
    }
    if (count != top->Package.Count - 1) {
        device_printf(sc->cpu_dev, "invalid _CST state count (%d != %d)\n",
            count, top->Package.Count - 1);
        count = top->Package.Count - 1;
    }
    if (count > MAX_CX_STATES) {
        device_printf(sc->cpu_dev, "_CST has too many states (%d)\n", count);
        count = MAX_CX_STATES;
    }

    sc->cpu_non_c3 = 0;
    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;

    /*
     * C1 has been required since just after ACPI 1.0.
     * Reserve the first slot for it.
     */
    cx_ptr->type = ACPI_STATE_C0;
    cx_ptr++;
    sc->cpu_cx_count++;

    /* Set up all valid states. */
    for (i = 0; i < count; i++) {
        pkg = &top->Package.Elements[i + 1];
        if (!ACPI_PKG_VALID(pkg, 4) ||
            acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 ||
            acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 ||
            acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) {
            device_printf(sc->cpu_dev, "skipping invalid Cx state package\n");
            continue;
        }

        /* Validate the state to see if we should use it. */
        switch (cx_ptr->type) {
        case ACPI_STATE_C1:
            if (sc->cpu_cx_states[0].type == ACPI_STATE_C0) {
                /* This is the first C1 state.  Use the reserved slot. */
                sc->cpu_cx_states[0] = *cx_ptr;
            } else {
                sc->cpu_non_c3 = sc->cpu_cx_count;
                cx_ptr++;
                sc->cpu_cx_count++;
            }
            continue;
        case ACPI_STATE_C2:
            sc->cpu_non_c3 = sc->cpu_cx_count;
            break;
        case ACPI_STATE_C3:
        default:
            if ((cpu_quirks & CPU_QUIRK_NO_C3) != 0) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu%d: C3[%d] not available.\n",
                    device_get_unit(sc->cpu_dev), i));
                continue;
            }
            break;
        }

        /* Free up any previous register. */
        if (cx_ptr->p_lvlx != NULL) {
            bus_release_resource(sc->cpu_dev, cx_ptr->res_type,
                cx_ptr->res_rid, cx_ptr->p_lvlx);
            cx_ptr->p_lvlx = NULL;
        }

        /* Allocate the control register for C2 or C3. */
        cx_ptr->res_rid = sc->cpu_cx_count;
        acpi_PkgGas(sc->cpu_dev, pkg, 0, &cx_ptr->res_type, &cx_ptr->res_rid,
            &cx_ptr->p_lvlx, RF_SHAREABLE);
        if (cx_ptr->p_lvlx) {
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu%d: Got C%d - %d latency\n",
                device_get_unit(sc->cpu_dev), cx_ptr->type,
                cx_ptr->trans_lat));
            cx_ptr++;
            sc->cpu_cx_count++;
        }
    }
    AcpiOsFree(buf.Pointer);

    /* If no C1 state was found, synthesize one now. */
    cx_ptr = sc->cpu_cx_states;
    if (cx_ptr->type == ACPI_STATE_C0) {
        cx_ptr->type = ACPI_STATE_C1;
        cx_ptr->trans_lat = 0;
    }

    return (0);
}

/*
 * Call this *after* all CPUs have been attached.
 */
static void
acpi_cpu_startup(void *arg)
{
    struct acpi_cpu_softc *sc;
    int i;

    /* Get the set of CPU devices. */
    devclass_get_devices(acpi_cpu_devclass, &cpu_devices, &cpu_ndevices);

    /*
     * Set up any quirks that might be necessary now that we have probed
     * all the CPUs.
     */
    acpi_cpu_quirks();

    if (cpu_cx_generic) {
        /*
         * We are using generic Cx mode, so probe for the available Cx
         * states for all processors.
         */
        for (i = 0; i < cpu_ndevices; i++) {
            sc = device_get_softc(cpu_devices[i]);
            acpi_cpu_generic_cx_probe(sc);
        }
    } else {
        /*
         * We are using _CST mode, so remove the C3 state if necessary.
         * Now that we know for sure that we will be using _CST mode,
         * install our notify handler.
         */
        for (i = 0; i < cpu_ndevices; i++) {
            sc = device_get_softc(cpu_devices[i]);
            if (cpu_quirks & CPU_QUIRK_NO_C3) {
                sc->cpu_cx_count = sc->cpu_non_c3 + 1;
            }
            AcpiInstallNotifyHandler(sc->cpu_handle, ACPI_DEVICE_NOTIFY,
                acpi_cpu_notify, sc);
        }
    }

    /* Perform final Cx initialization. */
    for (i = 0; i < cpu_ndevices; i++) {
        sc = device_get_softc(cpu_devices[i]);
        acpi_cpu_startup_cx(sc);
    }

    /* Add a sysctl handler for the global lowest Cx setting. */
    SYSCTL_ADD_PROC(&cpu_sysctl_ctx, SYSCTL_CHILDREN(cpu_sysctl_tree),
        OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
        NULL, 0, acpi_cpu_global_cx_lowest_sysctl, "A",
        "Global lowest Cx sleep state to use");

    /* Take over idling from cpu_idle_default(). */
    cpu_cx_lowest_lim = 0;
    for (i = 0; i < cpu_ndevices; i++) {
        sc = device_get_softc(cpu_devices[i]);
        enable_idle(sc);
    }
    cpu_idle_hook = acpi_cpu_idle;
}

static void
acpi_cpu_cx_list(struct acpi_cpu_softc *sc)
{
    struct sbuf sb;
    int i;

    /*
     * Set up the list of Cx states.
     */
    sc->cpu_non_c3 = 0;
    sbuf_new(&sb, sc->cpu_cx_supported, sizeof(sc->cpu_cx_supported),
        SBUF_FIXEDLEN);
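    /* Each entry is formatted as "C<n>/<type>/<latency in usec>". */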
    for (i = 0; i < sc->cpu_cx_count; i++) {
        sbuf_printf(&sb, "C%d/%d/%d ", i + 1, sc->cpu_cx_states[i].type,
            sc->cpu_cx_states[i].trans_lat);
        if (sc->cpu_cx_states[i].type < ACPI_STATE_C3)
            sc->cpu_non_c3 = i;
    }
    sbuf_trim(&sb);
    sbuf_finish(&sb);
}

static void
acpi_cpu_startup_cx(struct acpi_cpu_softc *sc)
{
    acpi_cpu_cx_list(sc);

    SYSCTL_ADD_STRING(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
        OID_AUTO, "cx_supported", CTLFLAG_RD,
        sc->cpu_cx_supported, 0,
        "Cx/microsecond values for supported Cx states");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
        OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
        (void *)sc, 0, acpi_cpu_cx_lowest_sysctl, "A",
        "lowest Cx sleep state to use");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
        OID_AUTO, "cx_usage", CTLTYPE_STRING | CTLFLAG_RD,
        (void *)sc, 0, acpi_cpu_usage_sysctl, "A",
        "percent usage for each Cx state");

    /* Signal to the platform that we can handle _CST notifications. */
    if (!cpu_cx_generic && cpu_cst_cnt != 0) {
        ACPI_LOCK(acpi);
        AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8);
        ACPI_UNLOCK(acpi);
    }
}

/*
 * Idle the CPU in the lowest state possible.  This function is called with
 * interrupts disabled.  Note that once it re-enables interrupts, a task
 * switch can occur, so do not access shared data (i.e. the softc) after
 * interrupts are re-enabled.
 */
static void
acpi_cpu_idle(void)
{
    struct acpi_cpu_softc *sc;
    struct acpi_cx *cx_next;
    uint64_t cputicks;
    uint32_t start_time, end_time;
    int bm_active, cx_next_idx, i;

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no ACPI processor object for this CPU.  This occurs
     * for logical CPUs in the HTT case.
     */
    sc = cpu_softc[PCPU_GET(cpuid)];
    if (sc == NULL) {
        acpi_cpu_c1();
        return;
    }

    /* If disabled, take the safe path. */
    if (is_idle_disabled(sc)) {
        acpi_cpu_c1();
        return;
    }

    /*
     * Find the lowest state that has a small enough latency.  We only
     * pick a state whose worst-case transition latency is at most one
     * third of the recent average sleep time, so that the transition
     * overhead stays small relative to the expected residency.
     */
    cx_next_idx = 0;
    for (i = sc->cpu_cx_lowest; i >= 0; i--) {
        if (sc->cpu_cx_states[i].trans_lat * 3 <= sc->cpu_prev_sleep) {
            cx_next_idx = i;
            break;
        }
    }

    /*
     * Check for bus master activity.  If there was activity, clear
     * the bit and use the lowest non-C3 state.  Note that the USB
     * driver polling for new devices keeps this bit set all the
     * time if USB is loaded.
     */
    if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0 &&
        cx_next_idx > sc->cpu_non_c3) {
        AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active);
        if (bm_active != 0) {
            AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);
            cx_next_idx = sc->cpu_non_c3;
        }
    }

    /* Select the next state and update statistics. */
    cx_next = &sc->cpu_cx_states[cx_next_idx];
    sc->cpu_cx_stats[cx_next_idx]++;
    KASSERT(cx_next->type != ACPI_STATE_C0, ("acpi_cpu_idle: C0 sleep"));

    /*
     * Execute HLT (or equivalent) and wait for an interrupt.  We can't
     * precisely calculate the time spent in C1 since the place we wake up
     * is an ISR.  Assume we slept no more than half of a quantum.  The
     * "<< 20 / cpu_tickrate()" expression converts CPU ticks to roughly
     * microseconds (2^20 is close to 10^6).
     */
    if (cx_next->type == ACPI_STATE_C1) {
        cputicks = cpu_ticks();
        acpi_cpu_c1();
        end_time = ((cpu_ticks() - cputicks) << 20) / cpu_tickrate();
        if (curthread->td_critnest == 0)
            end_time = min(end_time, 500000 / hz);
        sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + end_time) / 4;
        return;
    }

    /*
     * For C3, disable bus master arbitration and enable bus master wake
     * if BM control is available, otherwise flush the CPU cache.
     */
    if (cx_next->type == ACPI_STATE_C3) {
        if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
            AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);
            AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
        } else
            ACPI_FLUSH_CPU_CACHE();
    }

    /*
     * Read from P_LVLx to enter C2(+), checking time spent asleep.
     * Use the ACPI timer for measuring sleep time.  Since we need to
     * get the time very close to the CPU start/stop clock logic, this
     * is the only reliable time source.
     */
    if (cx_next->type == ACPI_STATE_C3) {
        AcpiHwRead(&start_time, &AcpiGbl_FADT.XPmTimerBlock);
        cputicks = 0;
    } else {
        start_time = 0;
        cputicks = cpu_ticks();
    }
    CPU_GET_REG(cx_next->p_lvlx, 1);

    /*
     * Read the end time twice.  Since it may take an arbitrary time
     * to enter the idle state, the first read may be executed before
     * the processor has stopped.  Doing it again provides enough
     * margin that we are certain to have a correct value.
     */
    AcpiHwRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock);
    if (cx_next->type == ACPI_STATE_C3) {
        AcpiHwRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock);
        end_time = acpi_TimerDelta(end_time, start_time);
    } else
        end_time = ((cpu_ticks() - cputicks) << 20) / cpu_tickrate();

    /* Enable bus master arbitration and disable bus master wakeup. */
    if (cx_next->type == ACPI_STATE_C3 &&
        (cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
        AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);
        AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
    }
    ACPI_ENABLE_IRQS();

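    /*
     * Track the sleep time as an exponential moving average with a
     * weight of 1/4 on the newest sample: prev = (3 * prev + new) / 4.
     */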
    sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + PM_USEC(end_time)) / 4;
}

/*
 * Re-evaluate the _CST object when we are notified that it changed.
 */
static void
acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context)
{
    struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)context;

    if (notify != ACPI_NOTIFY_CX_STATES)
        return;

    /*
     * C-state data for the target CPU is going to be in flux while we
     * execute acpi_cpu_cx_cst(), so disable entering acpi_cpu_idle().
     * Also, multiple ACPI taskqueues may concurrently execute
     * notifications for the same CPU; ACPI_SERIAL is used to protect
     * against that.
     */
    ACPI_SERIAL_BEGIN(cpu);
    disable_idle(sc);

    /* Update the list of Cx states. */
    acpi_cpu_cx_cst(sc);
    acpi_cpu_cx_list(sc);
    acpi_cpu_set_cx_lowest(sc);

    enable_idle(sc);
    ACPI_SERIAL_END(cpu);

    acpi_UserNotify("PROCESSOR", sc->cpu_handle, notify);
}

static void
acpi_cpu_quirks(void)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /*
     * Bus mastering arbitration control is needed to keep caches coherent
     * while sleeping in C3.  If it's not present but a working flush cache
     * instruction is present, flush the caches before entering C3 instead.
     * Otherwise, just disable C3 completely.
     */
    if (AcpiGbl_FADT.Pm2ControlBlock == 0 ||
        AcpiGbl_FADT.Pm2ControlLength == 0) {
        if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) &&
            (AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0) {
            cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu: no BM control, using flush cache method\n"));
        } else {
            cpu_quirks |= CPU_QUIRK_NO_C3;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu: no BM control, C3 not available\n"));
        }
    }

    /*
     * If we are using generic Cx mode, C3 on multiple CPUs requires using
     * the expensive flush cache instruction.
     */
    if (cpu_cx_generic && mp_ncpus > 1) {
        cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
            "acpi_cpu: SMP, using flush cache mode for C3\n"));
    }

    /* Look for various quirks of the PIIX4 part. */
    acpi_cpu_quirks_piix4();
}

static void
acpi_cpu_quirks_piix4(void)
{
#ifdef __i386__
    device_t acpi_dev;
    uint32_t val;

    acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
    if (acpi_dev != NULL) {
        switch (pci_get_revid(acpi_dev)) {
        /*
         * Disable C3 support for all PIIX4 chipsets.  Some of these parts
         * do not report the BMIDE status to the BM status register and
         * others have a livelock bug if Type-F DMA is enabled.  Linux
         * works around the BMIDE bug by reading the BM status directly,
         * but we take the simpler approach of disabling C3 for these
         * parts.
         *
         * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
         * Livelock") from the January 2002 PIIX4 specification update.
         * It applies to all PIIX4 models.
         *
         * Also, make sure that all interrupts cause a "Stop Break"
         * event so that we exit from the C2 state.
         * Finally, BRLD_EN_BM (ACPI_BITREG_BUS_MASTER_RLD in ACPI-speak)
         * should be set to zero, otherwise it causes C2 to short-sleep.
         * PIIX4 doesn't properly support C3, and bus master activity
         * need not break out of C2.
         */
        case PCI_REVISION_A_STEP:
        case PCI_REVISION_B_STEP:
        case PCI_REVISION_4E:
        case PCI_REVISION_4M:
            cpu_quirks |= CPU_QUIRK_NO_C3;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu: working around PIIX4 bug, disabling C3\n"));

            val = pci_read_config(acpi_dev, PIIX4_DEVACTB_REG, 4);
            if ((val & PIIX4_STOP_BREAK_MASK) != PIIX4_STOP_BREAK_MASK) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu: PIIX4: enabling IRQs to generate Stop Break\n"));
                val |= PIIX4_STOP_BREAK_MASK;
                pci_write_config(acpi_dev, PIIX4_DEVACTB_REG, val, 4);
            }
            AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_RLD, &val);
            if (val) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu: PIIX4: reset BRLD_EN_BM\n"));
                AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
            }
            break;
        default:
            break;
        }
    }
#endif
}

static int
acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    struct sbuf sb;
    char buf[128];
    int i;
    uintmax_t fract, sum, whole;

    sc = (struct acpi_cpu_softc *)arg1;
    sum = 0;
    for (i = 0; i < sc->cpu_cx_count; i++)
        sum += sc->cpu_cx_stats[i];
    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
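    /*
     * Print each state's share of the total as a percentage with two
     * decimal places using integer math only: whole = count * 100 gives
     * the integer percent after dividing by sum, and fract = (whole %
     * sum) * 100 recovers the two fractional digits.
     */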
    for (i = 0; i < sc->cpu_cx_count; i++) {
        if (sum > 0) {
            whole = (uintmax_t)sc->cpu_cx_stats[i] * 100;
            fract = (whole % sum) * 100;
            sbuf_printf(&sb, "%u.%02u%% ", (u_int)(whole / sum),
                (u_int)(fract / sum));
        } else
            sbuf_printf(&sb, "0.00%% ");
    }
    sbuf_printf(&sb, "last %dus", sc->cpu_prev_sleep);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
    sbuf_delete(&sb);

    return (0);
}

static int
acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc)
{
    int i;

    ACPI_SERIAL_ASSERT(cpu);
    sc->cpu_cx_lowest = min(sc->cpu_cx_lowest_lim, sc->cpu_cx_count - 1);

    /* Cache the new lowest non-C3 state. */
    sc->cpu_non_c3 = 0;
    for (i = sc->cpu_cx_lowest; i >= 0; i--) {
        if (sc->cpu_cx_states[i].type < ACPI_STATE_C3) {
            sc->cpu_non_c3 = i;
            break;
        }
    }

    /* Reset the statistics counters. */
    bzero(sc->cpu_cx_stats, sizeof(sc->cpu_cx_stats));
    return (0);
}

static int
acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    char state[8];
    int val, error;

    sc = (struct acpi_cpu_softc *)arg1;
    snprintf(state, sizeof(state), "C%d", sc->cpu_cx_lowest_lim + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
        return (error);
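    /* Accept strings of the form "C1".."C8"; "Cmax" selects the deepest state. */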
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
        return (EINVAL);
    if (strcasecmp(state, "Cmax") == 0)
        val = MAX_CX_STATES;
    else {
        val = (int)strtol(state + 1, NULL, 10);
        if (val < 1 || val > MAX_CX_STATES)
            return (EINVAL);
    }

    ACPI_SERIAL_BEGIN(cpu);
    sc->cpu_cx_lowest_lim = val - 1;
    acpi_cpu_set_cx_lowest(sc);
    ACPI_SERIAL_END(cpu);

    return (0);
}

static int
acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    char state[8];
    int val, error, i;

    snprintf(state, sizeof(state), "C%d", cpu_cx_lowest_lim + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
        return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
        return (EINVAL);
    if (strcasecmp(state, "Cmax") == 0)
        val = MAX_CX_STATES;
    else {
        val = (int)strtol(state + 1, NULL, 10);
        if (val < 1 || val > MAX_CX_STATES)
            return (EINVAL);
    }

    /* Update the new lowest usable Cx state for all CPUs. */
    ACPI_SERIAL_BEGIN(cpu);
    cpu_cx_lowest_lim = val - 1;
    for (i = 0; i < cpu_ndevices; i++) {
        sc = device_get_softc(cpu_devices[i]);
        sc->cpu_cx_lowest_lim = cpu_cx_lowest_lim;
        acpi_cpu_set_cx_lowest(sc);
    }
    ACPI_SERIAL_END(cpu);

    return (0);
}