/*-
 * Copyright (c) 2003-2005 Nate Lawson (SDG)
 * Copyright (c) 2001 Michael Smith
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.2/sys/dev/acpica/acpi_cpu.c 244618 2012-12-23 12:09:41Z avg $");

#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/pcpu.h>
#include <sys/power.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/smp.h>

#include <dev/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#if defined(__amd64__) || defined(__i386__)
#include <machine/clock.h>
#endif
#include <sys/rman.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>

#include <dev/acpica/acpivar.h>

/*
 * Support for ACPI Processor devices, including C[1-3] sleep states.
 */

/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT      ACPI_PROCESSOR
ACPI_MODULE_NAME("PROCESSOR")

struct acpi_cx {
    struct resource *p_lvlx;        /* Register to read to enter state. */
    uint32_t         type;          /* C1-3 (C4 and up treated as C3). */
    uint32_t         trans_lat;     /* Transition latency (usec). */
    uint32_t         power;         /* Power consumed (mW). */
    int              res_type;      /* Resource type for p_lvlx. */
    int              res_rid;       /* Resource ID for p_lvlx. */
};
#define MAX_CX_STATES    8

struct acpi_cpu_softc {
    device_t         cpu_dev;
    ACPI_HANDLE      cpu_handle;
    struct pcpu     *cpu_pcpu;
    uint32_t         cpu_acpi_id;   /* ACPI processor id */
    uint32_t         cpu_p_blk;     /* ACPI P_BLK location */
    uint32_t         cpu_p_blk_len; /* P_BLK length (must be 6). */
    struct acpi_cx   cpu_cx_states[MAX_CX_STATES];
    int              cpu_cx_count;  /* Number of valid Cx states. */
    int              cpu_prev_sleep; /* Last idle sleep duration. */
    int              cpu_features;  /* Child driver supported features. */
    /* Runtime state. */
    int              cpu_non_c3;    /* Index of lowest non-C3 state. */
    u_int            cpu_cx_stats[MAX_CX_STATES]; /* Cx usage history. */
    /* Values for sysctl. */
    struct sysctl_ctx_list cpu_sysctl_ctx;
    struct sysctl_oid *cpu_sysctl_tree;
    int              cpu_cx_lowest;
    int              cpu_cx_lowest_lim;
    int              cpu_disable_idle; /* Disable entry to idle function */
    char             cpu_cx_supported[64];
};

struct acpi_cpu_device {
    struct resource_list ad_rl;
};

#define CPU_GET_REG(reg, width)                                     \
    (bus_space_read_ ## width(rman_get_bustag((reg)),               \
        rman_get_bushandle((reg)), 0))
#define CPU_SET_REG(reg, width, val)                                \
    (bus_space_write_ ## width(rman_get_bustag((reg)),              \
        rman_get_bushandle((reg)), 0, (val)))
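
/*
 * Usage sketch (illustrative only, not part of the driver logic):
 * entering C2/C3 in acpi_cpu_idle() below is a single byte-wide read of
 * the state's P_LVLx register, i.e. CPU_GET_REG(cx_next->p_lvlx, 1).
 * The trailing "1" selects the bus_space_read_1() accessor.
 */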

#define PM_USEC(x)    ((x) >> 2)    /* ~4 clocks per usec (3.579545 MHz) */
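
/*
 * The ACPI PM timer runs at 3.579545 MHz, so one usec is ~3.58 ticks.
 * PM_USEC() approximates ticks / 3.58 with a cheap shift (divide by 4),
 * understating the true duration by ~11%.  For example, 35795 ticks is
 * really ~10000 usec, but PM_USEC(35795) yields 8948.
 */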

#define ACPI_NOTIFY_CX_STATES    0x81    /* _CST changed. */

#define CPU_QUIRK_NO_C3      (1<<0)    /* C3-type states are not usable. */
#define CPU_QUIRK_NO_BM_CTRL (1<<2)    /* No bus mastering control. */

#define PCI_VENDOR_INTEL     0x8086
#define PCI_DEVICE_82371AB_3 0x7113    /* PIIX4 chipset for quirks. */
#define PCI_REVISION_A_STEP  0
#define PCI_REVISION_B_STEP  1
#define PCI_REVISION_4E      2
#define PCI_REVISION_4M      3
#define PIIX4_DEVACTB_REG    0x58
#define PIIX4_BRLD_EN_IRQ0   (1<<0)
#define PIIX4_BRLD_EN_IRQ    (1<<1)
#define PIIX4_BRLD_EN_IRQ8   (1<<5)
#define PIIX4_STOP_BREAK_MASK (PIIX4_BRLD_EN_IRQ0 | PIIX4_BRLD_EN_IRQ | PIIX4_BRLD_EN_IRQ8)
#define PIIX4_PCNTRL_BST_EN  (1<<10)

/* Allow users to ignore processor orders in MADT. */
static int cpu_unordered;
TUNABLE_INT("debug.acpi.cpu_unordered", &cpu_unordered);
SYSCTL_INT(_debug_acpi, OID_AUTO, cpu_unordered, CTLFLAG_RDTUN,
    &cpu_unordered, 0,
    "Do not use the MADT to match ACPI Processor objects to CPUs.");

/* Platform hardware resource information. */
static uint32_t  cpu_smi_cmd;    /* Value to write to SMI_CMD. */
static uint8_t   cpu_cst_cnt;    /* Indicate we are _CST aware. */
static int       cpu_quirks;     /* Indicate any hardware bugs. */

/* Values for sysctl. */
static struct sysctl_ctx_list cpu_sysctl_ctx;
static struct sysctl_oid *cpu_sysctl_tree;
static int       cpu_cx_generic;
static int       cpu_cx_lowest_lim;

static device_t *cpu_devices;
static int       cpu_ndevices;
static struct acpi_cpu_softc **cpu_softc;
ACPI_SERIAL_DECL(cpu, "ACPI CPU");

static int    acpi_cpu_probe(device_t dev);
static int    acpi_cpu_attach(device_t dev);
static int    acpi_cpu_suspend(device_t dev);
static int    acpi_cpu_resume(device_t dev);
static int    acpi_pcpu_get_id(device_t dev, uint32_t *acpi_id,
                  uint32_t *cpu_id);
static struct resource_list *acpi_cpu_get_rlist(device_t dev, device_t child);
static device_t acpi_cpu_add_child(device_t dev, u_int order, const char *name,
                  int unit);
static int    acpi_cpu_read_ivar(device_t dev, device_t child, int index,
                  uintptr_t *result);
static int    acpi_cpu_shutdown(device_t dev);
static void   acpi_cpu_cx_probe(struct acpi_cpu_softc *sc);
static void   acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc);
static int    acpi_cpu_cx_cst(struct acpi_cpu_softc *sc);
static void   acpi_cpu_startup(void *arg);
static void   acpi_cpu_startup_cx(struct acpi_cpu_softc *sc);
static void   acpi_cpu_cx_list(struct acpi_cpu_softc *sc);
static void   acpi_cpu_idle(void);
static void   acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context);
static int    acpi_cpu_quirks(void);
static int    acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS);
static int    acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc);
static int    acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int    acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);

static device_method_t acpi_cpu_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,     acpi_cpu_probe),
    DEVMETHOD(device_attach,    acpi_cpu_attach),
    DEVMETHOD(device_detach,    bus_generic_detach),
    DEVMETHOD(device_shutdown,  acpi_cpu_shutdown),
    DEVMETHOD(device_suspend,   acpi_cpu_suspend),
    DEVMETHOD(device_resume,    acpi_cpu_resume),

    /* Bus interface */
    DEVMETHOD(bus_add_child,    acpi_cpu_add_child),
    DEVMETHOD(bus_read_ivar,    acpi_cpu_read_ivar),
    DEVMETHOD(bus_get_resource_list, acpi_cpu_get_rlist),
    DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
    DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
    DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource),
    DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
    DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
    DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
    DEVMETHOD(bus_setup_intr,   bus_generic_setup_intr),
    DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),

    DEVMETHOD_END
};

static driver_t acpi_cpu_driver = {
    "cpu",
    acpi_cpu_methods,
    sizeof(struct acpi_cpu_softc),
};

static devclass_t acpi_cpu_devclass;
DRIVER_MODULE(cpu, acpi, acpi_cpu_driver, acpi_cpu_devclass, 0, 0);
MODULE_DEPEND(cpu, acpi, 1, 1, 1);

static int
acpi_cpu_probe(device_t dev)
{
    int acpi_id, cpu_id;
    ACPI_BUFFER buf;
    ACPI_HANDLE handle;
    ACPI_OBJECT *obj;
    ACPI_STATUS status;

    if (acpi_disabled("cpu") || acpi_get_type(dev) != ACPI_TYPE_PROCESSOR)
        return (ENXIO);

    handle = acpi_get_handle(dev);
    if (cpu_softc == NULL)
        cpu_softc = malloc(sizeof(struct acpi_cpu_softc *) *
            (mp_maxid + 1), M_TEMP /* XXX */, M_WAITOK | M_ZERO);

    /* Get our Processor object. */
    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
        device_printf(dev, "probe failed to get Processor obj - %s\n",
            AcpiFormatException(status));
        return (ENXIO);
    }
    obj = (ACPI_OBJECT *)buf.Pointer;
    if (obj->Type != ACPI_TYPE_PROCESSOR) {
        device_printf(dev, "Processor object has bad type %d\n", obj->Type);
        AcpiOsFree(obj);
        return (ENXIO);
    }
    /*
     * Find the processor associated with our unit.  We could use the
     * ProcId as a key; however, some boxes do not have the same values
     * in their Processor object as the ProcId values in the MADT.
     */
    acpi_id = obj->Processor.ProcId;
    AcpiOsFree(obj);
    if (acpi_pcpu_get_id(dev, &acpi_id, &cpu_id) != 0)
        return (ENXIO);

    /*
     * Check if we already probed this processor.  We scan the bus twice
     * so it's possible we've already seen this one.
     */
    if (cpu_softc[cpu_id] != NULL)
        return (ENXIO);

    /* Mark this processor as in-use and save our derived id for attach. */
    cpu_softc[cpu_id] = (void *)1;
    acpi_set_private(dev, (void*)(intptr_t)cpu_id);
    device_set_desc(dev, "ACPI CPU");

    return (0);
}

static int
acpi_cpu_attach(device_t dev)
{
    ACPI_BUFFER            buf;
    ACPI_OBJECT            arg[4], *obj;
    ACPI_OBJECT_LIST       arglist;
    struct pcpu           *pcpu_data;
    struct acpi_cpu_softc *sc;
    struct acpi_softc     *acpi_sc;
    ACPI_STATUS            status;
    u_int                  features;
    int                    cpu_id, drv_count, i;
    driver_t             **drivers;
    uint32_t               cap_set[3];

    /* UUID needed by _OSC evaluation */
    static uint8_t cpu_oscuuid[16] = { 0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29,
                                       0xBE, 0x47, 0x9E, 0xBD, 0xD8, 0x70,
                                       0x58, 0x71, 0x39, 0x53 };
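    /*
     * For reference: in GUID byte order (first three fields little-endian),
     * the bytes above spell 4077A616-290C-47BE-9EBD-D87058713953, the
     * processor UUID from the Intel Processor Vendor-Specific ACPI
     * Interface Specification.
     */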

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = device_get_softc(dev);
    sc->cpu_dev = dev;
    sc->cpu_handle = acpi_get_handle(dev);
    cpu_id = (int)(intptr_t)acpi_get_private(dev);
    cpu_softc[cpu_id] = sc;
    pcpu_data = pcpu_find(cpu_id);
    pcpu_data->pc_device = dev;
    sc->cpu_pcpu = pcpu_data;
    cpu_smi_cmd = AcpiGbl_FADT.SmiCommand;
    cpu_cst_cnt = AcpiGbl_FADT.CstControl;

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
        device_printf(dev, "attach failed to get Processor obj - %s\n",
            AcpiFormatException(status));
        return (ENXIO);
    }
    obj = (ACPI_OBJECT *)buf.Pointer;
    sc->cpu_p_blk = obj->Processor.PblkAddress;
    sc->cpu_p_blk_len = obj->Processor.PblkLength;
    sc->cpu_acpi_id = obj->Processor.ProcId;
    AcpiOsFree(obj);
    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_BLK at %#x/%d\n",
        device_get_unit(dev), sc->cpu_p_blk, sc->cpu_p_blk_len));

    /*
     * If this is the first cpu we attach, create and initialize the generic
     * resources that will be used by all acpi cpu devices.
     */
    if (device_get_unit(dev) == 0) {
        /* Assume we won't be using generic Cx mode by default */
        cpu_cx_generic = FALSE;

        /* Install hw.acpi.cpu sysctl tree */
        acpi_sc = acpi_device_get_parent_softc(dev);
        sysctl_ctx_init(&cpu_sysctl_ctx);
        cpu_sysctl_tree = SYSCTL_ADD_NODE(&cpu_sysctl_ctx,
            SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree), OID_AUTO, "cpu",
            CTLFLAG_RD, 0, "node for CPU children");

        /* Queue post cpu-probing task handler */
        AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cpu_startup, NULL);
    }

    /*
     * Before calling any CPU methods, collect child driver feature hints
     * and notify ACPI of them.  We support unified SMP power control
     * so advertise this ourselves.  Note this is not the same as independent
     * SMP control where each CPU can have different settings.
     */
    sc->cpu_features = ACPI_CAP_SMP_SAME | ACPI_CAP_SMP_SAME_C3;
    if (devclass_get_drivers(acpi_cpu_devclass, &drivers, &drv_count) == 0) {
        for (i = 0; i < drv_count; i++) {
            if (ACPI_GET_FEATURES(drivers[i], &features) == 0)
                sc->cpu_features |= features;
        }
        free(drivers, M_TEMP);
    }

    /*
     * CPU capabilities are specified in
     * Intel Processor Vendor-Specific ACPI Interface Specification.
     */
    if (sc->cpu_features) {
        arglist.Pointer = arg;
        arglist.Count = 4;
        arg[0].Type = ACPI_TYPE_BUFFER;
        arg[0].Buffer.Length = sizeof(cpu_oscuuid);
        arg[0].Buffer.Pointer = cpu_oscuuid;    /* UUID */
        arg[1].Type = ACPI_TYPE_INTEGER;
        arg[1].Integer.Value = 1;               /* revision */
        arg[2].Type = ACPI_TYPE_INTEGER;
        arg[2].Integer.Value = 1;               /* count */
        arg[3].Type = ACPI_TYPE_BUFFER;
        arg[3].Buffer.Length = sizeof(cap_set); /* Capabilities buffer */
        arg[3].Buffer.Pointer = (uint8_t *)cap_set;
        cap_set[0] = 0;                         /* status */
        cap_set[1] = sc->cpu_features;
        status = AcpiEvaluateObject(sc->cpu_handle, "_OSC", &arglist, NULL);
        if (ACPI_SUCCESS(status)) {
            if (cap_set[0] != 0)
                device_printf(dev, "_OSC returned status %#x\n", cap_set[0]);
        }
        else {
            arglist.Pointer = arg;
            arglist.Count = 1;
            arg[0].Type = ACPI_TYPE_BUFFER;
            arg[0].Buffer.Length = sizeof(cap_set);
            arg[0].Buffer.Pointer = (uint8_t *)cap_set;
            cap_set[0] = 1; /* revision */
            cap_set[1] = 1; /* number of capabilities integers */
            cap_set[2] = sc->cpu_features;
            AcpiEvaluateObject(sc->cpu_handle, "_PDC", &arglist, NULL);
        }
    }
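
    /*
     * Schematic view of the two calling conventions above, with the
     * values this function passes (a sketch, not actual ASL):
     *
     *   _OSC(Buffer(16) UUID, Integer 1 = revision, Integer 1 = count,
     *        Buffer(12) = { status, capabilities, unused })
     *   _PDC(Buffer(12) = { revision = 1, count = 1, capabilities })
     *
     * _OSC reports errors back in the first DWORD of the capabilities
     * buffer (checked above); _PDC has no status word, so its return
     * value is ignored.
     */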

    /* Probe for Cx state support. */
    acpi_cpu_cx_probe(sc);

    return (0);
}

static void
acpi_cpu_postattach(void *unused __unused)
{
    device_t *devices;
    int err;
    int i, n;

    err = devclass_get_devices(acpi_cpu_devclass, &devices, &n);
    if (err != 0) {
        printf("devclass_get_devices(acpi_cpu_devclass) failed\n");
        return;
    }
    for (i = 0; i < n; i++)
        bus_generic_probe(devices[i]);
    for (i = 0; i < n; i++)
        bus_generic_attach(devices[i]);
    free(devices, M_TEMP);
}

SYSINIT(acpi_cpu, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE,
    acpi_cpu_postattach, NULL);

static void
disable_idle(struct acpi_cpu_softc *sc)
{
    cpuset_t cpuset;

    CPU_SETOF(sc->cpu_pcpu->pc_cpuid, &cpuset);
    sc->cpu_disable_idle = TRUE;

    /*
     * Ensure that the CPU is not in an idle state or in acpi_cpu_idle().
     * Note that this code depends on the fact that the rendezvous IPI
     * cannot penetrate a context where interrupts are disabled.
     * acpi_cpu_idle() is called and executed in exactly such a context,
     * with interrupts being re-enabled right before it returns.
     */
    smp_rendezvous_cpus(cpuset, smp_no_rendevous_barrier, NULL,
        smp_no_rendevous_barrier, NULL);
}

static void
enable_idle(struct acpi_cpu_softc *sc)
{

    sc->cpu_disable_idle = FALSE;
}

static int
is_idle_disabled(struct acpi_cpu_softc *sc)
{

    return (sc->cpu_disable_idle);
}

/*
 * Disable any entry to the idle function during suspend and re-enable it
 * during resume.
 */
static int
acpi_cpu_suspend(device_t dev)
{
    int error;

    error = bus_generic_suspend(dev);
    if (error)
        return (error);
    disable_idle(device_get_softc(dev));
    return (0);
}

static int
acpi_cpu_resume(device_t dev)
{

    enable_idle(device_get_softc(dev));
    return (bus_generic_resume(dev));
}

/*
 * Find the processor associated with a given ACPI ID.  By default,
 * use the MADT to map ACPI IDs to APIC IDs and use that to locate a
 * processor.  Some systems have inconsistent ASL and MADT however.
 * For these systems the cpu_unordered tunable can be set in which
 * case we assume that Processor objects are listed in the same order
 * in both the MADT and ASL.
 */
static int
acpi_pcpu_get_id(device_t dev, uint32_t *acpi_id, uint32_t *cpu_id)
{
    struct pcpu *pc;
    uint32_t     i, idx;

    KASSERT(acpi_id != NULL, ("Null acpi_id"));
    KASSERT(cpu_id != NULL, ("Null cpu_id"));
    idx = device_get_unit(dev);

    /*
     * If pc_acpi_id for CPU 0 is not initialized (e.g. a non-APIC
     * UP box), use the ACPI ID from the first processor we find.
     */
    if (idx == 0 && mp_ncpus == 1) {
        pc = pcpu_find(0);
        if (pc->pc_acpi_id == 0xffffffff)
            pc->pc_acpi_id = *acpi_id;
        *cpu_id = 0;
        return (0);
    }

    CPU_FOREACH(i) {
        pc = pcpu_find(i);
        KASSERT(pc != NULL, ("no pcpu data for %d", i));
        if (cpu_unordered) {
            if (idx-- == 0) {
                /*
                 * If pc_acpi_id doesn't match the ACPI ID from the
                 * ASL, prefer the MADT-derived value.
                 */
                if (pc->pc_acpi_id != *acpi_id)
                    *acpi_id = pc->pc_acpi_id;
                *cpu_id = pc->pc_cpuid;
                return (0);
            }
        } else {
            if (pc->pc_acpi_id == *acpi_id) {
                if (bootverbose)
                    device_printf(dev,
                        "Processor %s (ACPI ID %u) -> APIC ID %d\n",
                        acpi_name(acpi_get_handle(dev)), *acpi_id,
                        pc->pc_cpuid);
                *cpu_id = pc->pc_cpuid;
                return (0);
            }
        }
    }

    if (bootverbose)
        printf("ACPI: Processor %s (ACPI ID %u) ignored\n",
            acpi_name(acpi_get_handle(dev)), *acpi_id);

    return (ESRCH);
}

static struct resource_list *
acpi_cpu_get_rlist(device_t dev, device_t child)
{
    struct acpi_cpu_device *ad;

    ad = device_get_ivars(child);
    if (ad == NULL)
        return (NULL);
    return (&ad->ad_rl);
}

static device_t
acpi_cpu_add_child(device_t dev, u_int order, const char *name, int unit)
{
    struct acpi_cpu_device *ad;
    device_t child;

    if ((ad = malloc(sizeof(*ad), M_TEMP, M_NOWAIT | M_ZERO)) == NULL)
        return (NULL);

    resource_list_init(&ad->ad_rl);

    child = device_add_child_ordered(dev, order, name, unit);
    if (child != NULL)
        device_set_ivars(child, ad);
    else
        free(ad, M_TEMP);
    return (child);
}

static int
acpi_cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
    struct acpi_cpu_softc *sc;

    sc = device_get_softc(dev);
    switch (index) {
    case ACPI_IVAR_HANDLE:
        *result = (uintptr_t)sc->cpu_handle;
        break;
    case CPU_IVAR_PCPU:
        *result = (uintptr_t)sc->cpu_pcpu;
        break;
#if defined(__amd64__) || defined(__i386__)
    case CPU_IVAR_NOMINAL_MHZ:
        if (tsc_is_invariant) {
            *result = (uintptr_t)(atomic_load_acq_64(&tsc_freq) / 1000000);
            break;
        }
        /* FALLTHROUGH */
#endif
    default:
        return (ENOENT);
    }
    return (0);
}

static int
acpi_cpu_shutdown(device_t dev)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Allow children to shutdown first. */
    bus_generic_shutdown(dev);

    /*
     * Disable any entry to the idle function.
     */
    disable_idle(device_get_softc(dev));

    /*
     * CPU devices are not truly detached and remain referenced,
     * so their resources are not freed.
     */

    return_VALUE (0);
}

static void
acpi_cpu_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cpu_prev_sleep = 1000000;
    sc->cpu_cx_lowest = 0;
    sc->cpu_cx_lowest_lim = 0;

    /*
     * Check for the ACPI 2.0 _CST sleep states object.  If we can't find
     * any, we'll revert to the generic FADT/P_BLK Cx control method,
     * which will be handled by acpi_cpu_startup.  Probing for generic Cx
     * states must be deferred until all CPUs in the system have been
     * probed, since we may already have found CPUs with valid _CST
     * packages.
     */
    if (!cpu_cx_generic && acpi_cpu_cx_cst(sc) != 0) {
        /*
         * We were unable to find a _CST package for this cpu or there
         * was an error parsing it.  Switch back to generic mode.
         */
        cpu_cx_generic = TRUE;
        if (bootverbose)
            device_printf(sc->cpu_dev, "switching to generic Cx mode\n");
    }

    /*
     * TODO: _CSD Package should be checked here.
     */
}

static void
acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_GENERIC_ADDRESS gas;
    struct acpi_cx      *cx_ptr;

    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cpu_prev_sleep = 1000000;

    /* C1 has been required since just after ACPI 1.0 */
    cx_ptr->type = ACPI_STATE_C1;
    cx_ptr->trans_lat = 0;
    cx_ptr++;
    sc->cpu_non_c3 = sc->cpu_cx_count;
    sc->cpu_cx_count++;

    /*
     * The spec says P_BLK must be 6 bytes long.  However, some systems
     * use it to indicate that a fractional set of features is present,
     * so we treat a length of 5 as meaning C2 is supported.  Some may
     * also have a value of 7 to indicate another C3, but most use _CST
     * for this (as required) and having "only" C1-C3 is not a hardship.
     */
    if (sc->cpu_p_blk_len < 5)
        return;
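
    /*
     * P_BLK layout, per the ACPI specification (byte offsets):
     *   +0  P_CNT   (4 bytes, throttling control)
     *   +4  P_LVL2  (1 byte, read to enter C2)
     *   +5  P_LVL3  (1 byte, read to enter C3)
     * Hence the length checks: 5 bytes reaches P_LVL2 and 6 reaches
     * P_LVL3, matching the "cpu_p_blk + 4" and "cpu_p_blk + 5"
     * addresses used below.
     */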

    /* Validate and allocate resources for C2 (P_LVL2). */
    gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
    gas.BitWidth = 8;
    if (AcpiGbl_FADT.C2Latency <= 100) {
        gas.Address = sc->cpu_p_blk + 4;
        cx_ptr->res_rid = 0;
        acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &cx_ptr->res_rid,
            &gas, &cx_ptr->p_lvlx, RF_SHAREABLE);
        if (cx_ptr->p_lvlx != NULL) {
            cx_ptr->type = ACPI_STATE_C2;
            cx_ptr->trans_lat = AcpiGbl_FADT.C2Latency;
            cx_ptr++;
            sc->cpu_non_c3 = sc->cpu_cx_count;
            sc->cpu_cx_count++;
        }
    }
    if (sc->cpu_p_blk_len < 6)
        return;

    /* Validate and allocate resources for C3 (P_LVL3). */
    if (AcpiGbl_FADT.C3Latency <= 1000 && !(cpu_quirks & CPU_QUIRK_NO_C3)) {
        gas.Address = sc->cpu_p_blk + 5;
        cx_ptr->res_rid = 1;
        acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &cx_ptr->res_rid,
            &gas, &cx_ptr->p_lvlx, RF_SHAREABLE);
        if (cx_ptr->p_lvlx != NULL) {
            cx_ptr->type = ACPI_STATE_C3;
            cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency;
            cx_ptr++;
            sc->cpu_cx_count++;
            cpu_can_deep_sleep = 1;
        }
    }
}

/*
 * Parse a _CST package and set up its Cx states.  Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
 */
static int
acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
{
    struct acpi_cx *cx_ptr;
    ACPI_STATUS     status;
    ACPI_BUFFER     buf;
    ACPI_OBJECT    *top;
    ACPI_OBJECT    *pkg;
    uint32_t        count;
    int             i;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, "_CST", NULL, &buf);
    if (ACPI_FAILURE(status))
        return (ENXIO);

    /* _CST is a package with a count and at least one Cx package. */
    top = (ACPI_OBJECT *)buf.Pointer;
    if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) {
        device_printf(sc->cpu_dev, "invalid _CST package\n");
        AcpiOsFree(buf.Pointer);
        return (ENXIO);
    }
    if (count != top->Package.Count - 1) {
        device_printf(sc->cpu_dev, "invalid _CST state count (%d != %d)\n",
            count, top->Package.Count - 1);
        count = top->Package.Count - 1;
    }
    if (count > MAX_CX_STATES) {
        device_printf(sc->cpu_dev, "_CST has too many states (%d)\n", count);
        count = MAX_CX_STATES;
    }
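
    /*
     * For reference, a _CST object has this general shape (ASL sketch;
     * the register, latency and power values are illustrative only):
     *
     *   Name (_CST, Package () {
     *       2,                                       // count of Cx packages
     *       Package () { Register1, 1, 1, 1000 },    // C1
     *       Package () { Register2, 2, 50, 500 }     // C2
     *   })
     *
     * Each inner package is { register, type, latency (us), power (mW) },
     * matching the acpi_PkgInt32()/acpi_PkgGas() extraction below.
     */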

    sc->cpu_non_c3 = 0;
    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;

    /*
     * C1 has been required since just after ACPI 1.0.
     * Reserve the first slot for it.
     */
    cx_ptr->type = ACPI_STATE_C0;
    cx_ptr++;
    sc->cpu_cx_count++;

    /* Set up all valid states. */
    for (i = 0; i < count; i++) {
        pkg = &top->Package.Elements[i + 1];
        if (!ACPI_PKG_VALID(pkg, 4) ||
            acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 ||
            acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 ||
            acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) {

            device_printf(sc->cpu_dev, "skipping invalid Cx state package\n");
            continue;
        }

        /* Validate the state to see if we should use it. */
        switch (cx_ptr->type) {
        case ACPI_STATE_C1:
            if (sc->cpu_cx_states[0].type == ACPI_STATE_C0) {
                /* This is the first C1 state.  Use the reserved slot. */
                sc->cpu_cx_states[0] = *cx_ptr;
            } else {
                sc->cpu_non_c3 = sc->cpu_cx_count;
                cx_ptr++;
                sc->cpu_cx_count++;
            }
            continue;
        case ACPI_STATE_C2:
            sc->cpu_non_c3 = sc->cpu_cx_count;
            break;
        case ACPI_STATE_C3:
        default:
            if ((cpu_quirks & CPU_QUIRK_NO_C3) != 0) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu%d: C3[%d] not available.\n",
                    device_get_unit(sc->cpu_dev), i));
                continue;
            } else
                cpu_can_deep_sleep = 1;
            break;
        }

        /* Free up any previous register. */
        if (cx_ptr->p_lvlx != NULL) {
            bus_release_resource(sc->cpu_dev, cx_ptr->res_type,
                cx_ptr->res_rid, cx_ptr->p_lvlx);
            cx_ptr->p_lvlx = NULL;
        }

        /* Allocate the control register for C2 or C3. */
        cx_ptr->res_rid = sc->cpu_cx_count;
        acpi_PkgGas(sc->cpu_dev, pkg, 0, &cx_ptr->res_type, &cx_ptr->res_rid,
            &cx_ptr->p_lvlx, RF_SHAREABLE);
        if (cx_ptr->p_lvlx) {
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu%d: Got C%d - %d latency\n",
                device_get_unit(sc->cpu_dev), cx_ptr->type,
                cx_ptr->trans_lat));
            cx_ptr++;
            sc->cpu_cx_count++;
        }
    }
    AcpiOsFree(buf.Pointer);

    /* If C1 state was not found, we need one now. */
    cx_ptr = sc->cpu_cx_states;
    if (cx_ptr->type == ACPI_STATE_C0) {
        cx_ptr->type = ACPI_STATE_C1;
        cx_ptr->trans_lat = 0;
    }

    return (0);
}

/*
 * Call this *after* all CPUs have been attached.
 */
static void
acpi_cpu_startup(void *arg)
{
    struct acpi_cpu_softc *sc;
    int i;

    /* Get set of CPU devices */
    devclass_get_devices(acpi_cpu_devclass, &cpu_devices, &cpu_ndevices);

    /*
     * Set up any quirks that might be necessary now that we have probed
     * all the CPUs.
     */
    acpi_cpu_quirks();

    if (cpu_cx_generic) {
        /*
         * We are using generic Cx mode, probe for available Cx states
         * for all processors.
         */
        for (i = 0; i < cpu_ndevices; i++) {
            sc = device_get_softc(cpu_devices[i]);
            acpi_cpu_generic_cx_probe(sc);
        }
    } else {
        /*
         * We are using _CST mode, so remove the C3 state if necessary.
         * Now that we know for sure that we will be using _CST mode,
         * install our notify handler.
         */
        for (i = 0; i < cpu_ndevices; i++) {
            sc = device_get_softc(cpu_devices[i]);
            if (cpu_quirks & CPU_QUIRK_NO_C3) {
                sc->cpu_cx_count = sc->cpu_non_c3 + 1;
            }
            AcpiInstallNotifyHandler(sc->cpu_handle, ACPI_DEVICE_NOTIFY,
                acpi_cpu_notify, sc);
        }
    }

    /* Perform Cx final initialization. */
    for (i = 0; i < cpu_ndevices; i++) {
        sc = device_get_softc(cpu_devices[i]);
        acpi_cpu_startup_cx(sc);
    }

    /* Add a sysctl handler to handle global Cx lowest setting */
    SYSCTL_ADD_PROC(&cpu_sysctl_ctx, SYSCTL_CHILDREN(cpu_sysctl_tree),
        OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
        NULL, 0, acpi_cpu_global_cx_lowest_sysctl, "A",
        "Global lowest Cx sleep state to use");

    /* Take over idling from cpu_idle_default(). */
    cpu_cx_lowest_lim = 0;
    for (i = 0; i < cpu_ndevices; i++) {
        sc = device_get_softc(cpu_devices[i]);
        enable_idle(sc);
    }
    cpu_idle_hook = acpi_cpu_idle;
}

static void
acpi_cpu_cx_list(struct acpi_cpu_softc *sc)
{
    struct sbuf sb;
    int i;

    /*
     * Set up the list of Cx states
     */
    sbuf_new(&sb, sc->cpu_cx_supported, sizeof(sc->cpu_cx_supported),
        SBUF_FIXEDLEN);
    for (i = 0; i < sc->cpu_cx_count; i++)
        sbuf_printf(&sb, "C%d/%d/%d ", i + 1, sc->cpu_cx_states[i].type,
            sc->cpu_cx_states[i].trans_lat);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
}

static void
acpi_cpu_startup_cx(struct acpi_cpu_softc *sc)
{
    acpi_cpu_cx_list(sc);

    SYSCTL_ADD_STRING(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
        OID_AUTO, "cx_supported", CTLFLAG_RD,
        sc->cpu_cx_supported, 0,
        "Cx/microsecond values for supported Cx states");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
        OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
        (void *)sc, 0, acpi_cpu_cx_lowest_sysctl, "A",
        "lowest Cx sleep state to use");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
        OID_AUTO, "cx_usage", CTLTYPE_STRING | CTLFLAG_RD,
        (void *)sc, 0, acpi_cpu_usage_sysctl, "A",
        "percent usage for each Cx state");

    /* Signal platform that we can handle _CST notification. */
    if (!cpu_cx_generic && cpu_cst_cnt != 0) {
        ACPI_LOCK(acpi);
        AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8);
        ACPI_UNLOCK(acpi);
    }
}

/*
 * Idle the CPU in the lowest state possible.  This function is called with
 * interrupts disabled.  Note that once it re-enables interrupts, a task
 * switch can occur so do not access shared data (i.e. the softc) after
 * interrupts are re-enabled.
 */
static void
acpi_cpu_idle(void)
{
    struct acpi_cpu_softc *sc;
    struct acpi_cx *cx_next;
    uint64_t cputicks;
    uint32_t start_time, end_time;
    int bm_active, cx_next_idx, i;

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no ACPI processor object for this CPU.  This occurs
     * for logical CPUs in the HTT case.
     */
    sc = cpu_softc[PCPU_GET(cpuid)];
    if (sc == NULL) {
        acpi_cpu_c1();
        return;
    }

    /* If disabled, take the safe path. */
    if (is_idle_disabled(sc)) {
        acpi_cpu_c1();
        return;
    }

    /* Find the lowest state that has small enough latency. */
    cx_next_idx = 0;
    if (cpu_disable_deep_sleep)
        i = min(sc->cpu_cx_lowest, sc->cpu_non_c3);
    else
        i = sc->cpu_cx_lowest;
    for (; i >= 0; i--) {
        if (sc->cpu_cx_states[i].trans_lat * 3 <= sc->cpu_prev_sleep) {
            cx_next_idx = i;
            break;
        }
    }
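
    /*
     * Example of the heuristic above: with cpu_prev_sleep = 150 us, a
     * state with trans_lat = 40 qualifies (40 * 3 = 120 <= 150) while
     * one with trans_lat = 60 does not; i.e. we only pick a state whose
     * entry/exit cost is at most a third of the recent average sleep.
     */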

    /*
     * Check for bus master activity.  If there was activity, clear
     * the bit and use the lowest non-C3 state.  Note that the USB
     * driver polling for new devices keeps this bit set all the
     * time if USB is loaded.
     */
    if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0 &&
        cx_next_idx > sc->cpu_non_c3) {
        AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active);
        if (bm_active != 0) {
            AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);
            cx_next_idx = sc->cpu_non_c3;
        }
    }

    /* Select the next state and update statistics. */
    cx_next = &sc->cpu_cx_states[cx_next_idx];
    sc->cpu_cx_stats[cx_next_idx]++;
    KASSERT(cx_next->type != ACPI_STATE_C0, ("acpi_cpu_idle: C0 sleep"));

    /*
     * Execute HLT (or equivalent) and wait for an interrupt.  We can't
     * precisely calculate the time spent in C1 since the place we wake up
     * is an ISR.  Assume we slept no more than half of a quantum, unless
     * we are called inside a critical section, which delays the context
     * switch.
     */
    if (cx_next->type == ACPI_STATE_C1) {
        cputicks = cpu_ticks();
        acpi_cpu_c1();
        end_time = ((cpu_ticks() - cputicks) << 20) / cpu_tickrate();
        if (curthread->td_critnest == 0)
            end_time = min(end_time, 500000 / hz);
        sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + end_time) / 4;
        return;
    }
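
    /*
     * The "(prev * 3 + new) / 4" update above (also used at the end of
     * this function) is an exponential moving average with weight 1/4 on
     * the newest sample: e.g. with cpu_prev_sleep = 1000 and a measured
     * sleep of 200 us, the new estimate is (1000 * 3 + 200) / 4 = 800.
     */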

    /*
     * For C3, disable bus master arbitration and enable bus master wake
     * if BM control is available, otherwise flush the CPU cache.
     */
    if (cx_next->type == ACPI_STATE_C3) {
        if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
            AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);
            AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
        } else
            ACPI_FLUSH_CPU_CACHE();
    }

    /*
     * Read from P_LVLx to enter C2(+), checking time spent asleep.
     * Use the ACPI timer for measuring sleep time.  Since we need to
     * get the time very close to the CPU start/stop clock logic, this
     * is the only reliable time source.
     */
    if (cx_next->type == ACPI_STATE_C3) {
        AcpiHwRead(&start_time, &AcpiGbl_FADT.XPmTimerBlock);
        cputicks = 0;
    } else {
        start_time = 0;
        cputicks = cpu_ticks();
    }
    CPU_GET_REG(cx_next->p_lvlx, 1);

    /*
     * Read the end time twice.  Since it may take an arbitrary time
     * to enter the idle state, the first read may be executed before
     * the processor has stopped.  Doing it again provides enough
     * margin that we are certain to have a correct value.
     */
    AcpiHwRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock);
    if (cx_next->type == ACPI_STATE_C3) {
        AcpiHwRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock);
        end_time = acpi_TimerDelta(end_time, start_time);
    } else
        end_time = ((cpu_ticks() - cputicks) << 20) / cpu_tickrate();

    /* Enable bus master arbitration and disable bus master wakeup. */
    if (cx_next->type == ACPI_STATE_C3 &&
        (cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
        AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);
        AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
    }
    ACPI_ENABLE_IRQS();

    sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + PM_USEC(end_time)) / 4;
}

/*
 * Re-evaluate the _CST object when we are notified that it changed.
 */
static void
acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context)
{
    struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)context;

    if (notify != ACPI_NOTIFY_CX_STATES)
        return;

    /*
     * C-state data for the target CPU is going to be in flux while we
     * execute acpi_cpu_cx_cst, so disable entering acpi_cpu_idle.
     * Also, multiple ACPI taskqueues may concurrently execute
     * notifications for the same CPU.  ACPI_SERIAL is used to protect
     * against that.
     */
    ACPI_SERIAL_BEGIN(cpu);
    disable_idle(sc);

    /* Update the list of Cx states. */
    acpi_cpu_cx_cst(sc);
    acpi_cpu_cx_list(sc);
    acpi_cpu_set_cx_lowest(sc);

    enable_idle(sc);
    ACPI_SERIAL_END(cpu);

    acpi_UserNotify("PROCESSOR", sc->cpu_handle, notify);
}

static int
acpi_cpu_quirks(void)
{
    device_t acpi_dev;
    uint32_t val;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /*
     * Bus mastering arbitration control is needed to keep caches coherent
     * while sleeping in C3.  If it's not present but a working flush cache
     * instruction is present, flush the caches before entering C3 instead.
     * Otherwise, just disable C3 completely.
     */
    if (AcpiGbl_FADT.Pm2ControlBlock == 0 ||
        AcpiGbl_FADT.Pm2ControlLength == 0) {
        if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) &&
            (AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0) {
            cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu: no BM control, using flush cache method\n"));
        } else {
            cpu_quirks |= CPU_QUIRK_NO_C3;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu: no BM control, C3 not available\n"));
        }
    }

    /*
     * If we are using generic Cx mode, C3 on multiple CPUs requires using
     * the expensive flush cache instruction.
     */
    if (cpu_cx_generic && mp_ncpus > 1) {
        cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
            "acpi_cpu: SMP, using flush cache mode for C3\n"));
    }

    /* Look for various quirks of the PIIX4 part. */
    acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
    if (acpi_dev != NULL) {
        switch (pci_get_revid(acpi_dev)) {
        /*
         * Disable C3 support for all PIIX4 chipsets.  Some of these parts
         * do not report the BMIDE status to the BM status register and
         * others have a livelock bug if Type-F DMA is enabled.  Linux
         * works around the BMIDE bug by reading the BM status directly
         * but we take the simpler approach of disabling C3 for these
         * parts.
         *
         * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
         * Livelock") from the January 2002 PIIX4 specification update.
         * Applies to all PIIX4 models.
         *
         * Also, make sure that all interrupts cause a "Stop Break"
         * event to exit from C2 state.
         * Also, BRLD_EN_BM (ACPI_BITREG_BUS_MASTER_RLD in ACPI-speak)
         * should be set to zero, otherwise it causes C2 to short-sleep.
         * PIIX4 doesn't properly support C3 and bus master activity
         * need not break out of C2.
         */
        case PCI_REVISION_A_STEP:
        case PCI_REVISION_B_STEP:
        case PCI_REVISION_4E:
        case PCI_REVISION_4M:
            cpu_quirks |= CPU_QUIRK_NO_C3;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu: working around PIIX4 bug, disabling C3\n"));

            val = pci_read_config(acpi_dev, PIIX4_DEVACTB_REG, 4);
            if ((val & PIIX4_STOP_BREAK_MASK) != PIIX4_STOP_BREAK_MASK) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu: PIIX4: enabling IRQs to generate Stop Break\n"));
                val |= PIIX4_STOP_BREAK_MASK;
                pci_write_config(acpi_dev, PIIX4_DEVACTB_REG, val, 4);
            }
            AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_RLD, &val);
            if (val) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu: PIIX4: reset BRLD_EN_BM\n"));
                AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
            }
            break;
        default:
            break;
        }
    }

    return (0);
}

static int
acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    struct sbuf sb;
    char buf[128];
    int i;
    uintmax_t fract, sum, whole;

    sc = (struct acpi_cpu_softc *) arg1;
    sum = 0;
    for (i = 0; i < sc->cpu_cx_count; i++)
        sum += sc->cpu_cx_stats[i];
    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
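
    /*
     * The fixed-point math below prints two decimal places without
     * floating point: e.g. with cpu_cx_stats = { 600, 300, 100 } and
     * thus sum = 1000, state 0 gives whole = 60000, whole / sum = 60
     * and (whole % sum) * 100 / sum = 0, i.e. "60.00%".
     */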
    for (i = 0; i < sc->cpu_cx_count; i++) {
        if (sum > 0) {
            whole = (uintmax_t)sc->cpu_cx_stats[i] * 100;
            fract = (whole % sum) * 100;
            sbuf_printf(&sb, "%u.%02u%% ", (u_int)(whole / sum),
                (u_int)(fract / sum));
        } else
            sbuf_printf(&sb, "0.00%% ");
    }
    sbuf_printf(&sb, "last %dus", sc->cpu_prev_sleep);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
    sbuf_delete(&sb);

    return (0);
}

static int
acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc)
{
    int i;

    ACPI_SERIAL_ASSERT(cpu);
    sc->cpu_cx_lowest = min(sc->cpu_cx_lowest_lim, sc->cpu_cx_count - 1);

    /* If not disabling, cache the new lowest non-C3 state. */
    sc->cpu_non_c3 = 0;
    for (i = sc->cpu_cx_lowest; i >= 0; i--) {
        if (sc->cpu_cx_states[i].type < ACPI_STATE_C3) {
            sc->cpu_non_c3 = i;
            break;
        }
    }

    /* Reset the statistics counters. */
    bzero(sc->cpu_cx_stats, sizeof(sc->cpu_cx_stats));
    return (0);
}

static int
acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    char state[8];
    int val, error;

    sc = (struct acpi_cpu_softc *) arg1;
    snprintf(state, sizeof(state), "C%d", sc->cpu_cx_lowest_lim + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
        return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
        return (EINVAL);
    if (strcasecmp(state, "Cmax") == 0)
        val = MAX_CX_STATES;
    else {
        val = (int) strtol(state + 1, NULL, 10);
        if (val < 1 || val > MAX_CX_STATES)
            return (EINVAL);
    }

    ACPI_SERIAL_BEGIN(cpu);
    sc->cpu_cx_lowest_lim = val - 1;
    acpi_cpu_set_cx_lowest(sc);
    ACPI_SERIAL_END(cpu);

    return (0);
}

static int
acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    char state[8];
    int val, error, i;

    snprintf(state, sizeof(state), "C%d", cpu_cx_lowest_lim + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
        return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
        return (EINVAL);
    if (strcasecmp(state, "Cmax") == 0)
        val = MAX_CX_STATES;
    else {
        val = (int) strtol(state + 1, NULL, 10);
        if (val < 1 || val > MAX_CX_STATES)
            return (EINVAL);
    }

    /* Update the new lowest usable Cx state for all CPUs. */
    ACPI_SERIAL_BEGIN(cpu);
    cpu_cx_lowest_lim = val - 1;
    for (i = 0; i < cpu_ndevices; i++) {
        sc = device_get_softc(cpu_devices[i]);
        sc->cpu_cx_lowest_lim = cpu_cx_lowest_lim;
        acpi_cpu_set_cx_lowest(sc);
    }
    ACPI_SERIAL_END(cpu);

    return (0);
}