/*-
 * Copyright (c) 2003-2005 Nate Lawson (SDG)
 * Copyright (c) 2001 Michael Smith
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.2/sys/dev/acpica/acpi_cpu.c 283892 2015-06-01 19:26:24Z jkim $");

#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/pcpu.h>
#include <sys/power.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/smp.h>

#include <dev/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#if defined(__amd64__) || defined(__i386__)
#include <machine/clock.h>
#endif
#include <sys/rman.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>

#include <dev/acpica/acpivar.h>
/*
 * Support for ACPI Processor devices, including C[1-3] sleep states.
 */

/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT    ACPI_PROCESSOR
ACPI_MODULE_NAME("PROCESSOR")

struct acpi_cx {
    struct resource *p_lvlx;    /* Register to read to enter state. */
    uint32_t    type;           /* C1-3 (C4 and up treated as C3). */
    uint32_t    trans_lat;      /* Transition latency (usec). */
    uint32_t    power;          /* Power consumed (mW). */
    int         res_type;       /* Resource type for p_lvlx. */
    int         res_rid;        /* Resource ID for p_lvlx. */
};
#define MAX_CX_STATES    8

struct acpi_cpu_softc {
    device_t    cpu_dev;
    ACPI_HANDLE cpu_handle;
    struct pcpu *cpu_pcpu;
    uint32_t    cpu_acpi_id;    /* ACPI processor id. */
    uint32_t    cpu_p_blk;      /* ACPI P_BLK location. */
    uint32_t    cpu_p_blk_len;  /* P_BLK length (must be 6). */
    struct acpi_cx cpu_cx_states[MAX_CX_STATES];
    int         cpu_cx_count;   /* Number of valid Cx states. */
    int         cpu_prev_sleep; /* Last idle sleep duration. */
    int         cpu_features;   /* Child driver supported features. */
    /* Runtime state. */
    int         cpu_non_c2;     /* Index of lowest non-C2 state. */
    int         cpu_non_c3;     /* Index of lowest non-C3 state. */
    u_int       cpu_cx_stats[MAX_CX_STATES]; /* Cx usage history. */
    /* Values for sysctl. */
    struct sysctl_ctx_list cpu_sysctl_ctx;
    struct sysctl_oid *cpu_sysctl_tree;
    int         cpu_cx_lowest;
    int         cpu_cx_lowest_lim;
    int         cpu_disable_idle; /* Disable entry to idle function. */
    char        cpu_cx_supported[64];
};

struct acpi_cpu_device {
    struct resource_list ad_rl;
};

#define CPU_GET_REG(reg, width)                                         \
    (bus_space_read_ ## width(rman_get_bustag((reg)),                   \
        rman_get_bushandle((reg)), 0))
#define CPU_SET_REG(reg, width, val)                                    \
    (bus_space_write_ ## width(rman_get_bustag((reg)),                  \
        rman_get_bushandle((reg)), 0, (val)))
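
/*
 * Note: the pasted "width" argument selects among the bus_space_read_1/2/4
 * (and matching bus_space_write_*) accessors, so CPU_GET_REG(reg, 1) expands
 * to a single byte-wide read at offset 0 of the Cx control register resource.
 */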

#define PM_USEC(x)    ((x) >> 2)    /* ~4 clocks per usec (3.579545 MHz) */
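
/*
 * Example: the ACPI PM timer ticks at 3.579545 MHz, i.e. ~3.58 ticks per
 * microsecond. PM_USEC() approximates the division by shifting right by
 * two, so 1000 timer ticks (~279 us of wall time) are reported as 250 us.
 * The estimate errs low by ~10%, which is acceptable for the sleep-time
 * averaging done in acpi_cpu_idle().
 */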

#define ACPI_NOTIFY_CX_STATES    0x81    /* _CST changed. */

#define CPU_QUIRK_NO_C3      (1<<0)    /* C3-type states are not usable. */
#define CPU_QUIRK_NO_BM_CTRL (1<<2)    /* No bus mastering control. */

#define PCI_VENDOR_INTEL      0x8086
#define PCI_DEVICE_82371AB_3  0x7113    /* PIIX4 chipset for quirks. */
#define PCI_REVISION_A_STEP   0
#define PCI_REVISION_B_STEP   1
#define PCI_REVISION_4E       2
#define PCI_REVISION_4M       3
#define PIIX4_DEVACTB_REG     0x58
#define PIIX4_BRLD_EN_IRQ0    (1<<0)
#define PIIX4_BRLD_EN_IRQ     (1<<1)
#define PIIX4_BRLD_EN_IRQ8    (1<<5)
#define PIIX4_STOP_BREAK_MASK (PIIX4_BRLD_EN_IRQ0 | PIIX4_BRLD_EN_IRQ | PIIX4_BRLD_EN_IRQ8)
#define PIIX4_PCNTRL_BST_EN   (1<<10)
/* Allow users to ignore the processor ordering in the MADT. */
static int cpu_unordered;
TUNABLE_INT("debug.acpi.cpu_unordered", &cpu_unordered);
SYSCTL_INT(_debug_acpi, OID_AUTO, cpu_unordered, CTLFLAG_RDTUN,
    &cpu_unordered, 0,
    "Do not use the MADT to match ACPI Processor objects to CPUs.");

/* Platform hardware resource information. */
static uint32_t cpu_smi_cmd;    /* Value to write to SMI_CMD. */
static uint8_t  cpu_cst_cnt;    /* Indicate we are _CST aware. */
static int      cpu_quirks;     /* Indicate any hardware bugs. */

/* Values for sysctl. */
static struct sysctl_ctx_list cpu_sysctl_ctx;
static struct sysctl_oid *cpu_sysctl_tree;
static int cpu_cx_generic;
static int cpu_cx_lowest_lim;

static device_t *cpu_devices;
static int cpu_ndevices;
static struct acpi_cpu_softc **cpu_softc;
ACPI_SERIAL_DECL(cpu, "ACPI CPU");

static int  acpi_cpu_probe(device_t dev);
static int  acpi_cpu_attach(device_t dev);
static int  acpi_cpu_suspend(device_t dev);
static int  acpi_cpu_resume(device_t dev);
static int  acpi_pcpu_get_id(device_t dev, uint32_t *acpi_id,
                uint32_t *cpu_id);
static struct resource_list *acpi_cpu_get_rlist(device_t dev, device_t child);
static device_t acpi_cpu_add_child(device_t dev, u_int order, const char *name,
                int unit);
static int  acpi_cpu_read_ivar(device_t dev, device_t child, int index,
                uintptr_t *result);
static int  acpi_cpu_shutdown(device_t dev);
static void acpi_cpu_cx_probe(struct acpi_cpu_softc *sc);
static void acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc);
static int  acpi_cpu_cx_cst(struct acpi_cpu_softc *sc);
static void acpi_cpu_startup(void *arg);
static void acpi_cpu_startup_cx(struct acpi_cpu_softc *sc);
static void acpi_cpu_cx_list(struct acpi_cpu_softc *sc);
static void acpi_cpu_idle(sbintime_t sbt);
static void acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context);
static void acpi_cpu_quirks(void);
static void acpi_cpu_quirks_piix4(void);
static int  acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS);
static int  acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc);
static int  acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int  acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);

static device_method_t acpi_cpu_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, acpi_cpu_probe),
    DEVMETHOD(device_attach, acpi_cpu_attach),
    DEVMETHOD(device_detach, bus_generic_detach),
    DEVMETHOD(device_shutdown, acpi_cpu_shutdown),
    DEVMETHOD(device_suspend, acpi_cpu_suspend),
    DEVMETHOD(device_resume, acpi_cpu_resume),

    /* Bus interface */
    DEVMETHOD(bus_add_child, acpi_cpu_add_child),
    DEVMETHOD(bus_read_ivar, acpi_cpu_read_ivar),
    DEVMETHOD(bus_get_resource_list, acpi_cpu_get_rlist),
    DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
    DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
    DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource),
    DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
    DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
    DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
    DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
    DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),

    DEVMETHOD_END
};

static driver_t acpi_cpu_driver = {
    "cpu",
    acpi_cpu_methods,
    sizeof(struct acpi_cpu_softc),
};

static devclass_t acpi_cpu_devclass;
DRIVER_MODULE(cpu, acpi, acpi_cpu_driver, acpi_cpu_devclass, 0, 0);
MODULE_DEPEND(cpu, acpi, 1, 1, 1);

static int
acpi_cpu_probe(device_t dev)
{
    int acpi_id, cpu_id;
    ACPI_BUFFER buf;
    ACPI_HANDLE handle;
    ACPI_OBJECT *obj;
    ACPI_STATUS status;

    if (acpi_disabled("cpu") || acpi_get_type(dev) != ACPI_TYPE_PROCESSOR)
        return (ENXIO);

    handle = acpi_get_handle(dev);
    if (cpu_softc == NULL)
        cpu_softc = malloc(sizeof(struct acpi_cpu_softc *) *
            (mp_maxid + 1), M_TEMP /* XXX */, M_WAITOK | M_ZERO);

    /* Get our Processor object. */
    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
        device_printf(dev, "probe failed to get Processor obj - %s\n",
            AcpiFormatException(status));
        return (ENXIO);
    }
    obj = (ACPI_OBJECT *)buf.Pointer;
    if (obj->Type != ACPI_TYPE_PROCESSOR) {
        device_printf(dev, "Processor object has bad type %d\n", obj->Type);
        AcpiOsFree(obj);
        return (ENXIO);
    }

    /*
     * Find the processor associated with our unit. We could use the
     * ProcId as a key; however, some boxes do not have the same values
     * in their Processor object as the ProcId values in the MADT.
     */
    acpi_id = obj->Processor.ProcId;
    AcpiOsFree(obj);
    if (acpi_pcpu_get_id(dev, &acpi_id, &cpu_id) != 0)
        return (ENXIO);

    /*
     * Check if we already probed this processor. We scan the bus twice
     * so it's possible we've already seen this one.
     */
    if (cpu_softc[cpu_id] != NULL)
        return (ENXIO);

    /* Mark this processor as in-use and save our derived id for attach. */
    cpu_softc[cpu_id] = (void *)1;
    acpi_set_private(dev, (void *)(intptr_t)cpu_id);
    device_set_desc(dev, "ACPI CPU");

    return (0);
}

static int
acpi_cpu_attach(device_t dev)
{
    ACPI_BUFFER buf;
    ACPI_OBJECT arg[4], *obj;
    ACPI_OBJECT_LIST arglist;
    struct pcpu *pcpu_data;
    struct acpi_cpu_softc *sc;
    struct acpi_softc *acpi_sc;
    ACPI_STATUS status;
    u_int features;
    int cpu_id, drv_count, i;
    driver_t **drivers;
    uint32_t cap_set[3];

    /* UUID needed by _OSC evaluation */
    static uint8_t cpu_oscuuid[16] = { 0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29,
                                       0xBE, 0x47, 0x9E, 0xBD, 0xD8, 0x70,
                                       0x58, 0x71, 0x39, 0x53 };
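    /*
     * The bytes above are the little-endian encoding (per ACPI) of the GUID
     * 4077A616-290C-47BE-9EBD-D87058713953 defined for processor _OSC in
     * the Intel Processor Vendor-Specific ACPI Interface Specification.
     */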

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = device_get_softc(dev);
    sc->cpu_dev = dev;
    sc->cpu_handle = acpi_get_handle(dev);
    cpu_id = (int)(intptr_t)acpi_get_private(dev);
    cpu_softc[cpu_id] = sc;
    pcpu_data = pcpu_find(cpu_id);
    pcpu_data->pc_device = dev;
    sc->cpu_pcpu = pcpu_data;
    cpu_smi_cmd = AcpiGbl_FADT.SmiCommand;
    cpu_cst_cnt = AcpiGbl_FADT.CstControl;

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
        device_printf(dev, "attach failed to get Processor obj - %s\n",
            AcpiFormatException(status));
        return (ENXIO);
    }
    obj = (ACPI_OBJECT *)buf.Pointer;
    sc->cpu_p_blk = obj->Processor.PblkAddress;
    sc->cpu_p_blk_len = obj->Processor.PblkLength;
    sc->cpu_acpi_id = obj->Processor.ProcId;
    AcpiOsFree(obj);
    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_BLK at %#x/%d\n",
        device_get_unit(dev), sc->cpu_p_blk, sc->cpu_p_blk_len));

    /*
     * If this is the first CPU we attach, create and initialize the generic
     * resources that will be used by all ACPI CPU devices.
     */
    if (device_get_unit(dev) == 0) {
        /* Assume we won't be using generic Cx mode by default. */
        cpu_cx_generic = FALSE;

        /* Install the hw.acpi.cpu sysctl tree. */
        acpi_sc = acpi_device_get_parent_softc(dev);
        sysctl_ctx_init(&cpu_sysctl_ctx);
        cpu_sysctl_tree = SYSCTL_ADD_NODE(&cpu_sysctl_ctx,
            SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree), OID_AUTO, "cpu",
            CTLFLAG_RD, 0, "node for CPU children");

        /* Queue the post cpu-probing task handler. */
        AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cpu_startup, NULL);
    }

    /*
     * Before calling any CPU methods, collect child driver feature hints
     * and notify ACPI of them. We support unified SMP power control
     * so advertise this ourselves. Note this is not the same as independent
     * SMP control where each CPU can have different settings.
     */
    sc->cpu_features = ACPI_CAP_SMP_SAME | ACPI_CAP_SMP_SAME_C3;
    if (devclass_get_drivers(acpi_cpu_devclass, &drivers, &drv_count) == 0) {
        for (i = 0; i < drv_count; i++) {
            if (ACPI_GET_FEATURES(drivers[i], &features) == 0)
                sc->cpu_features |= features;
        }
        free(drivers, M_TEMP);
    }

    /*
     * CPU capabilities are specified in the
     * Intel Processor Vendor-Specific ACPI Interface Specification.
     */
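    /*
     * _OSC takes four arguments: the UUID identifying the interface, a
     * revision, the number of DWORDs in the capabilities buffer, and the
     * buffer itself. The first DWORD of the buffer is reserved for status
     * bits returned by the method; the second carries our feature flags.
     * If _OSC fails we fall back to the older _PDC method below.
     */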
    if (sc->cpu_features) {
        arglist.Pointer = arg;
        arglist.Count = 4;
        arg[0].Type = ACPI_TYPE_BUFFER;
        arg[0].Buffer.Length = sizeof(cpu_oscuuid);
        arg[0].Buffer.Pointer = cpu_oscuuid;        /* UUID */
        arg[1].Type = ACPI_TYPE_INTEGER;
        arg[1].Integer.Value = 1;                   /* revision */
        arg[2].Type = ACPI_TYPE_INTEGER;
        arg[2].Integer.Value = 1;                   /* count */
        arg[3].Type = ACPI_TYPE_BUFFER;
        arg[3].Buffer.Length = sizeof(cap_set);     /* Capabilities buffer */
        arg[3].Buffer.Pointer = (uint8_t *)cap_set;
        cap_set[0] = 0;                             /* status */
        cap_set[1] = sc->cpu_features;
        status = AcpiEvaluateObject(sc->cpu_handle, "_OSC", &arglist, NULL);
        if (ACPI_SUCCESS(status)) {
            if (cap_set[0] != 0)
                device_printf(dev, "_OSC returned status %#x\n", cap_set[0]);
        } else {
            arglist.Pointer = arg;
            arglist.Count = 1;
            arg[0].Type = ACPI_TYPE_BUFFER;
            arg[0].Buffer.Length = sizeof(cap_set);
            arg[0].Buffer.Pointer = (uint8_t *)cap_set;
            cap_set[0] = 1;    /* revision */
            cap_set[1] = 1;    /* number of capabilities integers */
            cap_set[2] = sc->cpu_features;
            AcpiEvaluateObject(sc->cpu_handle, "_PDC", &arglist, NULL);
        }
    }

    /* Probe for Cx state support. */
    acpi_cpu_cx_probe(sc);

    return (0);
}

static void
acpi_cpu_postattach(void *unused __unused)
{
    device_t *devices;
    int err;
    int i, n;

    err = devclass_get_devices(acpi_cpu_devclass, &devices, &n);
    if (err != 0) {
        printf("devclass_get_devices(acpi_cpu_devclass) failed\n");
        return;
    }
    for (i = 0; i < n; i++)
        bus_generic_probe(devices[i]);
    for (i = 0; i < n; i++)
        bus_generic_attach(devices[i]);
    free(devices, M_TEMP);
}

SYSINIT(acpi_cpu, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE,
    acpi_cpu_postattach, NULL);

static void
disable_idle(struct acpi_cpu_softc *sc)
{
    cpuset_t cpuset;

    CPU_SETOF(sc->cpu_pcpu->pc_cpuid, &cpuset);
    sc->cpu_disable_idle = TRUE;

    /*
     * Ensure that the CPU is not in an idle state or in acpi_cpu_idle().
     * This relies on the fact that the rendezvous IPI cannot be delivered
     * to a context that has interrupts disabled: acpi_cpu_idle() runs in
     * such a context and re-enables interrupts only right before returning.
     */
    smp_rendezvous_cpus(cpuset, smp_no_rendevous_barrier, NULL,
        smp_no_rendevous_barrier, NULL);
}

static void
enable_idle(struct acpi_cpu_softc *sc)
{

    sc->cpu_disable_idle = FALSE;
}

static int
is_idle_disabled(struct acpi_cpu_softc *sc)
{

    return (sc->cpu_disable_idle);
}

/*
 * Disable any entry to the idle function during suspend and re-enable it
 * during resume.
 */
static int
acpi_cpu_suspend(device_t dev)
{
    int error;

    error = bus_generic_suspend(dev);
    if (error)
        return (error);
    disable_idle(device_get_softc(dev));
    return (0);
}

static int
acpi_cpu_resume(device_t dev)
{

    enable_idle(device_get_softc(dev));
    return (bus_generic_resume(dev));
}

/*
 * Find the processor associated with a given ACPI ID. By default,
 * use the MADT to map ACPI IDs to APIC IDs and use that to locate a
 * processor. Some systems, however, have inconsistent ASL and MADT.
 * For those systems the cpu_unordered tunable can be set, in which
 * case we assume that Processor objects are listed in the same order
 * in both the MADT and the ASL.
 */
static int
acpi_pcpu_get_id(device_t dev, uint32_t *acpi_id, uint32_t *cpu_id)
{
    struct pcpu *pc;
    uint32_t i, idx;

    KASSERT(acpi_id != NULL, ("Null acpi_id"));
    KASSERT(cpu_id != NULL, ("Null cpu_id"));
    idx = device_get_unit(dev);

    /*
     * If pc_acpi_id for CPU 0 is not initialized (e.g. a non-APIC
     * UP box) use the ACPI ID from the first processor we find.
     */
    if (idx == 0 && mp_ncpus == 1) {
        pc = pcpu_find(0);
        if (pc->pc_acpi_id == 0xffffffff)
            pc->pc_acpi_id = *acpi_id;
        *cpu_id = 0;
        return (0);
    }

    CPU_FOREACH(i) {
        pc = pcpu_find(i);
        KASSERT(pc != NULL, ("no pcpu data for %d", i));
        if (cpu_unordered) {
            if (idx-- == 0) {
                /*
                 * If pc_acpi_id doesn't match the ACPI ID from the
                 * ASL, prefer the MADT-derived value.
                 */
                if (pc->pc_acpi_id != *acpi_id)
                    *acpi_id = pc->pc_acpi_id;
                *cpu_id = pc->pc_cpuid;
                return (0);
            }
        } else {
            if (pc->pc_acpi_id == *acpi_id) {
                if (bootverbose)
                    device_printf(dev,
                        "Processor %s (ACPI ID %u) -> APIC ID %d\n",
                        acpi_name(acpi_get_handle(dev)), *acpi_id,
                        pc->pc_cpuid);
                *cpu_id = pc->pc_cpuid;
                return (0);
            }
        }
    }

    if (bootverbose)
        printf("ACPI: Processor %s (ACPI ID %u) ignored\n",
            acpi_name(acpi_get_handle(dev)), *acpi_id);

    return (ESRCH);
}

static struct resource_list *
acpi_cpu_get_rlist(device_t dev, device_t child)
{
    struct acpi_cpu_device *ad;

    ad = device_get_ivars(child);
    if (ad == NULL)
        return (NULL);
    return (&ad->ad_rl);
}

static device_t
acpi_cpu_add_child(device_t dev, u_int order, const char *name, int unit)
{
    struct acpi_cpu_device *ad;
    device_t child;

    if ((ad = malloc(sizeof(*ad), M_TEMP, M_NOWAIT | M_ZERO)) == NULL)
        return (NULL);

    resource_list_init(&ad->ad_rl);

    child = device_add_child_ordered(dev, order, name, unit);
    if (child != NULL)
        device_set_ivars(child, ad);
    else
        free(ad, M_TEMP);
    return (child);
}

static int
acpi_cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
    struct acpi_cpu_softc *sc;

    sc = device_get_softc(dev);
    switch (index) {
    case ACPI_IVAR_HANDLE:
        *result = (uintptr_t)sc->cpu_handle;
        break;
    case CPU_IVAR_PCPU:
        *result = (uintptr_t)sc->cpu_pcpu;
        break;
#if defined(__amd64__) || defined(__i386__)
    case CPU_IVAR_NOMINAL_MHZ:
        if (tsc_is_invariant) {
            *result = (uintptr_t)(atomic_load_acq_64(&tsc_freq) / 1000000);
            break;
        }
        /* FALLTHROUGH */
#endif
    default:
        return (ENOENT);
    }
    return (0);
}

static int
acpi_cpu_shutdown(device_t dev)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Allow children to shutdown first. */
    bus_generic_shutdown(dev);

    /*
     * Disable any entry to the idle function.
     */
    disable_idle(device_get_softc(dev));

    /*
     * CPU devices are not truly detached and remain referenced,
     * so their resources are not freed.
     */

    return_VALUE (0);
}

static void
acpi_cpu_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Use an initial sleep value of 1 sec. to start with the lowest idle state. */
    sc->cpu_prev_sleep = 1000000;
    sc->cpu_cx_lowest = 0;
    sc->cpu_cx_lowest_lim = 0;

    /*
     * Check for the ACPI 2.0 _CST sleep states object. If we can't find
     * any, we'll revert to the generic FADT/P_BLK Cx control method, which
     * is handled by acpi_cpu_startup. Probing for generic Cx states is
     * deferred until all CPUs in the system have been probed, since we
     * may already have found CPUs with valid _CST packages.
     */
    if (!cpu_cx_generic && acpi_cpu_cx_cst(sc) != 0) {
        /*
         * We were unable to find a _CST package for this CPU or there
         * was an error parsing it. Switch back to generic mode.
         */
        cpu_cx_generic = TRUE;
        if (bootverbose)
            device_printf(sc->cpu_dev, "switching to generic Cx mode\n");
    }

    /*
     * TODO: _CSD Package should be checked here.
     */
}

static void
acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_GENERIC_ADDRESS gas;
    struct acpi_cx *cx_ptr;

    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;

    /* Use an initial sleep value of 1 sec. to start with the lowest idle state. */
    sc->cpu_prev_sleep = 1000000;

    /* C1 has been required since just after ACPI 1.0. */
    cx_ptr->type = ACPI_STATE_C1;
    cx_ptr->trans_lat = 0;
    cx_ptr++;
    sc->cpu_non_c2 = sc->cpu_cx_count;
    sc->cpu_non_c3 = sc->cpu_cx_count;
    sc->cpu_cx_count++;
    cpu_deepest_sleep = 1;

    /*
     * The spec says P_BLK must be 6 bytes long. However, some systems
     * use it to indicate that only a subset of the features is present,
     * so we take a length of 5 to mean C2 is available. Some may also
     * report a length of 7 to indicate another C3, but most use _CST for
     * that (as required) and having "only" C1-C3 is not a hardship.
     */
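    /*
     * For reference, the 6-byte P_BLK is laid out as the 4-byte P_CNT
     * processor control register followed by the 1-byte P_LVL2 and P_LVL3
     * sleep registers, which is why C2 reads P_BLK+4 and C3 reads P_BLK+5
     * below.
     */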
    if (sc->cpu_p_blk_len < 5)
        return;

    /* Validate and allocate resources for C2 (P_LVL2). */
    gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
    gas.BitWidth = 8;
    if (AcpiGbl_FADT.C2Latency <= 100) {
        gas.Address = sc->cpu_p_blk + 4;
        cx_ptr->res_rid = 0;
        acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &cx_ptr->res_rid,
            &gas, &cx_ptr->p_lvlx, RF_SHAREABLE);
        if (cx_ptr->p_lvlx != NULL) {
            cx_ptr->type = ACPI_STATE_C2;
            cx_ptr->trans_lat = AcpiGbl_FADT.C2Latency;
            cx_ptr++;
            sc->cpu_non_c3 = sc->cpu_cx_count;
            sc->cpu_cx_count++;
            cpu_deepest_sleep = 2;
        }
    }
    if (sc->cpu_p_blk_len < 6)
        return;

    /* Validate and allocate resources for C3 (P_LVL3). */
    if (AcpiGbl_FADT.C3Latency <= 1000 && !(cpu_quirks & CPU_QUIRK_NO_C3)) {
        gas.Address = sc->cpu_p_blk + 5;
        cx_ptr->res_rid = 1;
        acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &cx_ptr->res_rid,
            &gas, &cx_ptr->p_lvlx, RF_SHAREABLE);
        if (cx_ptr->p_lvlx != NULL) {
            cx_ptr->type = ACPI_STATE_C3;
            cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency;
            cx_ptr++;
            sc->cpu_cx_count++;
            cpu_deepest_sleep = 3;
        }
    }
}

/*
 * Parse a _CST package and set up its Cx states. Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
 */
static int
acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
{
    struct acpi_cx *cx_ptr;
    ACPI_STATUS status;
    ACPI_BUFFER buf;
    ACPI_OBJECT *top;
    ACPI_OBJECT *pkg;
    uint32_t count;
    int i;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, "_CST", NULL, &buf);
    if (ACPI_FAILURE(status))
        return (ENXIO);

    /* _CST is a package with a count and at least one Cx package. */
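    /*
     * Illustrative ASL shape of a _CST object (addresses and values are
     * hypothetical):
     *
     *    Name (_CST, Package () {
     *        2,        // state count
     *        Package () { ResourceTemplate () {
     *            Register (FFixedHW, 0, 0, 0) }, 1, 1, 1000 },    // C1
     *        Package () { ResourceTemplate () {
     *            Register (SystemIO, 8, 0, 0x415) }, 2, 50, 500 } // C2
     *    })
     *
     * Each Cx subpackage is { register, type, latency (us), power (mW) },
     * matching the acpi_PkgInt32() extractions below.
     */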
    top = (ACPI_OBJECT *)buf.Pointer;
    if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) {
        device_printf(sc->cpu_dev, "invalid _CST package\n");
        AcpiOsFree(buf.Pointer);
        return (ENXIO);
    }
    if (count != top->Package.Count - 1) {
        device_printf(sc->cpu_dev, "invalid _CST state count (%d != %d)\n",
            count, top->Package.Count - 1);
        count = top->Package.Count - 1;
    }
    if (count > MAX_CX_STATES) {
        device_printf(sc->cpu_dev, "_CST has too many states (%d)\n", count);
        count = MAX_CX_STATES;
    }

    sc->cpu_non_c2 = 0;
    sc->cpu_non_c3 = 0;
    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;

    /*
     * C1 has been required since just after ACPI 1.0.
     * Reserve the first slot for it.
     */
    cx_ptr->type = ACPI_STATE_C0;
    cx_ptr++;
    sc->cpu_cx_count++;
    cpu_deepest_sleep = 1;

    /* Set up all valid states. */
    for (i = 0; i < count; i++) {
        pkg = &top->Package.Elements[i + 1];
        if (!ACPI_PKG_VALID(pkg, 4) ||
            acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 ||
            acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 ||
            acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) {
            device_printf(sc->cpu_dev, "skipping invalid Cx state package\n");
            continue;
        }

        /* Validate the state to see if we should use it. */
        switch (cx_ptr->type) {
        case ACPI_STATE_C1:
            if (sc->cpu_cx_states[0].type == ACPI_STATE_C0) {
                /* This is the first C1 state. Use the reserved slot. */
                sc->cpu_cx_states[0] = *cx_ptr;
            } else {
                sc->cpu_non_c2 = sc->cpu_cx_count;
                sc->cpu_non_c3 = sc->cpu_cx_count;
                cx_ptr++;
                sc->cpu_cx_count++;
            }
            continue;
        case ACPI_STATE_C2:
            sc->cpu_non_c3 = sc->cpu_cx_count;
            if (cpu_deepest_sleep < 2)
                cpu_deepest_sleep = 2;
            break;
        case ACPI_STATE_C3:
        default:
            if ((cpu_quirks & CPU_QUIRK_NO_C3) != 0) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu%d: C3[%d] not available.\n",
                    device_get_unit(sc->cpu_dev), i));
                continue;
            } else
                cpu_deepest_sleep = 3;
            break;
        }

        /* Free up any previous register. */
        if (cx_ptr->p_lvlx != NULL) {
            bus_release_resource(sc->cpu_dev, cx_ptr->res_type,
                cx_ptr->res_rid, cx_ptr->p_lvlx);
            cx_ptr->p_lvlx = NULL;
        }

        /* Allocate the control register for C2 or C3. */
        cx_ptr->res_rid = sc->cpu_cx_count;
        acpi_PkgGas(sc->cpu_dev, pkg, 0, &cx_ptr->res_type, &cx_ptr->res_rid,
            &cx_ptr->p_lvlx, RF_SHAREABLE);
        if (cx_ptr->p_lvlx) {
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu%d: Got C%d - %d latency\n",
                device_get_unit(sc->cpu_dev), cx_ptr->type,
                cx_ptr->trans_lat));
            cx_ptr++;
            sc->cpu_cx_count++;
        }
    }
    AcpiOsFree(buf.Pointer);

    /* If no C1 state was reported, fill the reserved slot with a default C1. */
    cx_ptr = sc->cpu_cx_states;
    if (cx_ptr->type == ACPI_STATE_C0) {
        cx_ptr->type = ACPI_STATE_C1;
        cx_ptr->trans_lat = 0;
    }

    return (0);
}

/*
 * Call this *after* all CPUs have been attached.
 */
static void
acpi_cpu_startup(void *arg)
{
    struct acpi_cpu_softc *sc;
    int i;

    /* Get the set of CPU devices. */
    devclass_get_devices(acpi_cpu_devclass, &cpu_devices, &cpu_ndevices);

    /*
     * Set up any quirks that might be necessary now that we have probed
     * all the CPUs.
     */
    acpi_cpu_quirks();

    if (cpu_cx_generic) {
        /*
         * We are using generic Cx mode; probe for available Cx states
         * for all processors.
         */
        for (i = 0; i < cpu_ndevices; i++) {
            sc = device_get_softc(cpu_devices[i]);
            acpi_cpu_generic_cx_probe(sc);
        }
    } else {
        /*
         * We are using _CST mode, so remove the C3 state if necessary.
         * Now that we know for sure that we will be using _CST mode,
         * install our notify handler.
         */
        for (i = 0; i < cpu_ndevices; i++) {
            sc = device_get_softc(cpu_devices[i]);
            if (cpu_quirks & CPU_QUIRK_NO_C3) {
                sc->cpu_cx_count = min(sc->cpu_cx_count, sc->cpu_non_c3 + 1);
            }
            AcpiInstallNotifyHandler(sc->cpu_handle, ACPI_DEVICE_NOTIFY,
                acpi_cpu_notify, sc);
        }
    }

    /* Perform Cx final initialization. */
    for (i = 0; i < cpu_ndevices; i++) {
        sc = device_get_softc(cpu_devices[i]);
        acpi_cpu_startup_cx(sc);
    }

    /* Add a sysctl handler to handle the global Cx lowest setting. */
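    /*
     * The handler below appears as hw.acpi.cpu.cx_lowest; writing, e.g.,
     * "C2" (or "Cmax") propagates the new limit to every CPU, while the
     * per-device dev.cpu.N.cx_lowest knobs installed in
     * acpi_cpu_startup_cx() adjust a single CPU.
     */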
    SYSCTL_ADD_PROC(&cpu_sysctl_ctx, SYSCTL_CHILDREN(cpu_sysctl_tree),
        OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
        NULL, 0, acpi_cpu_global_cx_lowest_sysctl, "A",
        "Global lowest Cx sleep state to use");

    /* Take over idling from cpu_idle_default(). */
    cpu_cx_lowest_lim = 0;
    for (i = 0; i < cpu_ndevices; i++) {
        sc = device_get_softc(cpu_devices[i]);
        enable_idle(sc);
    }
    cpu_idle_hook = acpi_cpu_idle;
}

static void
acpi_cpu_cx_list(struct acpi_cpu_softc *sc)
{
    struct sbuf sb;
    int i;

    /*
     * Set up the list of Cx states.
     */
    sbuf_new(&sb, sc->cpu_cx_supported, sizeof(sc->cpu_cx_supported),
        SBUF_FIXEDLEN);
    for (i = 0; i < sc->cpu_cx_count; i++)
        sbuf_printf(&sb, "C%d/%d/%d ", i + 1, sc->cpu_cx_states[i].type,
            sc->cpu_cx_states[i].trans_lat);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
}

static void
acpi_cpu_startup_cx(struct acpi_cpu_softc *sc)
{
    acpi_cpu_cx_list(sc);

    SYSCTL_ADD_STRING(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
        OID_AUTO, "cx_supported", CTLFLAG_RD,
        sc->cpu_cx_supported, 0,
        "Cx/microsecond values for supported Cx states");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
        OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
        (void *)sc, 0, acpi_cpu_cx_lowest_sysctl, "A",
        "lowest Cx sleep state to use");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
        OID_AUTO, "cx_usage", CTLTYPE_STRING | CTLFLAG_RD,
        (void *)sc, 0, acpi_cpu_usage_sysctl, "A",
        "percent usage for each Cx state");

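    /*
     * Example userland view of the per-CPU knobs added above (values are
     * hypothetical):
     *
     *    dev.cpu.0.cx_supported: C1/1/0 C2/2/50 C3/3/250
     *    dev.cpu.0.cx_lowest: C3
     *    dev.cpu.0.cx_usage: 12.34% 40.00% 47.66% last 1234us
     */
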
    /* Signal the platform that we can handle _CST notifications. */
    if (!cpu_cx_generic && cpu_cst_cnt != 0) {
        ACPI_LOCK(acpi);
        AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8);
        ACPI_UNLOCK(acpi);
    }
}

/*
 * Idle the CPU in the lowest state possible. This function is called with
 * interrupts disabled. Note that once it re-enables interrupts, a task
 * switch can occur, so do not access shared data (i.e. the softc) after
 * interrupts are re-enabled.
 */
static void
acpi_cpu_idle(sbintime_t sbt)
{
    struct acpi_cpu_softc *sc;
    struct acpi_cx *cx_next;
    uint64_t cputicks;
    uint32_t start_time, end_time;
    int bm_active, cx_next_idx, i, us;

    /*
     * Look up our CPU id to get our softc. If it's NULL, we'll use C1
     * since there is no ACPI processor object for this CPU. This occurs
     * for logical CPUs in the HTT case.
     */
    sc = cpu_softc[PCPU_GET(cpuid)];
    if (sc == NULL) {
        acpi_cpu_c1();
        return;
    }

    /* If disabled, take the safe path. */
    if (is_idle_disabled(sc)) {
        acpi_cpu_c1();
        return;
    }

    /* Find the lowest state that has a small enough latency. */
    us = sc->cpu_prev_sleep;
    if (sbt >= 0 && us > (sbt >> 12))
        us = (sbt >> 12);
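    /*
     * Unit note: sbintime_t carries 2^32 fractional units per second, so
     * "sbt >> 12" is in units of 2^-20 s (~0.95 us), a cheap approximation
     * of microseconds. The cpu_ticks() conversions below use the inverse:
     * (ticks << 20) / cpu_tickrate() is elapsed time in the same ~us units.
     */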
    cx_next_idx = 0;
    if (cpu_disable_c2_sleep)
        i = min(sc->cpu_cx_lowest, sc->cpu_non_c2);
    else if (cpu_disable_c3_sleep)
        i = min(sc->cpu_cx_lowest, sc->cpu_non_c3);
    else
        i = sc->cpu_cx_lowest;
    for (; i >= 0; i--) {
        if (sc->cpu_cx_states[i].trans_lat * 3 <= us) {
            cx_next_idx = i;
            break;
        }
    }
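    /*
     * The factor of three above is a heuristic: a state is only eligible
     * if its transition latency is at most a third of the expected sleep
     * time, keeping entry/exit overhead small relative to the time spent
     * asleep.
     */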

    /*
     * Check for bus master activity. If there was activity, clear
     * the bit and use the lowest non-C3 state. Note that the USB
     * driver polling for new devices keeps this bit set all the
     * time if USB is loaded.
     */
    if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0 &&
        cx_next_idx > sc->cpu_non_c3) {
        AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active);
        if (bm_active != 0) {
            AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);
            cx_next_idx = sc->cpu_non_c3;
        }
    }

    /* Select the next state and update statistics. */
    cx_next = &sc->cpu_cx_states[cx_next_idx];
    sc->cpu_cx_stats[cx_next_idx]++;
    KASSERT(cx_next->type != ACPI_STATE_C0, ("acpi_cpu_idle: C0 sleep"));

    /*
     * Execute HLT (or equivalent) and wait for an interrupt. We can't
     * precisely calculate the time spent in C1 since the place we wake up
     * is an ISR. Assume we slept no more than half of a quantum, unless
     * we were called inside a critical section, which delays the context
     * switch.
     */
    if (cx_next->type == ACPI_STATE_C1) {
        cputicks = cpu_ticks();
        acpi_cpu_c1();
        end_time = ((cpu_ticks() - cputicks) << 20) / cpu_tickrate();
        if (curthread->td_critnest == 0)
            end_time = min(end_time, 500000 / hz);
        sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + end_time) / 4;
        return;
    }

    /*
     * For C3, disable bus master arbitration and enable bus master wake
     * if BM control is available, otherwise flush the CPU cache.
     */
    if (cx_next->type == ACPI_STATE_C3) {
        if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
            AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);
            AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
        } else
            ACPI_FLUSH_CPU_CACHE();
    }

    /*
     * Read from P_LVLx to enter C2(+), checking time spent asleep.
     * Use the ACPI timer for measuring sleep time. Since we need to
     * get the time very close to the CPU start/stop clock logic, this
     * is the only reliable time source.
     */
    if (cx_next->type == ACPI_STATE_C3) {
        AcpiHwRead(&start_time, &AcpiGbl_FADT.XPmTimerBlock);
        cputicks = 0;
    } else {
        start_time = 0;
        cputicks = cpu_ticks();
    }
    CPU_GET_REG(cx_next->p_lvlx, 1);

    /*
     * Read the end time twice. Since it may take an arbitrary time
     * to enter the idle state, the first read may be executed before
     * the processor has stopped. Doing it again provides enough
     * margin that we are certain to have a correct value.
     */
    AcpiHwRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock);
    if (cx_next->type == ACPI_STATE_C3) {
        AcpiHwRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock);
        end_time = acpi_TimerDelta(end_time, start_time);
    } else
        end_time = ((cpu_ticks() - cputicks) << 20) / cpu_tickrate();

    /* Enable bus master arbitration and disable bus master wakeup. */
    if (cx_next->type == ACPI_STATE_C3 &&
        (cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
        AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);
        AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
    }
    ACPI_ENABLE_IRQS();

    sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + PM_USEC(end_time)) / 4;
}

/*
 * Re-evaluate the _CST object when we are notified that it changed.
 */
static void
acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context)
{
    struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)context;

    if (notify != ACPI_NOTIFY_CX_STATES)
        return;

    /*
     * The C-state data for the target CPU is going to be in flux while
     * we execute acpi_cpu_cx_cst, so disable entering acpi_cpu_idle.
     * Also, multiple ACPI taskqueues may execute notifications for the
     * same CPU concurrently; ACPI_SERIAL is used to protect against that.
     */
    ACPI_SERIAL_BEGIN(cpu);
    disable_idle(sc);

    /* Update the list of Cx states. */
    acpi_cpu_cx_cst(sc);
    acpi_cpu_cx_list(sc);
    acpi_cpu_set_cx_lowest(sc);

    enable_idle(sc);
    ACPI_SERIAL_END(cpu);

    acpi_UserNotify("PROCESSOR", sc->cpu_handle, notify);
}

static void
acpi_cpu_quirks(void)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /*
     * Bus mastering arbitration control is needed to keep caches coherent
     * while sleeping in C3. If it's not present but a working flush cache
     * instruction is present, flush the caches before entering C3 instead.
     * Otherwise, just disable C3 completely.
     */
    if (AcpiGbl_FADT.Pm2ControlBlock == 0 ||
        AcpiGbl_FADT.Pm2ControlLength == 0) {
        if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) &&
            (AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0) {
            cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu: no BM control, using flush cache method\n"));
        } else {
            cpu_quirks |= CPU_QUIRK_NO_C3;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu: no BM control, C3 not available\n"));
        }
    }

    /*
     * If we are using generic Cx mode, C3 on multiple CPUs requires using
     * the expensive flush cache instruction.
     */
    if (cpu_cx_generic && mp_ncpus > 1) {
        cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
            "acpi_cpu: SMP, using flush cache mode for C3\n"));
    }

    /* Look for various quirks of the PIIX4 part. */
    acpi_cpu_quirks_piix4();
}

static void
acpi_cpu_quirks_piix4(void)
{
#ifdef __i386__
    device_t acpi_dev;
    uint32_t val;

    acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
    if (acpi_dev != NULL) {
        switch (pci_get_revid(acpi_dev)) {
        /*
         * Disable C3 support for all PIIX4 chipsets. Some of these parts
         * do not report the BMIDE status to the BM status register and
         * others have a livelock bug if Type-F DMA is enabled. Linux
         * works around the BMIDE bug by reading the BM status directly
         * but we take the simpler approach of disabling C3 for these
         * parts.
         *
         * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
         * Livelock") from the January 2002 PIIX4 specification update.
         * Applies to all PIIX4 models.
         *
         * Also, make sure that all interrupts cause a "Stop Break"
         * event to exit from C2 state.
         * Also, BRLD_EN_BM (ACPI_BITREG_BUS_MASTER_RLD in ACPI-speak)
         * should be set to zero, otherwise it causes C2 to short-sleep.
         * PIIX4 doesn't properly support C3 and bus master activity
         * need not break out of C2.
         */
        case PCI_REVISION_A_STEP:
        case PCI_REVISION_B_STEP:
        case PCI_REVISION_4E:
        case PCI_REVISION_4M:
            cpu_quirks |= CPU_QUIRK_NO_C3;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu: working around PIIX4 bug, disabling C3\n"));

            val = pci_read_config(acpi_dev, PIIX4_DEVACTB_REG, 4);
            if ((val & PIIX4_STOP_BREAK_MASK) != PIIX4_STOP_BREAK_MASK) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu: PIIX4: enabling IRQs to generate Stop Break\n"));
                val |= PIIX4_STOP_BREAK_MASK;
                pci_write_config(acpi_dev, PIIX4_DEVACTB_REG, val, 4);
            }
            AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_RLD, &val);
            if (val) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu: PIIX4: reset BRLD_EN_BM\n"));
                AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
            }
            break;
        default:
            break;
        }
    }
#endif
}

static int
acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    struct sbuf sb;
    char buf[128];
    int i;
    uintmax_t fract, sum, whole;

    sc = (struct acpi_cpu_softc *)arg1;
    sum = 0;
    for (i = 0; i < sc->cpu_cx_count; i++)
        sum += sc->cpu_cx_stats[i];
    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
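    /*
     * Emit each state's share as a fixed-point percentage: whole / sum is
     * the integer percent and (whole % sum) * 100 / sum supplies two
     * decimal places. E.g., stats of {2, 1} print as "66.66% 33.33%".
     */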
    for (i = 0; i < sc->cpu_cx_count; i++) {
        if (sum > 0) {
            whole = (uintmax_t)sc->cpu_cx_stats[i] * 100;
            fract = (whole % sum) * 100;
            sbuf_printf(&sb, "%u.%02u%% ", (u_int)(whole / sum),
                (u_int)(fract / sum));
        } else
            sbuf_printf(&sb, "0.00%% ");
    }
    sbuf_printf(&sb, "last %dus", sc->cpu_prev_sleep);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
    sbuf_delete(&sb);

    return (0);
}

static int
acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc)
{
    int i;

    ACPI_SERIAL_ASSERT(cpu);
    sc->cpu_cx_lowest = min(sc->cpu_cx_lowest_lim, sc->cpu_cx_count - 1);

    /* If not disabling, cache the new lowest non-C3 state. */
    sc->cpu_non_c3 = 0;
    for (i = sc->cpu_cx_lowest; i >= 0; i--) {
        if (sc->cpu_cx_states[i].type < ACPI_STATE_C3) {
            sc->cpu_non_c3 = i;
            break;
        }
    }

    /* Reset the statistics counters. */
    bzero(sc->cpu_cx_stats, sizeof(sc->cpu_cx_stats));
    return (0);
}

static int
acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    char state[8];
    int val, error;

    sc = (struct acpi_cpu_softc *)arg1;
    snprintf(state, sizeof(state), "C%d", sc->cpu_cx_lowest_lim + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
        return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
        return (EINVAL);
    if (strcasecmp(state, "Cmax") == 0)
        val = MAX_CX_STATES;
    else {
        val = (int)strtol(state + 1, NULL, 10);
        if (val < 1 || val > MAX_CX_STATES)
            return (EINVAL);
    }

    ACPI_SERIAL_BEGIN(cpu);
    sc->cpu_cx_lowest_lim = val - 1;
    acpi_cpu_set_cx_lowest(sc);
    ACPI_SERIAL_END(cpu);

    return (0);
}

static int
acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    char state[8];
    int val, error, i;

    snprintf(state, sizeof(state), "C%d", cpu_cx_lowest_lim + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
        return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
        return (EINVAL);
    if (strcasecmp(state, "Cmax") == 0)
        val = MAX_CX_STATES;
    else {
        val = (int)strtol(state + 1, NULL, 10);
        if (val < 1 || val > MAX_CX_STATES)
            return (EINVAL);
    }

    /* Update the new lowest usable Cx state for all CPUs. */
    ACPI_SERIAL_BEGIN(cpu);
    cpu_cx_lowest_lim = val - 1;
    for (i = 0; i < cpu_ndevices; i++) {
        sc = device_get_softc(cpu_devices[i]);
        sc->cpu_cx_lowest_lim = cpu_cx_lowest_lim;
        acpi_cpu_set_cx_lowest(sc);
    }
    ACPI_SERIAL_END(cpu);

    return (0);
}