/*-
 * Copyright (c) 2003-2005 Nate Lawson (SDG)
 * Copyright (c) 2001 Michael Smith
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/12.0/sys/dev/acpica/acpi_cpu.c 333334 2018-05-07 21:09:17Z imp $");

#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/pcpu.h>
#include <sys/power.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/smp.h>

#include <dev/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#if defined(__amd64__) || defined(__i386__)
#include <machine/clock.h>
#include <machine/specialreg.h>
#include <machine/md_var.h>
#endif
#include <sys/rman.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>

#include <dev/acpica/acpivar.h>

/*
 * Support for ACPI Processor devices, including C[1-3] sleep states.
 */

/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT    ACPI_PROCESSOR
ACPI_MODULE_NAME("PROCESSOR")

struct acpi_cx {
    struct resource *p_lvlx;    /* Register to read to enter state. */
    uint32_t    type;           /* C1-3 (C4 and up treated as C3). */
    uint32_t    trans_lat;      /* Transition latency (usec). */
    uint32_t    power;          /* Power consumed (mW). */
    int         res_type;       /* Resource type for p_lvlx. */
    int         res_rid;        /* Resource ID for p_lvlx. */
    bool        do_mwait;       /* Use MWAIT (not I/O) to enter this state. */
    uint32_t    mwait_hint;     /* MWAIT hint from the _CST FFH entry. */
    bool        mwait_hw_coord; /* Hardware coordinates this state. */
    bool        mwait_bm_avoidance; /* State needs bus-master avoidance. */
};
#define MAX_CX_STATES    8

struct acpi_cpu_softc {
    device_t    cpu_dev;
    ACPI_HANDLE cpu_handle;
    struct pcpu *cpu_pcpu;
    uint32_t    cpu_acpi_id;    /* ACPI processor id */
    uint32_t    cpu_p_blk;      /* ACPI P_BLK location */
    uint32_t    cpu_p_blk_len;  /* P_BLK length (must be 6). */
    struct acpi_cx cpu_cx_states[MAX_CX_STATES];
    int         cpu_cx_count;   /* Number of valid Cx states. */
    int         cpu_prev_sleep; /* Last idle sleep duration. */
    int         cpu_features;   /* Child driver supported features. */
    /* Runtime state. */
    int         cpu_non_c2;     /* Index of lowest non-C2 state. */
    int         cpu_non_c3;     /* Index of lowest non-C3 state. */
    u_int       cpu_cx_stats[MAX_CX_STATES]; /* Cx usage history. */
    /* Values for sysctl. */
    struct sysctl_ctx_list cpu_sysctl_ctx;
    struct sysctl_oid *cpu_sysctl_tree;
    int         cpu_cx_lowest;
    int         cpu_cx_lowest_lim;
    int         cpu_disable_idle; /* Disable entry to idle function */
    char        cpu_cx_supported[64];
};

struct acpi_cpu_device {
    struct resource_list ad_rl;
};

#define CPU_GET_REG(reg, width)                                         \
    (bus_space_read_ ## width(rman_get_bustag((reg)),                   \
        rman_get_bushandle((reg)), 0))
#define CPU_SET_REG(reg, width, val)                                    \
    (bus_space_write_ ## width(rman_get_bustag((reg)),                  \
        rman_get_bushandle((reg)), 0, (val)))

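/*
 * The ACPI power-management timer runs at 3.579545 MHz, so roughly four
 * timer ticks elapse per microsecond.  The shift below is a cheap
 * approximation of that division; it underestimates the elapsed time by
 * about 11%, which is acceptable for the sleep-length heuristics here.
 */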
#define PM_USEC(x)    ((x) >> 2)    /* ~4 timer ticks per usec (3.579545 MHz) */

#define ACPI_NOTIFY_CX_STATES    0x81    /* _CST changed. */

#define CPU_QUIRK_NO_C3      (1<<0)    /* C3-type states are not usable. */
#define CPU_QUIRK_NO_BM_CTRL (1<<2)    /* No bus mastering control. */

#define PCI_VENDOR_INTEL     0x8086
#define PCI_DEVICE_82371AB_3 0x7113    /* PIIX4 chipset for quirks. */
#define PCI_REVISION_A_STEP  0
#define PCI_REVISION_B_STEP  1
#define PCI_REVISION_4E      2
#define PCI_REVISION_4M      3
#define PIIX4_DEVACTB_REG    0x58
#define PIIX4_BRLD_EN_IRQ0   (1<<0)
#define PIIX4_BRLD_EN_IRQ    (1<<1)
#define PIIX4_BRLD_EN_IRQ8   (1<<5)
#define PIIX4_STOP_BREAK_MASK (PIIX4_BRLD_EN_IRQ0 | PIIX4_BRLD_EN_IRQ | PIIX4_BRLD_EN_IRQ8)
#define PIIX4_PCNTRL_BST_EN  (1<<10)

#define CST_FFH_VENDOR_INTEL    1
#define CST_FFH_INTEL_CL_C1IO   1
#define CST_FFH_INTEL_CL_MWAIT  2
#define CST_FFH_MWAIT_HW_COORD  0x0001
#define CST_FFH_MWAIT_BM_AVOID  0x0002

#define CPUDEV_DEVICE_ID    "ACPI0007"

/* Allow users to ignore processor orders in MADT. */
static int cpu_unordered;
SYSCTL_INT(_debug_acpi, OID_AUTO, cpu_unordered, CTLFLAG_RDTUN,
    &cpu_unordered, 0,
    "Do not use the MADT to match ACPI Processor objects to CPUs.");

/* Knob to disable acpi_cpu devices */
bool acpi_cpu_disabled = false;

/* Platform hardware resource information. */
static uint32_t cpu_smi_cmd;    /* Value to write to SMI_CMD. */
static uint8_t  cpu_cst_cnt;    /* Indicate we are _CST aware. */
static int      cpu_quirks;     /* Indicate any hardware bugs. */

/* Values for sysctl. */
static struct sysctl_ctx_list cpu_sysctl_ctx;
static struct sysctl_oid *cpu_sysctl_tree;
static int cpu_cx_generic;
static int cpu_cx_lowest_lim;

static device_t *cpu_devices;
static int cpu_ndevices;
static struct acpi_cpu_softc **cpu_softc;
ACPI_SERIAL_DECL(cpu, "ACPI CPU");

static int  acpi_cpu_probe(device_t dev);
static int  acpi_cpu_attach(device_t dev);
static int  acpi_cpu_suspend(device_t dev);
static int  acpi_cpu_resume(device_t dev);
static int  acpi_pcpu_get_id(device_t dev, uint32_t *acpi_id,
    uint32_t *cpu_id);
static struct resource_list *acpi_cpu_get_rlist(device_t dev, device_t child);
static device_t acpi_cpu_add_child(device_t dev, u_int order, const char *name,
    int unit);
static int  acpi_cpu_read_ivar(device_t dev, device_t child, int index,
    uintptr_t *result);
static int  acpi_cpu_shutdown(device_t dev);
static void acpi_cpu_cx_probe(struct acpi_cpu_softc *sc);
static void acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc);
static int  acpi_cpu_cx_cst(struct acpi_cpu_softc *sc);
static void acpi_cpu_startup(void *arg);
static void acpi_cpu_startup_cx(struct acpi_cpu_softc *sc);
static void acpi_cpu_cx_list(struct acpi_cpu_softc *sc);
#if defined(__i386__) || defined(__amd64__)
static void acpi_cpu_idle(sbintime_t sbt);
#endif
static void acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context);
static void acpi_cpu_quirks(void);
static void acpi_cpu_quirks_piix4(void);
static int  acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS);
static int  acpi_cpu_usage_counters_sysctl(SYSCTL_HANDLER_ARGS);
static int  acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc);
static int  acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int  acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
#if defined(__i386__) || defined(__amd64__)
static int  acpi_cpu_method_sysctl(SYSCTL_HANDLER_ARGS);
#endif

static device_method_t acpi_cpu_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,     acpi_cpu_probe),
    DEVMETHOD(device_attach,    acpi_cpu_attach),
    DEVMETHOD(device_detach,    bus_generic_detach),
    DEVMETHOD(device_shutdown,  acpi_cpu_shutdown),
    DEVMETHOD(device_suspend,   acpi_cpu_suspend),
    DEVMETHOD(device_resume,    acpi_cpu_resume),

    /* Bus interface */
    DEVMETHOD(bus_add_child,    acpi_cpu_add_child),
    DEVMETHOD(bus_read_ivar,    acpi_cpu_read_ivar),
    DEVMETHOD(bus_get_resource_list, acpi_cpu_get_rlist),
    DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
    DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
    DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource),
    DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
    DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
    DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
    DEVMETHOD(bus_setup_intr,   bus_generic_setup_intr),
    DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),

    DEVMETHOD_END
};

static driver_t acpi_cpu_driver = {
    "cpu",
    acpi_cpu_methods,
    sizeof(struct acpi_cpu_softc),
};

static devclass_t acpi_cpu_devclass;
DRIVER_MODULE(cpu, acpi, acpi_cpu_driver, acpi_cpu_devclass, 0, 0);
MODULE_DEPEND(cpu, acpi, 1, 1, 1);

static int
acpi_cpu_probe(device_t dev)
{
    static char *cpudev_ids[] = { CPUDEV_DEVICE_ID, NULL };
    int acpi_id, cpu_id;
    ACPI_BUFFER buf;
    ACPI_HANDLE handle;
    ACPI_OBJECT *obj;
    ACPI_STATUS status;
    ACPI_OBJECT_TYPE type;

    if (acpi_disabled("cpu") || acpi_cpu_disabled)
        return (ENXIO);
    type = acpi_get_type(dev);
    if (type != ACPI_TYPE_PROCESSOR && type != ACPI_TYPE_DEVICE)
        return (ENXIO);
    if (type == ACPI_TYPE_DEVICE &&
        ACPI_ID_PROBE(device_get_parent(dev), dev, cpudev_ids) == NULL)
        return (ENXIO);

    handle = acpi_get_handle(dev);
    if (cpu_softc == NULL)
        cpu_softc = malloc(sizeof(struct acpi_cpu_softc *) *
            (mp_maxid + 1), M_TEMP /* XXX */, M_WAITOK | M_ZERO);

    if (type == ACPI_TYPE_PROCESSOR) {
        /* Get our Processor object. */
        buf.Pointer = NULL;
        buf.Length = ACPI_ALLOCATE_BUFFER;
        status = AcpiEvaluateObject(handle, NULL, NULL, &buf);
        if (ACPI_FAILURE(status)) {
            device_printf(dev, "probe failed to get Processor obj - %s\n",
                AcpiFormatException(status));
            return (ENXIO);
        }
        obj = (ACPI_OBJECT *)buf.Pointer;
        if (obj->Type != ACPI_TYPE_PROCESSOR) {
            device_printf(dev, "Processor object has bad type %d\n",
                obj->Type);
            AcpiOsFree(obj);
            return (ENXIO);
        }

        /*
         * Find the processor associated with our unit.  We could use
         * the ProcId as a key; however, some boxes do not have the same
         * values in their Processor objects as the ProcId values in the
         * MADT.
         */
        acpi_id = obj->Processor.ProcId;
        AcpiOsFree(obj);
    } else {
        status = acpi_GetInteger(handle, "_UID", &acpi_id);
        if (ACPI_FAILURE(status)) {
            device_printf(dev, "Device object has bad value - %s\n",
                AcpiFormatException(status));
            return (ENXIO);
        }
    }
    if (acpi_pcpu_get_id(dev, &acpi_id, &cpu_id) != 0)
        return (ENXIO);

    /*
     * Check if we already probed this processor.  We scan the bus twice
     * so it's possible we've already seen this one.
     */
    if (cpu_softc[cpu_id] != NULL)
        return (ENXIO);

    /* Mark this processor as in-use and save our derived id for attach. */
    cpu_softc[cpu_id] = (void *)1;
    acpi_set_private(dev, (void *)(intptr_t)cpu_id);
    device_set_desc(dev, "ACPI CPU");

    if (!bootverbose && device_get_unit(dev) != 0) {
        device_quiet(dev);
        device_quiet_children(dev);
    }

    return (0);
}

static int
acpi_cpu_attach(device_t dev)
{
    ACPI_BUFFER buf;
    ACPI_OBJECT arg, *obj;
    ACPI_OBJECT_LIST arglist;
    struct pcpu *pcpu_data;
    struct acpi_cpu_softc *sc;
    struct acpi_softc *acpi_sc;
    ACPI_STATUS status;
    u_int features;
    int cpu_id, drv_count, i;
    driver_t **drivers;
    uint32_t cap_set[3];

    /* UUID needed by _OSC evaluation */
    static uint8_t cpu_oscuuid[16] = { 0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29,
                                       0xBE, 0x47, 0x9E, 0xBD, 0xD8, 0x70,
                                       0x58, 0x71, 0x39, 0x53 };

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = device_get_softc(dev);
    sc->cpu_dev = dev;
    sc->cpu_handle = acpi_get_handle(dev);
    cpu_id = (int)(intptr_t)acpi_get_private(dev);
    cpu_softc[cpu_id] = sc;
    pcpu_data = pcpu_find(cpu_id);
    pcpu_data->pc_device = dev;
    sc->cpu_pcpu = pcpu_data;
    cpu_smi_cmd = AcpiGbl_FADT.SmiCommand;
    cpu_cst_cnt = AcpiGbl_FADT.CstControl;

    if (acpi_get_type(dev) == ACPI_TYPE_PROCESSOR) {
        buf.Pointer = NULL;
        buf.Length = ACPI_ALLOCATE_BUFFER;
        status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf);
        if (ACPI_FAILURE(status)) {
            device_printf(dev, "attach failed to get Processor obj - %s\n",
                AcpiFormatException(status));
            return (ENXIO);
        }
        obj = (ACPI_OBJECT *)buf.Pointer;
        sc->cpu_p_blk = obj->Processor.PblkAddress;
        sc->cpu_p_blk_len = obj->Processor.PblkLength;
        sc->cpu_acpi_id = obj->Processor.ProcId;
        AcpiOsFree(obj);
    } else {
        KASSERT(acpi_get_type(dev) == ACPI_TYPE_DEVICE,
            ("Unexpected ACPI object"));
        status = acpi_GetInteger(sc->cpu_handle, "_UID", &sc->cpu_acpi_id);
        if (ACPI_FAILURE(status)) {
            device_printf(dev, "Device object has bad value - %s\n",
                AcpiFormatException(status));
            return (ENXIO);
        }
        sc->cpu_p_blk = 0;
        sc->cpu_p_blk_len = 0;
    }
    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_BLK at %#x/%d\n",
        device_get_unit(dev), sc->cpu_p_blk, sc->cpu_p_blk_len));

    /*
     * If this is the first cpu we attach, create and initialize the generic
     * resources that will be used by all acpi cpu devices.
     */
    if (device_get_unit(dev) == 0) {
        /* Assume we won't be using generic Cx mode by default */
        cpu_cx_generic = FALSE;

        /* Install hw.acpi.cpu sysctl tree */
        acpi_sc = acpi_device_get_parent_softc(dev);
        sysctl_ctx_init(&cpu_sysctl_ctx);
        cpu_sysctl_tree = SYSCTL_ADD_NODE(&cpu_sysctl_ctx,
            SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree), OID_AUTO, "cpu",
            CTLFLAG_RD, 0, "node for CPU children");
    }

    /*
     * Before calling any CPU methods, collect child driver feature hints
     * and notify ACPI of them.  We support unified SMP power control
     * so advertise this ourselves.  Note this is not the same as independent
     * SMP control where each CPU can have different settings.
     */
    sc->cpu_features = ACPI_CAP_SMP_SAME | ACPI_CAP_SMP_SAME_C3 |
        ACPI_CAP_C1_IO_HALT;

#if defined(__i386__) || defined(__amd64__)
    /*
     * Ask for MWAIT modes if they are not disabled and interrupts work
     * reasonably with MWAIT.
     */
    if (!acpi_disabled("mwait") && cpu_mwait_usable())
        sc->cpu_features |= ACPI_CAP_SMP_C1_NATIVE | ACPI_CAP_SMP_C3_NATIVE;
#endif

    if (devclass_get_drivers(acpi_cpu_devclass, &drivers, &drv_count) == 0) {
        for (i = 0; i < drv_count; i++) {
            if (ACPI_GET_FEATURES(drivers[i], &features) == 0)
                sc->cpu_features |= features;
        }
        free(drivers, M_TEMP);
    }

    /*
     * CPU capabilities are specified in the Intel Processor
     * Vendor-Specific ACPI Interface Specification.  Advertise them via
     * _OSC first; if the platform does not implement _OSC, fall back to
     * the older _PDC method.
     */
    if (sc->cpu_features) {
        cap_set[1] = sc->cpu_features;
        status = acpi_EvaluateOSC(sc->cpu_handle, cpu_oscuuid, 1, 2, cap_set,
            cap_set, false);
        if (ACPI_SUCCESS(status)) {
            if (cap_set[0] != 0)
                device_printf(dev, "_OSC returned status %#x\n", cap_set[0]);
        } else {
            arglist.Pointer = &arg;
            arglist.Count = 1;
            arg.Type = ACPI_TYPE_BUFFER;
            arg.Buffer.Length = sizeof(cap_set);
            arg.Buffer.Pointer = (uint8_t *)cap_set;
            cap_set[0] = 1; /* revision */
            cap_set[1] = 1; /* number of capabilities integers */
            cap_set[2] = sc->cpu_features;
            AcpiEvaluateObject(sc->cpu_handle, "_PDC", &arglist, NULL);
        }
    }

    /* Probe for Cx state support. */
    acpi_cpu_cx_probe(sc);

    return (0);
}

static void
acpi_cpu_postattach(void *unused __unused)
{
    device_t *devices;
    int err;
    int i, n;
    int attached;

    err = devclass_get_devices(acpi_cpu_devclass, &devices, &n);
    if (err != 0) {
        printf("devclass_get_devices(acpi_cpu_devclass) failed\n");
        return;
    }
    attached = 0;
    for (i = 0; i < n; i++)
        if (device_is_attached(devices[i]) &&
            device_get_driver(devices[i]) == &acpi_cpu_driver)
            attached = 1;
    for (i = 0; i < n; i++)
        bus_generic_probe(devices[i]);
    for (i = 0; i < n; i++)
        bus_generic_attach(devices[i]);
    free(devices, M_TEMP);

    if (attached) {
#ifdef EARLY_AP_STARTUP
        acpi_cpu_startup(NULL);
#else
        /* Queue post cpu-probing task handler */
        AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cpu_startup, NULL);
#endif
    }
}

SYSINIT(acpi_cpu, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE,
    acpi_cpu_postattach, NULL);

static void
disable_idle(struct acpi_cpu_softc *sc)
{
    cpuset_t cpuset;

    CPU_SETOF(sc->cpu_pcpu->pc_cpuid, &cpuset);
    sc->cpu_disable_idle = TRUE;

    /*
     * Ensure that the CPU is not in an idle state or in acpi_cpu_idle().
     * This code relies on the fact that the rendezvous IPI cannot be
     * delivered while interrupts are disabled: acpi_cpu_idle() runs in
     * such a context and only re-enables interrupts right before it
     * returns, so once the rendezvous completes, the CPU must be out of
     * the idle function.
     */
    smp_rendezvous_cpus(cpuset, smp_no_rendezvous_barrier, NULL,
        smp_no_rendezvous_barrier, NULL);
}

static void
enable_idle(struct acpi_cpu_softc *sc)
{

    sc->cpu_disable_idle = FALSE;
}

#if defined(__i386__) || defined(__amd64__)
static int
is_idle_disabled(struct acpi_cpu_softc *sc)
{

    return (sc->cpu_disable_idle);
}
#endif

/*
 * Disable any entry to the idle function during suspend and re-enable it
 * during resume.
 */
static int
acpi_cpu_suspend(device_t dev)
{
    int error;

    error = bus_generic_suspend(dev);
    if (error)
        return (error);
    disable_idle(device_get_softc(dev));
    return (0);
}

static int
acpi_cpu_resume(device_t dev)
{

    enable_idle(device_get_softc(dev));
    return (bus_generic_resume(dev));
}

/*
 * Find the processor associated with a given ACPI ID.  By default,
 * use the MADT to map ACPI IDs to APIC IDs and use that to locate a
 * processor.  Some systems have inconsistent ASL and MADT however.
 * For these systems the cpu_unordered tunable can be set in which
 * case we assume that Processor objects are listed in the same order
 * in both the MADT and ASL.
 */
static int
acpi_pcpu_get_id(device_t dev, uint32_t *acpi_id, uint32_t *cpu_id)
{
    struct pcpu *pc;
    uint32_t i, idx;

    KASSERT(acpi_id != NULL, ("Null acpi_id"));
    KASSERT(cpu_id != NULL, ("Null cpu_id"));
    idx = device_get_unit(dev);

    /*
     * If pc_acpi_id for CPU 0 is not initialized (e.g. a non-APIC
     * UP box) use the ACPI ID from the first processor we find.
     */
    if (idx == 0 && mp_ncpus == 1) {
        pc = pcpu_find(0);
        if (pc->pc_acpi_id == 0xffffffff)
            pc->pc_acpi_id = *acpi_id;
        *cpu_id = 0;
        return (0);
    }

    CPU_FOREACH(i) {
        pc = pcpu_find(i);
        KASSERT(pc != NULL, ("no pcpu data for %d", i));
        if (cpu_unordered) {
            if (idx-- == 0) {
                /*
                 * If pc_acpi_id doesn't match the ACPI ID from the
                 * ASL, prefer the MADT-derived value.
                 */
                if (pc->pc_acpi_id != *acpi_id)
                    *acpi_id = pc->pc_acpi_id;
                *cpu_id = pc->pc_cpuid;
                return (0);
            }
        } else {
            if (pc->pc_acpi_id == *acpi_id) {
                if (bootverbose)
                    device_printf(dev,
                        "Processor %s (ACPI ID %u) -> APIC ID %d\n",
                        acpi_name(acpi_get_handle(dev)), *acpi_id,
                        pc->pc_cpuid);
                *cpu_id = pc->pc_cpuid;
                return (0);
            }
        }
    }

    if (bootverbose)
        printf("ACPI: Processor %s (ACPI ID %u) ignored\n",
            acpi_name(acpi_get_handle(dev)), *acpi_id);

    return (ESRCH);
}

static struct resource_list *
acpi_cpu_get_rlist(device_t dev, device_t child)
{
    struct acpi_cpu_device *ad;

    ad = device_get_ivars(child);
    if (ad == NULL)
        return (NULL);
    return (&ad->ad_rl);
}

static device_t
acpi_cpu_add_child(device_t dev, u_int order, const char *name, int unit)
{
    struct acpi_cpu_device *ad;
    device_t child;

    if ((ad = malloc(sizeof(*ad), M_TEMP, M_NOWAIT | M_ZERO)) == NULL)
        return (NULL);

    resource_list_init(&ad->ad_rl);

    child = device_add_child_ordered(dev, order, name, unit);
    if (child != NULL)
        device_set_ivars(child, ad);
    else
        free(ad, M_TEMP);
    return (child);
}

static int
acpi_cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
    struct acpi_cpu_softc *sc;

    sc = device_get_softc(dev);
    switch (index) {
    case ACPI_IVAR_HANDLE:
        *result = (uintptr_t)sc->cpu_handle;
        break;
    case CPU_IVAR_PCPU:
        *result = (uintptr_t)sc->cpu_pcpu;
        break;
#if defined(__amd64__) || defined(__i386__)
    case CPU_IVAR_NOMINAL_MHZ:
        if (tsc_is_invariant) {
            *result = (uintptr_t)(atomic_load_acq_64(&tsc_freq) / 1000000);
            break;
        }
        /* FALLTHROUGH */
#endif
    default:
        return (ENOENT);
    }
    return (0);
}

static int
acpi_cpu_shutdown(device_t dev)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Allow children to shutdown first. */
    bus_generic_shutdown(dev);

    /*
     * Disable any entry to the idle function.
     */
    disable_idle(device_get_softc(dev));

    /*
     * CPU devices are not truly detached and remain referenced,
     * so their resources are not freed.
     */

    return_VALUE (0);
}

static void
acpi_cpu_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cpu_prev_sleep = 1000000;
    sc->cpu_cx_lowest = 0;
    sc->cpu_cx_lowest_lim = 0;

    /*
     * Check for the ACPI 2.0 _CST sleep states object.  If we can't find
     * one, we'll fall back to the generic FADT/P_BLK Cx control method,
     * which is handled by acpi_cpu_startup.  Probing for generic Cx
     * states must be deferred until all CPUs in the system have been
     * probed, since we may already have found CPUs with valid _CST
     * packages.
     */
    if (!cpu_cx_generic && acpi_cpu_cx_cst(sc) != 0) {
        /*
         * We were unable to find a _CST package for this cpu or there
         * was an error parsing it. Switch back to generic mode.
         */
        cpu_cx_generic = TRUE;
        if (bootverbose)
            device_printf(sc->cpu_dev, "switching to generic Cx mode\n");
    }

    /*
     * TODO: _CSD Package should be checked here.
     */
}

static void
acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_GENERIC_ADDRESS gas;
    struct acpi_cx *cx_ptr;

    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cpu_prev_sleep = 1000000;

    /* C1 has been required since just after ACPI 1.0 */
    cx_ptr->type = ACPI_STATE_C1;
    cx_ptr->trans_lat = 0;
    cx_ptr++;
    sc->cpu_non_c2 = sc->cpu_cx_count;
    sc->cpu_non_c3 = sc->cpu_cx_count;
    sc->cpu_cx_count++;

    /*
     * The spec says P_BLK must be 6 bytes long.  However, some systems
     * use the length to indicate which subset of features is present, so
     * we treat a length of 5 as meaning C2 is supported.  Some may also
     * report a length of 7 to indicate another C3, but most use _CST for
     * that (as required), and having "only" C1-C3 is not a hardship.
     */
    if (sc->cpu_p_blk_len < 5)
        return;

    /* Validate and allocate resources for C2 (P_LVL2). */
    gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
    gas.BitWidth = 8;
    if (AcpiGbl_FADT.C2Latency <= 100) {
        gas.Address = sc->cpu_p_blk + 4;
        cx_ptr->res_rid = 0;
        acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &cx_ptr->res_rid,
            &gas, &cx_ptr->p_lvlx, RF_SHAREABLE);
        if (cx_ptr->p_lvlx != NULL) {
            cx_ptr->type = ACPI_STATE_C2;
            cx_ptr->trans_lat = AcpiGbl_FADT.C2Latency;
            cx_ptr++;
            sc->cpu_non_c3 = sc->cpu_cx_count;
            sc->cpu_cx_count++;
        }
    }
    if (sc->cpu_p_blk_len < 6)
        return;

    /* Validate and allocate resources for C3 (P_LVL3). */
    if (AcpiGbl_FADT.C3Latency <= 1000 && !(cpu_quirks & CPU_QUIRK_NO_C3)) {
        gas.Address = sc->cpu_p_blk + 5;
        cx_ptr->res_rid = 1;
        acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &cx_ptr->res_rid,
            &gas, &cx_ptr->p_lvlx, RF_SHAREABLE);
        if (cx_ptr->p_lvlx != NULL) {
            cx_ptr->type = ACPI_STATE_C3;
            cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency;
            cx_ptr++;
            sc->cpu_cx_count++;
        }
    }
}

#if defined(__i386__) || defined(__amd64__)
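/*
 * Decode an Intel FFH _CST entry that selects MWAIT: the low 32 bits of
 * the GAS address carry the MWAIT hint, and the access-size field carries
 * the hardware-coordination and bus-master-avoidance flags.
 */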
static void
acpi_cpu_cx_cst_mwait(struct acpi_cx *cx_ptr, uint64_t address, int accsize)
{

    cx_ptr->do_mwait = true;
    cx_ptr->mwait_hint = address & 0xffffffff;
    cx_ptr->mwait_hw_coord = (accsize & CST_FFH_MWAIT_HW_COORD) != 0;
    cx_ptr->mwait_bm_avoidance = (accsize & CST_FFH_MWAIT_BM_AVOID) != 0;
}
#endif

static void
acpi_cpu_cx_cst_free_plvlx(device_t cpu_dev, struct acpi_cx *cx_ptr)
{

    if (cx_ptr->p_lvlx == NULL)
        return;
    bus_release_resource(cpu_dev, cx_ptr->res_type, cx_ptr->res_rid,
        cx_ptr->p_lvlx);
    cx_ptr->p_lvlx = NULL;
}

/*
 * Parse a _CST package and set up its Cx states.  Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
 */
static int
acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
{
    struct acpi_cx *cx_ptr;
    ACPI_STATUS status;
    ACPI_BUFFER buf;
    ACPI_OBJECT *top;
    ACPI_OBJECT *pkg;
    uint32_t count;
    int i;
#if defined(__i386__) || defined(__amd64__)
    uint64_t address;
    int vendor, class, accsize;
#endif

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, "_CST", NULL, &buf);
    if (ACPI_FAILURE(status))
        return (ENXIO);

    /* _CST is a package with a count and at least one Cx package. */
    top = (ACPI_OBJECT *)buf.Pointer;
    if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) {
        device_printf(sc->cpu_dev, "invalid _CST package\n");
        AcpiOsFree(buf.Pointer);
        return (ENXIO);
    }
    if (count != top->Package.Count - 1) {
        device_printf(sc->cpu_dev, "invalid _CST state count (%d != %d)\n",
            count, top->Package.Count - 1);
        count = top->Package.Count - 1;
    }
    if (count > MAX_CX_STATES) {
        device_printf(sc->cpu_dev, "_CST has too many states (%d)\n", count);
        count = MAX_CX_STATES;
    }

    sc->cpu_non_c2 = 0;
    sc->cpu_non_c3 = 0;
    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;

    /*
     * C1 has been required since just after ACPI 1.0.
     * Reserve the first slot for it.
     */
    cx_ptr->type = ACPI_STATE_C0;
    cx_ptr++;
    sc->cpu_cx_count++;

    /* Set up all valid states. */
    for (i = 0; i < count; i++) {
        pkg = &top->Package.Elements[i + 1];
        if (!ACPI_PKG_VALID(pkg, 4) ||
            acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 ||
            acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 ||
            acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) {
            device_printf(sc->cpu_dev, "skipping invalid Cx state package\n");
            continue;
        }

        /* Validate the state to see if we should use it. */
        switch (cx_ptr->type) {
        case ACPI_STATE_C1:
            acpi_cpu_cx_cst_free_plvlx(sc->cpu_dev, cx_ptr);
#if defined(__i386__) || defined(__amd64__)
            if (acpi_PkgFFH_IntelCpu(pkg, 0, &vendor, &class, &address,
                &accsize) == 0 && vendor == CST_FFH_VENDOR_INTEL) {
                if (class == CST_FFH_INTEL_CL_C1IO) {
                    /* C1 I/O then Halt */
                    cx_ptr->res_rid = sc->cpu_cx_count;
                    bus_set_resource(sc->cpu_dev, SYS_RES_IOPORT,
                        cx_ptr->res_rid, address, 1);
                    cx_ptr->p_lvlx = bus_alloc_resource_any(sc->cpu_dev,
                        SYS_RES_IOPORT, &cx_ptr->res_rid, RF_ACTIVE |
                        RF_SHAREABLE);
                    if (cx_ptr->p_lvlx == NULL) {
                        bus_delete_resource(sc->cpu_dev, SYS_RES_IOPORT,
                            cx_ptr->res_rid);
                        device_printf(sc->cpu_dev,
                            "C1 I/O failed to allocate port %d, "
                            "degrading to C1 Halt\n", (int)address);
                    }
                } else if (class == CST_FFH_INTEL_CL_MWAIT) {
                    acpi_cpu_cx_cst_mwait(cx_ptr, address, accsize);
                }
            }
#endif
            if (sc->cpu_cx_states[0].type == ACPI_STATE_C0) {
                /* This is the first C1 state.  Use the reserved slot. */
                sc->cpu_cx_states[0] = *cx_ptr;
            } else {
                sc->cpu_non_c2 = sc->cpu_cx_count;
                sc->cpu_non_c3 = sc->cpu_cx_count;
                cx_ptr++;
                sc->cpu_cx_count++;
            }
            continue;
        case ACPI_STATE_C2:
            sc->cpu_non_c3 = sc->cpu_cx_count;
            break;
        case ACPI_STATE_C3:
        default:
            if ((cpu_quirks & CPU_QUIRK_NO_C3) != 0) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu%d: C3[%d] not available.\n",
                    device_get_unit(sc->cpu_dev), i));
                continue;
            }
            break;
        }

        /* Free up any previous register. */
        acpi_cpu_cx_cst_free_plvlx(sc->cpu_dev, cx_ptr);

        /* Allocate the control register for C2 or C3. */
#if defined(__i386__) || defined(__amd64__)
        if (acpi_PkgFFH_IntelCpu(pkg, 0, &vendor, &class, &address,
            &accsize) == 0 && vendor == CST_FFH_VENDOR_INTEL &&
            class == CST_FFH_INTEL_CL_MWAIT) {
            /* Native C State Instruction use (mwait) */
            acpi_cpu_cx_cst_mwait(cx_ptr, address, accsize);
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu%d: Got C%d/mwait - %d latency\n",
                device_get_unit(sc->cpu_dev), cx_ptr->type,
                cx_ptr->trans_lat));
            cx_ptr++;
            sc->cpu_cx_count++;
        } else
#endif
        {
            cx_ptr->res_rid = sc->cpu_cx_count;
            acpi_PkgGas(sc->cpu_dev, pkg, 0, &cx_ptr->res_type,
                &cx_ptr->res_rid, &cx_ptr->p_lvlx, RF_SHAREABLE);
            if (cx_ptr->p_lvlx) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu%d: Got C%d - %d latency\n",
                    device_get_unit(sc->cpu_dev), cx_ptr->type,
                    cx_ptr->trans_lat));
                cx_ptr++;
                sc->cpu_cx_count++;
            }
        }
    }
    AcpiOsFree(buf.Pointer);

    /* If C1 state was not found, we need one now. */
    cx_ptr = sc->cpu_cx_states;
    if (cx_ptr->type == ACPI_STATE_C0) {
        cx_ptr->type = ACPI_STATE_C1;
        cx_ptr->trans_lat = 0;
    }

    return (0);
}

/*
 * Call this *after* all CPUs have been attached.
 */
static void
acpi_cpu_startup(void *arg)
{
    struct acpi_cpu_softc *sc;
    int i;

    /* Get set of CPU devices */
    devclass_get_devices(acpi_cpu_devclass, &cpu_devices, &cpu_ndevices);

    /*
     * Set up any quirks that might be necessary now that we have probed
     * all the CPUs.
     */
    acpi_cpu_quirks();

    if (cpu_cx_generic) {
        /*
         * We are using generic Cx mode, probe for available Cx states
         * for all processors.
         */
        for (i = 0; i < cpu_ndevices; i++) {
            sc = device_get_softc(cpu_devices[i]);
            acpi_cpu_generic_cx_probe(sc);
        }
    } else {
        /*
         * We are using _CST mode, so remove the C3 state if necessary.
         * Now that we know for sure that we will be using _CST mode,
         * install our notify handler.
         */
        for (i = 0; i < cpu_ndevices; i++) {
            sc = device_get_softc(cpu_devices[i]);
            if (cpu_quirks & CPU_QUIRK_NO_C3) {
                sc->cpu_cx_count = min(sc->cpu_cx_count, sc->cpu_non_c3 + 1);
            }
            AcpiInstallNotifyHandler(sc->cpu_handle, ACPI_DEVICE_NOTIFY,
                acpi_cpu_notify, sc);
        }
    }

    /* Perform Cx final initialization. */
    for (i = 0; i < cpu_ndevices; i++) {
        sc = device_get_softc(cpu_devices[i]);
        acpi_cpu_startup_cx(sc);
    }

    /* Add a sysctl handler to handle global Cx lowest setting */
    SYSCTL_ADD_PROC(&cpu_sysctl_ctx, SYSCTL_CHILDREN(cpu_sysctl_tree),
        OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
        NULL, 0, acpi_cpu_global_cx_lowest_sysctl, "A",
        "Global lowest Cx sleep state to use");

    /* Take over idling from cpu_idle_default(). */
    cpu_cx_lowest_lim = 0;
    for (i = 0; i < cpu_ndevices; i++) {
        sc = device_get_softc(cpu_devices[i]);
        enable_idle(sc);
    }
#if defined(__i386__) || defined(__amd64__)
    cpu_idle_hook = acpi_cpu_idle;
#endif
}

static void
acpi_cpu_cx_list(struct acpi_cpu_softc *sc)
{
    struct sbuf sb;
    int i;

    /*
     * Set up the list of Cx states, formatted as
     * "C<n>/<type>/<latency in usec>" for each supported state.
     */
    sbuf_new(&sb, sc->cpu_cx_supported, sizeof(sc->cpu_cx_supported),
        SBUF_FIXEDLEN);
    for (i = 0; i < sc->cpu_cx_count; i++)
        sbuf_printf(&sb, "C%d/%d/%d ", i + 1, sc->cpu_cx_states[i].type,
            sc->cpu_cx_states[i].trans_lat);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
}

static void
acpi_cpu_startup_cx(struct acpi_cpu_softc *sc)
{
    acpi_cpu_cx_list(sc);

    SYSCTL_ADD_STRING(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
        OID_AUTO, "cx_supported", CTLFLAG_RD,
        sc->cpu_cx_supported, 0,
        "Cx/microsecond values for supported Cx states");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
        OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
        (void *)sc, 0, acpi_cpu_cx_lowest_sysctl, "A",
        "lowest Cx sleep state to use");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
        OID_AUTO, "cx_usage", CTLTYPE_STRING | CTLFLAG_RD,
        (void *)sc, 0, acpi_cpu_usage_sysctl, "A",
        "percent usage for each Cx state");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
        OID_AUTO, "cx_usage_counters", CTLTYPE_STRING | CTLFLAG_RD,
        (void *)sc, 0, acpi_cpu_usage_counters_sysctl, "A",
        "Cx sleep state counters");
#if defined(__i386__) || defined(__amd64__)
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
        OID_AUTO, "cx_method", CTLTYPE_STRING | CTLFLAG_RD,
        (void *)sc, 0, acpi_cpu_method_sysctl, "A",
        "Cx entrance methods");
#endif

    /* Signal platform that we can handle _CST notification. */
    if (!cpu_cx_generic && cpu_cst_cnt != 0) {
        ACPI_LOCK(acpi);
        AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8);
        ACPI_UNLOCK(acpi);
    }
}

#if defined(__i386__) || defined(__amd64__)
/*
 * Idle the CPU in the lowest state possible.  This function is called with
 * interrupts disabled.  Note that once it re-enables interrupts, a task
 * switch can occur so do not access shared data (i.e. the softc) after
 * interrupts are re-enabled.
 */
static void
acpi_cpu_idle(sbintime_t sbt)
{
    struct acpi_cpu_softc *sc;
    struct acpi_cx *cx_next;
    uint64_t cputicks;
    uint32_t start_time, end_time;
    ACPI_STATUS status;
    int bm_active, cx_next_idx, i, us;

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no ACPI processor object for this CPU.  This occurs
     * for logical CPUs in the HTT case.
     */
    sc = cpu_softc[PCPU_GET(cpuid)];
    if (sc == NULL) {
        acpi_cpu_c1();
        return;
    }

    /* If disabled, take the safe path. */
    if (is_idle_disabled(sc)) {
        acpi_cpu_c1();
        return;
    }

    /*
     * Find the lowest state that has a small enough latency.  Cap the
     * expected sleep length by the time until the next event: sbintime_t
     * carries 2^32 fractional units per second, so shifting right by 12
     * converts it to roughly microseconds (2^32 / 2^12 = 2^20 ~= 10^6).
     */
    us = sc->cpu_prev_sleep;
    if (sbt >= 0 && us > (sbt >> 12))
        us = (sbt >> 12);
    cx_next_idx = 0;
    if (cpu_disable_c2_sleep)
        i = min(sc->cpu_cx_lowest, sc->cpu_non_c2);
    else if (cpu_disable_c3_sleep)
        i = min(sc->cpu_cx_lowest, sc->cpu_non_c3);
    else
        i = sc->cpu_cx_lowest;
    for (; i >= 0; i--) {
        if (sc->cpu_cx_states[i].trans_lat * 3 <= us) {
            cx_next_idx = i;
            break;
        }
    }

    /*
     * Check for bus master activity.  If there was activity, clear
     * the bit and use the lowest non-C3 state.  Note that the USB
     * driver polling for new devices keeps this bit set all the
     * time if USB is loaded.
     */
    if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0 &&
        cx_next_idx > sc->cpu_non_c3) {
        status = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active);
        if (ACPI_SUCCESS(status) && bm_active != 0) {
            AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);
            cx_next_idx = sc->cpu_non_c3;
        }
    }

    /* Select the next state and update statistics. */
    cx_next = &sc->cpu_cx_states[cx_next_idx];
    sc->cpu_cx_stats[cx_next_idx]++;
    KASSERT(cx_next->type != ACPI_STATE_C0, ("acpi_cpu_idle: C0 sleep"));

    /*
     * Execute HLT (or equivalent) and wait for an interrupt.  We can't
     * precisely calculate the time spent in C1 since the place we wake up
     * is an ISR.  Assume we slept no more than half of a quantum, unless
     * we were called inside a critical section, which delays the context
     * switch.
     */
    if (cx_next->type == ACPI_STATE_C1) {
        cputicks = cpu_ticks();
        if (cx_next->p_lvlx != NULL) {
            /* C1 I/O then Halt */
            CPU_GET_REG(cx_next->p_lvlx, 1);
        }
        if (cx_next->do_mwait)
            acpi_cpu_idle_mwait(cx_next->mwait_hint);
        else
            acpi_cpu_c1();
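        /*
         * Convert the elapsed cpu_ticks() delta to roughly microseconds:
         * shifting left by 20 multiplies by 2^20 (~10^6) before dividing
         * by the tick rate in Hz.
         */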
        end_time = ((cpu_ticks() - cputicks) << 20) / cpu_tickrate();
        if (curthread->td_critnest == 0)
            end_time = min(end_time, 500000 / hz);
        /* acpi_cpu_c1() returns with interrupts enabled. */
        if (cx_next->do_mwait)
            ACPI_ENABLE_IRQS();
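        /*
         * Fold the measured sleep into the running estimate as an
         * exponential moving average with a 3:1 weight toward history.
         */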
        sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + end_time) / 4;
        return;
    }

    /*
     * For C3, disable bus master arbitration and enable bus master wake
     * if BM control is available, otherwise flush the CPU cache.
     */
    if (cx_next->type == ACPI_STATE_C3 || cx_next->mwait_bm_avoidance) {
        if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
            AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);
            AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
        } else
            ACPI_FLUSH_CPU_CACHE();
    }

    /*
     * Read from P_LVLx to enter C2(+), checking time spent asleep.
     * Use the ACPI timer for measuring sleep time.  Since we need to
     * get the time very close to the CPU start/stop clock logic, this
     * is the only reliable time source.
     */
    if (cx_next->type == ACPI_STATE_C3) {
        AcpiGetTimer(&start_time);
        cputicks = 0;
    } else {
        start_time = 0;
        cputicks = cpu_ticks();
    }
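    /* Enter the state: natively via MWAIT, or by reading P_LVLx. */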
    if (cx_next->do_mwait)
        acpi_cpu_idle_mwait(cx_next->mwait_hint);
    else
        CPU_GET_REG(cx_next->p_lvlx, 1);

    /*
     * Read the end time twice.  Since it may take an arbitrary time
     * to enter the idle state, the first read may be executed before
     * the processor has stopped.  Doing it again provides enough
     * margin that we are certain to have a correct value.
     */
    AcpiGetTimer(&end_time);
    if (cx_next->type == ACPI_STATE_C3) {
        AcpiGetTimer(&end_time);
        AcpiGetTimerDuration(start_time, end_time, &end_time);
    } else
        end_time = ((cpu_ticks() - cputicks) << 20) / cpu_tickrate();

    /* Enable bus master arbitration and disable bus master wakeup. */
    if ((cx_next->type == ACPI_STATE_C3 || cx_next->mwait_bm_avoidance) &&
        (cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
        AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);
        AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
    }
    ACPI_ENABLE_IRQS();

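    /*
     * As in the C1 path, blend the measured duration into the moving
     * average; PM_USEC() converts PM-timer ticks to microseconds.
     */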
    sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + PM_USEC(end_time)) / 4;
}
#endif

/*
 * Re-evaluate the _CST object when we are notified that it changed.
 */
static void
acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context)
{
    struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)context;

    if (notify != ACPI_NOTIFY_CX_STATES)
        return;

    /*
     * C-state data for target CPU is going to be in flux while we execute
     * acpi_cpu_cx_cst, so disable entering acpi_cpu_idle.
     * Also, it may happen that multiple ACPI taskqueues may concurrently
     * execute notifications for the same CPU.  ACPI_SERIAL is used to
     * protect against that.
     */
    ACPI_SERIAL_BEGIN(cpu);
    disable_idle(sc);

    /* Update the list of Cx states. */
    acpi_cpu_cx_cst(sc);
    acpi_cpu_cx_list(sc);
    acpi_cpu_set_cx_lowest(sc);

    enable_idle(sc);
    ACPI_SERIAL_END(cpu);

    acpi_UserNotify("PROCESSOR", sc->cpu_handle, notify);
}

static void
acpi_cpu_quirks(void)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /*
     * Bus mastering arbitration control is needed to keep caches coherent
     * while sleeping in C3.  If it's not present but a working flush cache
     * instruction is present, flush the caches before entering C3 instead.
     * Otherwise, just disable C3 completely.
     */
    if (AcpiGbl_FADT.Pm2ControlBlock == 0 ||
        AcpiGbl_FADT.Pm2ControlLength == 0) {
        if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) &&
            (AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0) {
            cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu: no BM control, using flush cache method\n"));
        } else {
            cpu_quirks |= CPU_QUIRK_NO_C3;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu: no BM control, C3 not available\n"));
        }
    }

    /*
     * If we are using generic Cx mode, C3 on multiple CPUs requires using
     * the expensive flush cache instruction.
     */
    if (cpu_cx_generic && mp_ncpus > 1) {
        cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
            "acpi_cpu: SMP, using flush cache mode for C3\n"));
    }

    /* Look for various quirks of the PIIX4 part. */
    acpi_cpu_quirks_piix4();
}

static void
acpi_cpu_quirks_piix4(void)
{
#ifdef __i386__
    device_t acpi_dev;
    uint32_t val;
    ACPI_STATUS status;

    acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
    if (acpi_dev != NULL) {
        switch (pci_get_revid(acpi_dev)) {
        /*
         * Disable C3 support for all PIIX4 chipsets.  Some of these parts
         * do not report the BMIDE status to the BM status register and
         * others have a livelock bug if Type-F DMA is enabled.  Linux
         * works around the BMIDE bug by reading the BM status directly
         * but we take the simpler approach of disabling C3 for these
         * parts.
         *
         * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
         * Livelock") from the January 2002 PIIX4 specification update.
         * Applies to all PIIX4 models.
         *
         * Also, make sure that all interrupts cause a "Stop Break"
         * event to exit from C2 state.
         * Also, BRLD_EN_BM (ACPI_BITREG_BUS_MASTER_RLD in ACPI-speak)
         * should be set to zero, otherwise it causes C2 to short-sleep.
         * PIIX4 doesn't properly support C3 and bus master activity
         * need not break out of C2.
         */
        case PCI_REVISION_A_STEP:
        case PCI_REVISION_B_STEP:
        case PCI_REVISION_4E:
        case PCI_REVISION_4M:
            cpu_quirks |= CPU_QUIRK_NO_C3;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu: working around PIIX4 bug, disabling C3\n"));

            val = pci_read_config(acpi_dev, PIIX4_DEVACTB_REG, 4);
            if ((val & PIIX4_STOP_BREAK_MASK) != PIIX4_STOP_BREAK_MASK) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu: PIIX4: enabling IRQs to generate Stop Break\n"));
                val |= PIIX4_STOP_BREAK_MASK;
                pci_write_config(acpi_dev, PIIX4_DEVACTB_REG, val, 4);
            }
            status = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_RLD, &val);
            if (ACPI_SUCCESS(status) && val != 0) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu: PIIX4: reset BRLD_EN_BM\n"));
                AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
            }
            break;
        default:
            break;
        }
    }
#endif
}

static int
acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    struct sbuf sb;
    char buf[128];
    int i;
    uintmax_t fract, sum, whole;

    sc = (struct acpi_cpu_softc *)arg1;
    sum = 0;
    for (i = 0; i < sc->cpu_cx_count; i++)
        sum += sc->cpu_cx_stats[i];
    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
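    /*
     * Compute each percentage with integer math only: scale the count by
     * 100 for the whole part, then scale the remainder by 100 again for
     * two decimal places.
     */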
    for (i = 0; i < sc->cpu_cx_count; i++) {
        if (sum > 0) {
            whole = (uintmax_t)sc->cpu_cx_stats[i] * 100;
            fract = (whole % sum) * 100;
            sbuf_printf(&sb, "%u.%02u%% ", (u_int)(whole / sum),
                (u_int)(fract / sum));
        } else
            sbuf_printf(&sb, "0.00%% ");
    }
    sbuf_printf(&sb, "last %dus", sc->cpu_prev_sleep);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
    sbuf_delete(&sb);

    return (0);
}

/*
 * XXX TODO: actually add support to count each entry/exit
 * from the Cx states.
 */
static int
acpi_cpu_usage_counters_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    struct sbuf sb;
    char buf[128];
    int i;

    sc = (struct acpi_cpu_softc *)arg1;

    /* Print out the raw counters */
    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);

    for (i = 0; i < sc->cpu_cx_count; i++) {
        sbuf_printf(&sb, "%u ", sc->cpu_cx_stats[i]);
    }

    sbuf_trim(&sb);
    sbuf_finish(&sb);
    sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
    sbuf_delete(&sb);

    return (0);
}

#if defined(__i386__) || defined(__amd64__)
static int
acpi_cpu_method_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    struct acpi_cx *cx;
    struct sbuf sb;
    char buf[128];
    int i;

    sc = (struct acpi_cpu_softc *)arg1;
    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
    for (i = 0; i < sc->cpu_cx_count; i++) {
        cx = &sc->cpu_cx_states[i];
        sbuf_printf(&sb, "C%d/", i + 1);
        if (cx->do_mwait) {
            sbuf_cat(&sb, "mwait");
            if (cx->mwait_hw_coord)
                sbuf_cat(&sb, "/hwc");
            if (cx->mwait_bm_avoidance)
                sbuf_cat(&sb, "/bma");
        } else if (cx->type == ACPI_STATE_C1) {
            sbuf_cat(&sb, "hlt");
        } else {
            sbuf_cat(&sb, "io");
        }
        if (cx->type == ACPI_STATE_C1 && cx->p_lvlx != NULL)
            sbuf_cat(&sb, "/iohlt");
        sbuf_putc(&sb, ' ');
    }
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
    sbuf_delete(&sb);
    return (0);
}
#endif

static int
acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc)
{
    int i;

    ACPI_SERIAL_ASSERT(cpu);
    sc->cpu_cx_lowest = min(sc->cpu_cx_lowest_lim, sc->cpu_cx_count - 1);

    /* If not disabling, cache the new lowest non-C3 state. */
    sc->cpu_non_c3 = 0;
    for (i = sc->cpu_cx_lowest; i >= 0; i--) {
        if (sc->cpu_cx_states[i].type < ACPI_STATE_C3) {
            sc->cpu_non_c3 = i;
            break;
        }
    }

    /* Reset the statistics counters. */
    bzero(sc->cpu_cx_stats, sizeof(sc->cpu_cx_stats));
    return (0);
}

static int
acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    char state[8];
    int val, error;

    sc = (struct acpi_cpu_softc *)arg1;
    snprintf(state, sizeof(state), "C%d", sc->cpu_cx_lowest_lim + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
        return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
        return (EINVAL);
    if (strcasecmp(state, "Cmax") == 0)
        val = MAX_CX_STATES;
    else {
        val = (int)strtol(state + 1, NULL, 10);
        if (val < 1 || val > MAX_CX_STATES)
            return (EINVAL);
    }

    ACPI_SERIAL_BEGIN(cpu);
    sc->cpu_cx_lowest_lim = val - 1;
    acpi_cpu_set_cx_lowest(sc);
    ACPI_SERIAL_END(cpu);

    return (0);
}

static int
acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    char state[8];
    int val, error, i;

    snprintf(state, sizeof(state), "C%d", cpu_cx_lowest_lim + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
        return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
        return (EINVAL);
    if (strcasecmp(state, "Cmax") == 0)
        val = MAX_CX_STATES;
    else {
        val = (int)strtol(state + 1, NULL, 10);
        if (val < 1 || val > MAX_CX_STATES)
            return (EINVAL);
    }

    /* Update the lowest usable Cx state for all CPUs. */
    ACPI_SERIAL_BEGIN(cpu);
    cpu_cx_lowest_lim = val - 1;
    for (i = 0; i < cpu_ndevices; i++) {
        sc = device_get_softc(cpu_devices[i]);
        sc->cpu_cx_lowest_lim = cpu_cx_lowest_lim;
        acpi_cpu_set_cx_lowest(sc);
    }
    ACPI_SERIAL_END(cpu);

    return (0);
}