/*-
 * Copyright (c) 2003-2005 Nate Lawson (SDG)
 * Copyright (c) 2001 Michael Smith
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/pcpu.h>
#include <sys/power.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/smp.h>

#include <dev/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#if defined(__amd64__) || defined(__i386__)
#include <machine/clock.h>
#include <machine/specialreg.h>
#include <machine/md_var.h>
#endif
#include <sys/rman.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>

#include <dev/acpica/acpivar.h>

/*
 * Support for ACPI Processor devices, including C[1-3] sleep states.
 */

/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT      ACPI_PROCESSOR
ACPI_MODULE_NAME("PROCESSOR")

struct acpi_cx {
    struct resource *p_lvlx;        /* Register to read to enter state. */
    uint32_t         type;          /* C1-3 (C4 and up treated as C3). */
    uint32_t         trans_lat;     /* Transition latency (usec). */
    uint32_t         power;         /* Power consumed (mW). */
    int              res_type;      /* Resource type for p_lvlx. */
    int              res_rid;       /* Resource ID for p_lvlx. */
    bool             do_mwait;
    uint32_t         mwait_hint;
    bool             mwait_hw_coord;
    bool             mwait_bm_avoidance;
};
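
/*
 * Cap on the number of Cx states tracked per CPU; any further states
 * reported by _CST are ignored (see acpi_cpu_cx_cst()).
 */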
#define MAX_CX_STATES    8

struct acpi_cpu_softc {
    device_t         cpu_dev;
    ACPI_HANDLE      cpu_handle;
    struct pcpu     *cpu_pcpu;
    uint32_t         cpu_acpi_id;   /* ACPI processor id */
    uint32_t         cpu_p_blk;     /* ACPI P_BLK location */
    uint32_t         cpu_p_blk_len; /* P_BLK length (must be 6). */
    struct acpi_cx   cpu_cx_states[MAX_CX_STATES];
    int              cpu_cx_count;  /* Number of valid Cx states. */
    int              cpu_prev_sleep; /* Last idle sleep duration. */
    int              cpu_features;  /* Child driver supported features. */
    /* Runtime state. */
    int              cpu_non_c2;    /* Index of lowest non-C2 state. */
    int              cpu_non_c3;    /* Index of lowest non-C3 state. */
    u_int            cpu_cx_stats[MAX_CX_STATES]; /* Cx usage history. */
    /* Values for sysctl. */
    struct sysctl_ctx_list cpu_sysctl_ctx;
    struct sysctl_oid *cpu_sysctl_tree;
    int              cpu_cx_lowest;
    int              cpu_cx_lowest_lim;
    int              cpu_disable_idle; /* Disable entry to idle function. */
    char             cpu_cx_supported[64];
};

struct acpi_cpu_device {
    struct resource_list ad_rl;
};

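/*
 * Helpers for a single bus-space read/write at offset 0 of the allocated
 * P_LVLx register resource.  On the read side, this is the I/O access that
 * actually requests entry into a C2/C3 state.
 */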
#define CPU_GET_REG(reg, width)                                         \
    (bus_space_read_ ## width(rman_get_bustag((reg)),                   \
        rman_get_bushandle((reg)), 0))
#define CPU_SET_REG(reg, width, val)                                    \
    (bus_space_write_ ## width(rman_get_bustag((reg)),                  \
        rman_get_bushandle((reg)), 0, (val)))

#define ACPI_NOTIFY_CX_STATES   0x81    /* _CST changed. */

#define CPU_QUIRK_NO_C3         (1<<0)  /* C3-type states are not usable. */
#define CPU_QUIRK_NO_BM_CTRL    (1<<2)  /* No bus mastering control. */

#define PCI_VENDOR_INTEL        0x8086
#define PCI_DEVICE_82371AB_3    0x7113  /* PIIX4 chipset for quirks. */
#define PCI_REVISION_A_STEP     0
#define PCI_REVISION_B_STEP     1
#define PCI_REVISION_4E         2
#define PCI_REVISION_4M         3
#define PIIX4_DEVACTB_REG       0x58
#define PIIX4_BRLD_EN_IRQ0      (1<<0)
#define PIIX4_BRLD_EN_IRQ       (1<<1)
#define PIIX4_BRLD_EN_IRQ8      (1<<5)
#define PIIX4_STOP_BREAK_MASK   (PIIX4_BRLD_EN_IRQ0 | PIIX4_BRLD_EN_IRQ | \
                                 PIIX4_BRLD_EN_IRQ8)
#define PIIX4_PCNTRL_BST_EN     (1<<10)

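/*
 * Layout of the Intel "Functional Fixed Hardware" _CST entries as decoded
 * by acpi_PkgFFH_IntelCpu(): the vendor/class codes select C1-via-I/O or
 * native MWAIT, the low bits of the GAS address carry the MWAIT hint, and
 * the access-size field carries the coordination/BM-avoidance flags.
 */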
#define CST_FFH_VENDOR_INTEL    1
#define CST_FFH_INTEL_CL_C1IO   1
#define CST_FFH_INTEL_CL_MWAIT  2
#define CST_FFH_MWAIT_HW_COORD  0x0001
#define CST_FFH_MWAIT_BM_AVOID  0x0002

#define CPUDEV_DEVICE_ID        "ACPI0007"

/* Allow users to ignore processor orders in MADT. */
static int cpu_unordered;
SYSCTL_INT(_debug_acpi, OID_AUTO, cpu_unordered, CTLFLAG_RDTUN,
    &cpu_unordered, 0,
    "Do not use the MADT to match ACPI Processor objects to CPUs.");

/* Knob to disable acpi_cpu devices */
bool acpi_cpu_disabled = false;

/* Platform hardware resource information. */
static uint32_t  cpu_smi_cmd;   /* Value to write to SMI_CMD. */
static uint8_t   cpu_cst_cnt;   /* Indicate we are _CST aware. */
static int       cpu_quirks;    /* Indicate any hardware bugs. */

/* Values for sysctl. */
static struct sysctl_ctx_list cpu_sysctl_ctx;
static struct sysctl_oid *cpu_sysctl_tree;
static int       cpu_cx_generic;
static int       cpu_cx_lowest_lim;

static device_t *cpu_devices;
static int       cpu_ndevices;
static struct acpi_cpu_softc **cpu_softc;
ACPI_SERIAL_DECL(cpu, "ACPI CPU");

static int  acpi_cpu_probe(device_t dev);
static int  acpi_cpu_attach(device_t dev);
static int  acpi_cpu_suspend(device_t dev);
static int  acpi_cpu_resume(device_t dev);
static int  acpi_pcpu_get_id(device_t dev, uint32_t *acpi_id,
    uint32_t *cpu_id);
static struct resource_list *acpi_cpu_get_rlist(device_t dev, device_t child);
static device_t acpi_cpu_add_child(device_t dev, u_int order, const char *name,
    int unit);
static int  acpi_cpu_read_ivar(device_t dev, device_t child, int index,
    uintptr_t *result);
static int  acpi_cpu_shutdown(device_t dev);
static void acpi_cpu_cx_probe(struct acpi_cpu_softc *sc);
static void acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc);
static int  acpi_cpu_cx_cst(struct acpi_cpu_softc *sc);
static void acpi_cpu_startup(void *arg);
static void acpi_cpu_startup_cx(struct acpi_cpu_softc *sc);
static void acpi_cpu_cx_list(struct acpi_cpu_softc *sc);
#if defined(__i386__) || defined(__amd64__)
static void acpi_cpu_idle(sbintime_t sbt);
#endif
static void acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context);
static void acpi_cpu_quirks(void);
static void acpi_cpu_quirks_piix4(void);
static int  acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS);
static int  acpi_cpu_usage_counters_sysctl(SYSCTL_HANDLER_ARGS);
static int  acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc);
static int  acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int  acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
#if defined(__i386__) || defined(__amd64__)
static int  acpi_cpu_method_sysctl(SYSCTL_HANDLER_ARGS);
#endif

static device_method_t acpi_cpu_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, acpi_cpu_probe),
    DEVMETHOD(device_attach, acpi_cpu_attach),
    DEVMETHOD(device_detach, bus_generic_detach),
    DEVMETHOD(device_shutdown, acpi_cpu_shutdown),
    DEVMETHOD(device_suspend, acpi_cpu_suspend),
    DEVMETHOD(device_resume, acpi_cpu_resume),

    /* Bus interface */
    DEVMETHOD(bus_add_child, acpi_cpu_add_child),
    DEVMETHOD(bus_read_ivar, acpi_cpu_read_ivar),
    DEVMETHOD(bus_get_resource_list, acpi_cpu_get_rlist),
    DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
    DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
    DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource),
    DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
    DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
    DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
    DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
    DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),

    DEVMETHOD_END
};

static driver_t acpi_cpu_driver = {
    "cpu",
    acpi_cpu_methods,
    sizeof(struct acpi_cpu_softc),
};

static devclass_t acpi_cpu_devclass;
DRIVER_MODULE(cpu, acpi, acpi_cpu_driver, acpi_cpu_devclass, 0, 0);
MODULE_DEPEND(cpu, acpi, 1, 1, 1);

static int
acpi_cpu_probe(device_t dev)
{
    static char *cpudev_ids[] = { CPUDEV_DEVICE_ID, NULL };
    int acpi_id, cpu_id;
    ACPI_BUFFER buf;
    ACPI_HANDLE handle;
    ACPI_OBJECT *obj;
    ACPI_STATUS status;
    ACPI_OBJECT_TYPE type;

    if (acpi_disabled("cpu") || acpi_cpu_disabled)
        return (ENXIO);
    type = acpi_get_type(dev);
    if (type != ACPI_TYPE_PROCESSOR && type != ACPI_TYPE_DEVICE)
        return (ENXIO);
    if (type == ACPI_TYPE_DEVICE &&
        ACPI_ID_PROBE(device_get_parent(dev), dev, cpudev_ids) == NULL)
        return (ENXIO);

    handle = acpi_get_handle(dev);
    if (cpu_softc == NULL)
        cpu_softc = malloc(sizeof(struct acpi_cpu_softc *) *
            (mp_maxid + 1), M_TEMP /* XXX */, M_WAITOK | M_ZERO);

    if (type == ACPI_TYPE_PROCESSOR) {
        /* Get our Processor object. */
        buf.Pointer = NULL;
        buf.Length = ACPI_ALLOCATE_BUFFER;
        status = AcpiEvaluateObject(handle, NULL, NULL, &buf);
        if (ACPI_FAILURE(status)) {
            device_printf(dev, "probe failed to get Processor obj - %s\n",
                AcpiFormatException(status));
            return (ENXIO);
        }
        obj = (ACPI_OBJECT *)buf.Pointer;
        if (obj->Type != ACPI_TYPE_PROCESSOR) {
            device_printf(dev, "Processor object has bad type %d\n",
                obj->Type);
            AcpiOsFree(obj);
            return (ENXIO);
        }
        /*
         * Find the processor associated with our unit.  We could use the
         * ProcId as a key; however, some boxes do not have the same values
         * in their Processor object as the ProcId values in the MADT.
         */
        acpi_id = obj->Processor.ProcId;
        AcpiOsFree(obj);
    } else {
        status = acpi_GetInteger(handle, "_UID", &acpi_id);
        if (ACPI_FAILURE(status)) {
            device_printf(dev, "Device object has bad value - %s\n",
                AcpiFormatException(status));
            return (ENXIO);
        }
    }
    if (acpi_pcpu_get_id(dev, &acpi_id, &cpu_id) != 0)
        return (ENXIO);

    /*
     * Check if we already probed this processor.  We scan the bus twice
     * so it's possible we've already seen this one.
     */
    if (cpu_softc[cpu_id] != NULL)
        return (ENXIO);

    /* Mark this processor as in-use and save our derived id for attach. */
    cpu_softc[cpu_id] = (void *)1;
    acpi_set_private(dev, (void *)(intptr_t)cpu_id);
    device_set_desc(dev, "ACPI CPU");

    if (!bootverbose && device_get_unit(dev) != 0) {
        device_quiet(dev);
        device_quiet_children(dev);
    }

    return (0);
}

static int
acpi_cpu_attach(device_t dev)
{
    ACPI_BUFFER buf;
    ACPI_OBJECT arg, *obj;
    ACPI_OBJECT_LIST arglist;
    struct pcpu *pcpu_data;
    struct acpi_cpu_softc *sc;
    struct acpi_softc *acpi_sc;
    ACPI_STATUS status;
    u_int features;
    int cpu_id, drv_count, i;
    driver_t **drivers;
    uint32_t cap_set[3];

    /* UUID needed by _OSC evaluation */
    static uint8_t cpu_oscuuid[16] = { 0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29,
                                       0xBE, 0x47, 0x9E, 0xBD, 0xD8, 0x70,
                                       0x58, 0x71, 0x39, 0x53 };

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = device_get_softc(dev);
    sc->cpu_dev = dev;
    sc->cpu_handle = acpi_get_handle(dev);
    cpu_id = (int)(intptr_t)acpi_get_private(dev);
    cpu_softc[cpu_id] = sc;
    pcpu_data = pcpu_find(cpu_id);
    pcpu_data->pc_device = dev;
    sc->cpu_pcpu = pcpu_data;
    cpu_smi_cmd = AcpiGbl_FADT.SmiCommand;
    cpu_cst_cnt = AcpiGbl_FADT.CstControl;

    if (acpi_get_type(dev) == ACPI_TYPE_PROCESSOR) {
        buf.Pointer = NULL;
        buf.Length = ACPI_ALLOCATE_BUFFER;
        status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf);
        if (ACPI_FAILURE(status)) {
            device_printf(dev, "attach failed to get Processor obj - %s\n",
                AcpiFormatException(status));
            return (ENXIO);
        }
        obj = (ACPI_OBJECT *)buf.Pointer;
        sc->cpu_p_blk = obj->Processor.PblkAddress;
        sc->cpu_p_blk_len = obj->Processor.PblkLength;
        sc->cpu_acpi_id = obj->Processor.ProcId;
        AcpiOsFree(obj);
    } else {
        KASSERT(acpi_get_type(dev) == ACPI_TYPE_DEVICE,
            ("Unexpected ACPI object"));
        status = acpi_GetInteger(sc->cpu_handle, "_UID", &sc->cpu_acpi_id);
        if (ACPI_FAILURE(status)) {
            device_printf(dev, "Device object has bad value - %s\n",
                AcpiFormatException(status));
            return (ENXIO);
        }
        sc->cpu_p_blk = 0;
        sc->cpu_p_blk_len = 0;
    }
    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_BLK at %#x/%d\n",
        device_get_unit(dev), sc->cpu_p_blk, sc->cpu_p_blk_len));

    /*
     * If this is the first cpu we attach, create and initialize the generic
     * resources that will be used by all acpi cpu devices.
     */
    if (device_get_unit(dev) == 0) {
        /* Assume we won't be using generic Cx mode by default */
        cpu_cx_generic = FALSE;

        /* Install hw.acpi.cpu sysctl tree */
        acpi_sc = acpi_device_get_parent_softc(dev);
        sysctl_ctx_init(&cpu_sysctl_ctx);
        cpu_sysctl_tree = SYSCTL_ADD_NODE(&cpu_sysctl_ctx,
            SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree), OID_AUTO, "cpu",
            CTLFLAG_RD, 0, "node for CPU children");
    }

    /*
     * Before calling any CPU methods, collect child driver feature hints
     * and notify ACPI of them.  We support unified SMP power control
     * so advertise this ourselves.  Note this is not the same as independent
     * SMP control where each CPU can have different settings.
     */
    sc->cpu_features = ACPI_CAP_SMP_SAME | ACPI_CAP_SMP_SAME_C3 |
        ACPI_CAP_C1_IO_HALT;

#if defined(__i386__) || defined(__amd64__)
    /*
     * Ask for MWAIT modes if they are not disabled and interrupts work
     * reasonably with MWAIT.
     */
    if (!acpi_disabled("mwait") && cpu_mwait_usable())
        sc->cpu_features |= ACPI_CAP_SMP_C1_NATIVE | ACPI_CAP_SMP_C3_NATIVE;
#endif

    if (devclass_get_drivers(acpi_cpu_devclass, &drivers, &drv_count) == 0) {
        for (i = 0; i < drv_count; i++) {
            if (ACPI_GET_FEATURES(drivers[i], &features) == 0)
                sc->cpu_features |= features;
        }
        free(drivers, M_TEMP);
    }

    /*
     * CPU capabilities are specified in
     * Intel Processor Vendor-Specific ACPI Interface Specification.
     */
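    /*
     * Note the two cap_set layouts used below: _OSC takes two integers (a
     * status word followed by the capabilities word), while the legacy
     * _PDC fallback takes three (revision, integer count, capabilities).
     */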
    if (sc->cpu_features) {
        cap_set[1] = sc->cpu_features;
        status = acpi_EvaluateOSC(sc->cpu_handle, cpu_oscuuid, 1, 2, cap_set,
            cap_set, false);
        if (ACPI_SUCCESS(status)) {
            if (cap_set[0] != 0)
                device_printf(dev, "_OSC returned status %#x\n", cap_set[0]);
        } else {
            arglist.Pointer = &arg;
            arglist.Count = 1;
            arg.Type = ACPI_TYPE_BUFFER;
            arg.Buffer.Length = sizeof(cap_set);
            arg.Buffer.Pointer = (uint8_t *)cap_set;
            cap_set[0] = 1; /* revision */
            cap_set[1] = 1; /* number of capabilities integers */
            cap_set[2] = sc->cpu_features;
            AcpiEvaluateObject(sc->cpu_handle, "_PDC", &arglist, NULL);
        }
    }

    /* Probe for Cx state support. */
    acpi_cpu_cx_probe(sc);

    return (0);
}

static void
acpi_cpu_postattach(void *unused __unused)
{
    device_t *devices;
    int err;
    int i, n;
    int attached;

    err = devclass_get_devices(acpi_cpu_devclass, &devices, &n);
    if (err != 0) {
        printf("devclass_get_devices(acpi_cpu_devclass) failed\n");
        return;
    }
    attached = 0;
    for (i = 0; i < n; i++)
        if (device_is_attached(devices[i]) &&
            device_get_driver(devices[i]) == &acpi_cpu_driver)
            attached = 1;
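
    /* Probe and attach the children (e.g. cpufreq drivers) of each cpu. */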
    for (i = 0; i < n; i++)
        bus_generic_probe(devices[i]);
    for (i = 0; i < n; i++)
        bus_generic_attach(devices[i]);
    free(devices, M_TEMP);

    if (attached) {
#ifdef EARLY_AP_STARTUP
        acpi_cpu_startup(NULL);
#else
        /* Queue post cpu-probing task handler */
        AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cpu_startup, NULL);
#endif
    }
}

SYSINIT(acpi_cpu, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE,
    acpi_cpu_postattach, NULL);

static void
disable_idle(struct acpi_cpu_softc *sc)
{
    cpuset_t cpuset;

    CPU_SETOF(sc->cpu_pcpu->pc_cpuid, &cpuset);
    sc->cpu_disable_idle = TRUE;

    /*
     * Ensure that the CPU is not in an idle state or in acpi_cpu_idle().
     * This code depends on the fact that the rendezvous IPI cannot
     * penetrate a context where interrupts are disabled; acpi_cpu_idle
     * is called and executed in such a context, with interrupts being
     * re-enabled right before it returns.
     */
    smp_rendezvous_cpus(cpuset, smp_no_rendezvous_barrier, NULL,
        smp_no_rendezvous_barrier, NULL);
}

static void
enable_idle(struct acpi_cpu_softc *sc)
{

    sc->cpu_disable_idle = FALSE;
}

#if defined(__i386__) || defined(__amd64__)
static int
is_idle_disabled(struct acpi_cpu_softc *sc)
{

    return (sc->cpu_disable_idle);
}
#endif

/*
 * Disable any entry to the idle function during suspend and re-enable it
 * during resume.
 */
static int
acpi_cpu_suspend(device_t dev)
{
    int error;

    error = bus_generic_suspend(dev);
    if (error)
        return (error);
    disable_idle(device_get_softc(dev));
    return (0);
}

static int
acpi_cpu_resume(device_t dev)
{

    enable_idle(device_get_softc(dev));
    return (bus_generic_resume(dev));
}

/*
 * Find the processor associated with a given ACPI ID.  By default,
 * use the MADT to map ACPI IDs to APIC IDs and use that to locate a
 * processor.  Some systems have inconsistent ASL and MADT however.
 * For these systems the cpu_unordered tunable can be set in which
 * case we assume that Processor objects are listed in the same order
 * in both the MADT and ASL.
 */
static int
acpi_pcpu_get_id(device_t dev, uint32_t *acpi_id, uint32_t *cpu_id)
{
    struct pcpu *pc;
    uint32_t i, idx;

    KASSERT(acpi_id != NULL, ("Null acpi_id"));
    KASSERT(cpu_id != NULL, ("Null cpu_id"));
    idx = device_get_unit(dev);

    /*
     * If pc_acpi_id for CPU 0 is not initialized (e.g. a non-APIC
     * UP box) use the ACPI ID from the first processor we find.
     */
    if (idx == 0 && mp_ncpus == 1) {
        pc = pcpu_find(0);
        if (pc->pc_acpi_id == 0xffffffff)
            pc->pc_acpi_id = *acpi_id;
        *cpu_id = 0;
        return (0);
    }

    CPU_FOREACH(i) {
        pc = pcpu_find(i);
        KASSERT(pc != NULL, ("no pcpu data for %d", i));
        if (cpu_unordered) {
            if (idx-- == 0) {
                /*
                 * If pc_acpi_id doesn't match the ACPI ID from the
                 * ASL, prefer the MADT-derived value.
                 */
                if (pc->pc_acpi_id != *acpi_id)
                    *acpi_id = pc->pc_acpi_id;
                *cpu_id = pc->pc_cpuid;
                return (0);
            }
        } else {
            if (pc->pc_acpi_id == *acpi_id) {
                if (bootverbose)
                    device_printf(dev,
                        "Processor %s (ACPI ID %u) -> APIC ID %d\n",
                        acpi_name(acpi_get_handle(dev)), *acpi_id,
                        pc->pc_cpuid);
                *cpu_id = pc->pc_cpuid;
                return (0);
            }
        }
    }

    if (bootverbose)
        printf("ACPI: Processor %s (ACPI ID %u) ignored\n",
            acpi_name(acpi_get_handle(dev)), *acpi_id);

    return (ESRCH);
}

static struct resource_list *
acpi_cpu_get_rlist(device_t dev, device_t child)
{
    struct acpi_cpu_device *ad;

    ad = device_get_ivars(child);
    if (ad == NULL)
        return (NULL);
    return (&ad->ad_rl);
}

static device_t
acpi_cpu_add_child(device_t dev, u_int order, const char *name, int unit)
{
    struct acpi_cpu_device *ad;
    device_t child;

    if ((ad = malloc(sizeof(*ad), M_TEMP, M_NOWAIT | M_ZERO)) == NULL)
        return (NULL);

    resource_list_init(&ad->ad_rl);

    child = device_add_child_ordered(dev, order, name, unit);
    if (child != NULL)
        device_set_ivars(child, ad);
    else
        free(ad, M_TEMP);
    return (child);
}

static int
acpi_cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
    struct acpi_cpu_softc *sc;

    sc = device_get_softc(dev);
    switch (index) {
    case ACPI_IVAR_HANDLE:
        *result = (uintptr_t)sc->cpu_handle;
        break;
    case CPU_IVAR_PCPU:
        *result = (uintptr_t)sc->cpu_pcpu;
        break;
#if defined(__amd64__) || defined(__i386__)
    case CPU_IVAR_NOMINAL_MHZ:
        if (tsc_is_invariant) {
            *result = (uintptr_t)(atomic_load_acq_64(&tsc_freq) / 1000000);
            break;
        }
        /* FALLTHROUGH */
#endif
    default:
        return (ENOENT);
    }
    return (0);
}

static int
acpi_cpu_shutdown(device_t dev)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Allow children to shutdown first. */
    bus_generic_shutdown(dev);

    /*
     * Disable any entry to the idle function.
     */
    disable_idle(device_get_softc(dev));

    /*
     * CPU devices are not truly detached and remain referenced,
     * so their resources are not freed.
     */

    return_VALUE (0);
}

static void
acpi_cpu_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cpu_prev_sleep = 1000000;
    sc->cpu_cx_lowest = 0;
    sc->cpu_cx_lowest_lim = 0;

    /*
     * Check for the ACPI 2.0 _CST sleep states object.  If we can't find
     * any, we'll revert to the generic FADT/P_BLK Cx control method, which
     * is handled by acpi_cpu_startup.  We must defer the generic Cx probe
     * until after all CPUs in the system have been probed, since we may
     * already have found CPUs with valid _CST packages.
     */
    if (!cpu_cx_generic && acpi_cpu_cx_cst(sc) != 0) {
        /*
         * We were unable to find a _CST package for this cpu or there
         * was an error parsing it.  Switch back to generic mode.
         */
        cpu_cx_generic = TRUE;
        if (bootverbose)
            device_printf(sc->cpu_dev, "switching to generic Cx mode\n");
    }

    /*
     * TODO: _CSD Package should be checked here.
     */
}

static void
acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_GENERIC_ADDRESS gas;
    struct acpi_cx *cx_ptr;

    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cpu_prev_sleep = 1000000;

    /* C1 has been required since just after ACPI 1.0 */
    cx_ptr->type = ACPI_STATE_C1;
    cx_ptr->trans_lat = 0;
    cx_ptr++;
    sc->cpu_non_c2 = sc->cpu_cx_count;
    sc->cpu_non_c3 = sc->cpu_cx_count;
    sc->cpu_cx_count++;

    /*
     * The spec says P_BLK must be 6 bytes long.  However, some systems
     * use it to indicate a fractional set of features present so we
     * take 5 as C2.  Some may also have a value of 7 to indicate
     * another C3 but most use _CST for this (as required) and having
     * "only" C1-C3 is not a hardship.
     */
    if (sc->cpu_p_blk_len < 5)
        return;

    /* Validate and allocate resources for C2 (P_LVL2). */
    gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
    gas.BitWidth = 8;
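    /* Per the FADT, a C2 latency above 100 us means C2 is unsupported. */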
    if (AcpiGbl_FADT.C2Latency <= 100) {
        gas.Address = sc->cpu_p_blk + 4;
        cx_ptr->res_rid = 0;
        acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &cx_ptr->res_rid,
            &gas, &cx_ptr->p_lvlx, RF_SHAREABLE);
        if (cx_ptr->p_lvlx != NULL) {
            cx_ptr->type = ACPI_STATE_C2;
            cx_ptr->trans_lat = AcpiGbl_FADT.C2Latency;
            cx_ptr++;
            sc->cpu_non_c3 = sc->cpu_cx_count;
            sc->cpu_cx_count++;
        }
    }
    if (sc->cpu_p_blk_len < 6)
        return;

    /* Validate and allocate resources for C3 (P_LVL3). */
    if (AcpiGbl_FADT.C3Latency <= 1000 && !(cpu_quirks & CPU_QUIRK_NO_C3)) {
        gas.Address = sc->cpu_p_blk + 5;
        cx_ptr->res_rid = 1;
        acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &cx_ptr->res_rid,
            &gas, &cx_ptr->p_lvlx, RF_SHAREABLE);
        if (cx_ptr->p_lvlx != NULL) {
            cx_ptr->type = ACPI_STATE_C3;
            cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency;
            cx_ptr++;
            sc->cpu_cx_count++;
        }
    }
}

#if defined(__i386__) || defined(__amd64__)
static void
acpi_cpu_cx_cst_mwait(struct acpi_cx *cx_ptr, uint64_t address, int accsize)
{

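    /*
     * The low 32 bits of the FFH GAS address carry the MWAIT hint for this
     * state; the access-size field carries the hardware-coordination and
     * bus-master-avoidance flags.
     */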
    cx_ptr->do_mwait = true;
    cx_ptr->mwait_hint = address & 0xffffffff;
    cx_ptr->mwait_hw_coord = (accsize & CST_FFH_MWAIT_HW_COORD) != 0;
    cx_ptr->mwait_bm_avoidance = (accsize & CST_FFH_MWAIT_BM_AVOID) != 0;
}
#endif

static void
acpi_cpu_cx_cst_free_plvlx(device_t cpu_dev, struct acpi_cx *cx_ptr)
{

    if (cx_ptr->p_lvlx == NULL)
        return;
    bus_release_resource(cpu_dev, cx_ptr->res_type, cx_ptr->res_rid,
        cx_ptr->p_lvlx);
    cx_ptr->p_lvlx = NULL;
}

/*
 * Parse a _CST package and set up its Cx states.  Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
 */
static int
acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
{
    struct acpi_cx *cx_ptr;
    ACPI_STATUS status;
    ACPI_BUFFER buf;
    ACPI_OBJECT *top;
    ACPI_OBJECT *pkg;
    uint32_t count;
    int i;
#if defined(__i386__) || defined(__amd64__)
    uint64_t address;
    int vendor, class, accsize;
#endif

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, "_CST", NULL, &buf);
    if (ACPI_FAILURE(status))
        return (ENXIO);

    /* _CST is a package with a count and at least one Cx package. */
    top = (ACPI_OBJECT *)buf.Pointer;
    if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) {
        device_printf(sc->cpu_dev, "invalid _CST package\n");
        AcpiOsFree(buf.Pointer);
        return (ENXIO);
    }
    if (count != top->Package.Count - 1) {
        device_printf(sc->cpu_dev, "invalid _CST state count (%d != %d)\n",
            count, top->Package.Count - 1);
        count = top->Package.Count - 1;
    }
    if (count > MAX_CX_STATES) {
        device_printf(sc->cpu_dev, "_CST has too many states (%d)\n", count);
        count = MAX_CX_STATES;
    }

    sc->cpu_non_c2 = 0;
    sc->cpu_non_c3 = 0;
    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;

    /*
     * C1 has been required since just after ACPI 1.0.
     * Reserve the first slot for it.
     */
    cx_ptr->type = ACPI_STATE_C0;
    cx_ptr++;
    sc->cpu_cx_count++;

    /* Set up all valid states. */
    for (i = 0; i < count; i++) {
        pkg = &top->Package.Elements[i + 1];
        if (!ACPI_PKG_VALID(pkg, 4) ||
            acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 ||
            acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 ||
            acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) {
            device_printf(sc->cpu_dev, "skipping invalid Cx state package\n");
            continue;
        }

        /* Validate the state to see if we should use it. */
        switch (cx_ptr->type) {
        case ACPI_STATE_C1:
            acpi_cpu_cx_cst_free_plvlx(sc->cpu_dev, cx_ptr);
#if defined(__i386__) || defined(__amd64__)
            if (acpi_PkgFFH_IntelCpu(pkg, 0, &vendor, &class, &address,
                &accsize) == 0 && vendor == CST_FFH_VENDOR_INTEL) {
                if (class == CST_FFH_INTEL_CL_C1IO) {
                    /* C1 I/O then Halt */
                    cx_ptr->res_rid = sc->cpu_cx_count;
                    bus_set_resource(sc->cpu_dev, SYS_RES_IOPORT,
                        cx_ptr->res_rid, address, 1);
                    cx_ptr->p_lvlx = bus_alloc_resource_any(sc->cpu_dev,
                        SYS_RES_IOPORT, &cx_ptr->res_rid, RF_ACTIVE |
                        RF_SHAREABLE);
                    if (cx_ptr->p_lvlx == NULL) {
                        bus_delete_resource(sc->cpu_dev, SYS_RES_IOPORT,
                            cx_ptr->res_rid);
                        device_printf(sc->cpu_dev,
                            "C1 I/O failed to allocate port %d, "
                            "degrading to C1 Halt\n", (int)address);
                    }
                } else if (class == CST_FFH_INTEL_CL_MWAIT) {
                    acpi_cpu_cx_cst_mwait(cx_ptr, address, accsize);
                }
            }
#endif
            if (sc->cpu_cx_states[0].type == ACPI_STATE_C0) {
                /* This is the first C1 state.  Use the reserved slot. */
                sc->cpu_cx_states[0] = *cx_ptr;
            } else {
                sc->cpu_non_c2 = sc->cpu_cx_count;
                sc->cpu_non_c3 = sc->cpu_cx_count;
                cx_ptr++;
                sc->cpu_cx_count++;
            }
            continue;
        case ACPI_STATE_C2:
            sc->cpu_non_c3 = sc->cpu_cx_count;
            break;
        case ACPI_STATE_C3:
        default:
            if ((cpu_quirks & CPU_QUIRK_NO_C3) != 0) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu%d: C3[%d] not available.\n",
                    device_get_unit(sc->cpu_dev), i));
                continue;
            }
            break;
        }

        /* Free up any previous register. */
        acpi_cpu_cx_cst_free_plvlx(sc->cpu_dev, cx_ptr);

        /* Allocate the control register for C2 or C3. */
#if defined(__i386__) || defined(__amd64__)
        if (acpi_PkgFFH_IntelCpu(pkg, 0, &vendor, &class, &address,
            &accsize) == 0 && vendor == CST_FFH_VENDOR_INTEL &&
            class == CST_FFH_INTEL_CL_MWAIT) {
            /* Native C State Instruction use (mwait) */
            acpi_cpu_cx_cst_mwait(cx_ptr, address, accsize);
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu%d: Got C%d/mwait - %d latency\n",
                device_get_unit(sc->cpu_dev), cx_ptr->type,
                cx_ptr->trans_lat));
            cx_ptr++;
            sc->cpu_cx_count++;
        } else
#endif
        {
            cx_ptr->res_rid = sc->cpu_cx_count;
            acpi_PkgGas(sc->cpu_dev, pkg, 0, &cx_ptr->res_type,
                &cx_ptr->res_rid, &cx_ptr->p_lvlx, RF_SHAREABLE);
            if (cx_ptr->p_lvlx) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu%d: Got C%d - %d latency\n",
                    device_get_unit(sc->cpu_dev), cx_ptr->type,
                    cx_ptr->trans_lat));
                cx_ptr++;
                sc->cpu_cx_count++;
            }
        }
    }
    AcpiOsFree(buf.Pointer);

    /* If a C1 state was not found, we need one now. */
    cx_ptr = sc->cpu_cx_states;
    if (cx_ptr->type == ACPI_STATE_C0) {
        cx_ptr->type = ACPI_STATE_C1;
        cx_ptr->trans_lat = 0;
    }

    return (0);
}

/*
 * Call this *after* all CPUs have been attached.
 */
static void
acpi_cpu_startup(void *arg)
{
    struct acpi_cpu_softc *sc;
    int i;

    /* Get set of CPU devices */
    devclass_get_devices(acpi_cpu_devclass, &cpu_devices, &cpu_ndevices);

    /*
     * Set up any quirks that might be necessary now that we have probed
     * all the CPUs.
     */
    acpi_cpu_quirks();

    if (cpu_cx_generic) {
        /*
         * We are using generic Cx mode, probe for available Cx states
         * for all processors.
         */
        for (i = 0; i < cpu_ndevices; i++) {
            sc = device_get_softc(cpu_devices[i]);
            acpi_cpu_generic_cx_probe(sc);
        }
    } else {
        /*
         * We are using _CST mode; remove the C3 state if necessary.
         * Now that we know for sure we will be using _CST mode,
         * install our notify handler.
         */
        for (i = 0; i < cpu_ndevices; i++) {
            sc = device_get_softc(cpu_devices[i]);
            if (cpu_quirks & CPU_QUIRK_NO_C3) {
                sc->cpu_cx_count = min(sc->cpu_cx_count, sc->cpu_non_c3 + 1);
            }
            AcpiInstallNotifyHandler(sc->cpu_handle, ACPI_DEVICE_NOTIFY,
                acpi_cpu_notify, sc);
        }
    }

    /* Perform Cx final initialization. */
    for (i = 0; i < cpu_ndevices; i++) {
        sc = device_get_softc(cpu_devices[i]);
        acpi_cpu_startup_cx(sc);
    }

    /* Add a sysctl handler to handle global Cx lowest setting */
    SYSCTL_ADD_PROC(&cpu_sysctl_ctx, SYSCTL_CHILDREN(cpu_sysctl_tree),
        OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
        NULL, 0, acpi_cpu_global_cx_lowest_sysctl, "A",
        "Global lowest Cx sleep state to use");

    /* Take over idling from cpu_idle_default(). */
    cpu_cx_lowest_lim = 0;
    for (i = 0; i < cpu_ndevices; i++) {
        sc = device_get_softc(cpu_devices[i]);
        enable_idle(sc);
    }
#if defined(__i386__) || defined(__amd64__)
    cpu_idle_hook = acpi_cpu_idle;
#endif
}

static void
acpi_cpu_cx_list(struct acpi_cpu_softc *sc)
{
    struct sbuf sb;
    int i;

    /*
     * Set up the list of Cx states
     */
    sbuf_new(&sb, sc->cpu_cx_supported, sizeof(sc->cpu_cx_supported),
        SBUF_FIXEDLEN);
    for (i = 0; i < sc->cpu_cx_count; i++)
        sbuf_printf(&sb, "C%d/%d/%d ", i + 1, sc->cpu_cx_states[i].type,
            sc->cpu_cx_states[i].trans_lat);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
}

static void
acpi_cpu_startup_cx(struct acpi_cpu_softc *sc)
{
    acpi_cpu_cx_list(sc);

    SYSCTL_ADD_STRING(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
        OID_AUTO, "cx_supported", CTLFLAG_RD,
        sc->cpu_cx_supported, 0,
        "Cx/microsecond values for supported Cx states");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
        OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
        (void *)sc, 0, acpi_cpu_cx_lowest_sysctl, "A",
        "lowest Cx sleep state to use");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
        OID_AUTO, "cx_usage", CTLTYPE_STRING | CTLFLAG_RD,
        (void *)sc, 0, acpi_cpu_usage_sysctl, "A",
        "percent usage for each Cx state");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
        OID_AUTO, "cx_usage_counters", CTLTYPE_STRING | CTLFLAG_RD,
        (void *)sc, 0, acpi_cpu_usage_counters_sysctl, "A",
        "Cx sleep state counters");
#if defined(__i386__) || defined(__amd64__)
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
        OID_AUTO, "cx_method", CTLTYPE_STRING | CTLFLAG_RD,
        (void *)sc, 0, acpi_cpu_method_sysctl, "A",
        "Cx entrance methods");
#endif

    /* Signal platform that we can handle _CST notification. */
    if (!cpu_cx_generic && cpu_cst_cnt != 0) {
        ACPI_LOCK(acpi);
        AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8);
        ACPI_UNLOCK(acpi);
    }
}

#if defined(__i386__) || defined(__amd64__)
/*
 * Idle the CPU in the lowest state possible.  This function is called with
 * interrupts disabled.  Note that once it re-enables interrupts, a task
 * switch can occur so do not access shared data (i.e. the softc) after
 * interrupts are re-enabled.
 */
static void
acpi_cpu_idle(sbintime_t sbt)
{
    struct acpi_cpu_softc *sc;
    struct acpi_cx *cx_next;
    uint64_t start_ticks, end_ticks;
    uint32_t start_time, end_time;
    ACPI_STATUS status;
    int bm_active, cx_next_idx, i, us;

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no ACPI processor object for this CPU.  This occurs
     * for logical CPUs in the HTT case.
     */
    sc = cpu_softc[PCPU_GET(cpuid)];
    if (sc == NULL) {
        acpi_cpu_c1();
        return;
    }

    /* If disabled, take the safe path. */
    if (is_idle_disabled(sc)) {
        acpi_cpu_c1();
        return;
    }

    /* Find the lowest state that has small enough latency. */
    us = sc->cpu_prev_sleep;
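    /*
     * sbintime_t counts 1/2^32 of a second, so sbt >> 12 is a cheap
     * approximation of microseconds (1 us is about 2^12 of those units).
     * Clamp the expected sleep to the time until the next event.
     */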
    if (sbt >= 0 && us > (sbt >> 12))
        us = (sbt >> 12);
    cx_next_idx = 0;
    if (cpu_disable_c2_sleep)
        i = min(sc->cpu_cx_lowest, sc->cpu_non_c2);
    else if (cpu_disable_c3_sleep)
        i = min(sc->cpu_cx_lowest, sc->cpu_non_c3);
    else
        i = sc->cpu_cx_lowest;
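    /*
     * A deeper state is only worthwhile if the expected sleep is at
     * least about three times its transition latency.
     */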
    for (; i >= 0; i--) {
        if (sc->cpu_cx_states[i].trans_lat * 3 <= us) {
            cx_next_idx = i;
            break;
        }
    }

    /*
     * Check for bus master activity.  If there was activity, clear
     * the bit and use the lowest non-C3 state.  Note that the USB
     * driver polling for new devices keeps this bit set all the
     * time if USB is loaded.
     */
    cx_next = &sc->cpu_cx_states[cx_next_idx];
    if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0 &&
        cx_next_idx > sc->cpu_non_c3 &&
        (!cx_next->do_mwait || cx_next->mwait_bm_avoidance)) {
        status = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS,
            &bm_active);
        if (ACPI_SUCCESS(status) && bm_active != 0) {
            AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);
            cx_next_idx = sc->cpu_non_c3;
            cx_next = &sc->cpu_cx_states[cx_next_idx];
        }
    }

    /* Select the next state and update statistics. */
    sc->cpu_cx_stats[cx_next_idx]++;
    KASSERT(cx_next->type != ACPI_STATE_C0, ("acpi_cpu_idle: C0 sleep"));

    /*
     * Execute HLT (or equivalent) and wait for an interrupt.  We can't
     * precisely calculate the time spent in C1 since the place we wake up
     * is an ISR.  Assume we slept no more than half of a quantum, unless
     * we are called inside a critical section, delaying the context switch.
     */
    if (cx_next->type == ACPI_STATE_C1) {
        start_ticks = cpu_ticks();
        if (cx_next->p_lvlx != NULL) {
            /* C1 I/O then Halt */
            CPU_GET_REG(cx_next->p_lvlx, 1);
        }
        if (cx_next->do_mwait)
            acpi_cpu_idle_mwait(cx_next->mwait_hint);
        else
            acpi_cpu_c1();
        end_ticks = cpu_ticks();
        /* acpi_cpu_c1() returns with interrupts enabled. */
        if (cx_next->do_mwait)
            ACPI_ENABLE_IRQS();
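        /*
         * Convert elapsed CPU ticks to roughly microseconds; the << 20
         * approximates multiplying by 10^6 (2^20 = 1048576).
         */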
        end_time = ((end_ticks - start_ticks) << 20) / cpu_tickrate();
        if (!cx_next->do_mwait && curthread->td_critnest == 0)
            end_time = min(end_time, 500000 / hz);
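        /*
         * Track recent sleep time as an exponential moving average:
         * three parts previous estimate to one part new sample.
         */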
        sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + end_time) / 4;
        return;
    }

    /*
     * For C3, disable bus master arbitration and enable bus master wake
     * if BM control is available, otherwise flush the CPU cache.
     */
    if (cx_next->type == ACPI_STATE_C3) {
        if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
            AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);
            AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
        } else
            ACPI_FLUSH_CPU_CACHE();
    }

    /*
     * Read from P_LVLx to enter C2(+), checking time spent asleep.
     * Use the ACPI timer for measuring sleep time.  Since we need to
     * get the time very close to the CPU start/stop clock logic, this
     * is the only reliable time source.
     */
    if (cx_next->type == ACPI_STATE_C3) {
        AcpiGetTimer(&start_time);
        start_ticks = 0;
    } else {
        start_time = 0;
        start_ticks = cpu_ticks();
    }
    if (cx_next->do_mwait) {
        acpi_cpu_idle_mwait(cx_next->mwait_hint);
    } else {
        CPU_GET_REG(cx_next->p_lvlx, 1);
        /*
         * Read the end time twice.  Since it may take an arbitrary time
         * to enter the idle state, the first read may be executed before
         * the processor has stopped.  Doing it again provides enough
         * margin that we are certain to have a correct value.
         */
        AcpiGetTimer(&end_time);
    }

    if (cx_next->type == ACPI_STATE_C3)
        AcpiGetTimer(&end_time);
    else
        end_ticks = cpu_ticks();

    /* Enable bus master arbitration and disable bus master wakeup. */
    if (cx_next->type == ACPI_STATE_C3 &&
        (cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
        AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);
        AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
    }
    ACPI_ENABLE_IRQS();

    if (cx_next->type == ACPI_STATE_C3)
        AcpiGetTimerDuration(start_time, end_time, &end_time);
    else
        end_time = ((end_ticks - start_ticks) << 20) / cpu_tickrate();
    sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + end_time) / 4;
}
#endif

/*
 * Re-evaluate the _CST object when we are notified that it changed.
 */
static void
acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context)
{
    struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)context;

    if (notify != ACPI_NOTIFY_CX_STATES)
        return;

    /*
     * C-state data for the target CPU is going to be in flux while we
     * execute acpi_cpu_cx_cst, so disable entering acpi_cpu_idle.
     * Multiple ACPI task queues may also concurrently execute
     * notifications for the same CPU; ACPI_SERIAL is used to protect
     * against that.
     */
    ACPI_SERIAL_BEGIN(cpu);
    disable_idle(sc);

    /* Update the list of Cx states. */
    acpi_cpu_cx_cst(sc);
    acpi_cpu_cx_list(sc);
    acpi_cpu_set_cx_lowest(sc);

    enable_idle(sc);
    ACPI_SERIAL_END(cpu);

    acpi_UserNotify("PROCESSOR", sc->cpu_handle, notify);
}

static void
acpi_cpu_quirks(void)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /*
     * Bus mastering arbitration control is needed to keep caches coherent
     * while sleeping in C3.  If it's not present but a working flush cache
     * instruction is present, flush the caches before entering C3 instead.
     * Otherwise, just disable C3 completely.
     */
    if (AcpiGbl_FADT.Pm2ControlBlock == 0 ||
        AcpiGbl_FADT.Pm2ControlLength == 0) {
        if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) &&
            (AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0) {
            cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu: no BM control, using flush cache method\n"));
        } else {
            cpu_quirks |= CPU_QUIRK_NO_C3;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu: no BM control, C3 not available\n"));
        }
    }

    /*
     * If we are using generic Cx mode, C3 on multiple CPUs requires using
     * the expensive flush cache instruction.
     */
    if (cpu_cx_generic && mp_ncpus > 1) {
        cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
            "acpi_cpu: SMP, using flush cache mode for C3\n"));
    }

    /* Look for various quirks of the PIIX4 part. */
    acpi_cpu_quirks_piix4();
}

static void
acpi_cpu_quirks_piix4(void)
{
#ifdef __i386__
    device_t acpi_dev;
    uint32_t val;
    ACPI_STATUS status;

    acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
    if (acpi_dev != NULL) {
        switch (pci_get_revid(acpi_dev)) {
        /*
         * Disable C3 support for all PIIX4 chipsets.  Some of these parts
         * do not report the BMIDE status to the BM status register and
         * others have a livelock bug if Type-F DMA is enabled.  Linux
         * works around the BMIDE bug by reading the BM status directly
         * but we take the simpler approach of disabling C3 for these
         * parts.
         *
         * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
         * Livelock") from the January 2002 PIIX4 specification update.
         * Applies to all PIIX4 models.
         *
         * Also, make sure that all interrupts cause a "Stop Break"
         * event to exit from C2 state.
         * Also, BRLD_EN_BM (ACPI_BITREG_BUS_MASTER_RLD in ACPI-speak)
         * should be set to zero, otherwise it causes C2 to short-sleep.
         * PIIX4 doesn't properly support C3 and bus master activity
         * need not break out of C2.
         */
        case PCI_REVISION_A_STEP:
        case PCI_REVISION_B_STEP:
        case PCI_REVISION_4E:
        case PCI_REVISION_4M:
            cpu_quirks |= CPU_QUIRK_NO_C3;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu: working around PIIX4 bug, disabling C3\n"));

            val = pci_read_config(acpi_dev, PIIX4_DEVACTB_REG, 4);
            if ((val & PIIX4_STOP_BREAK_MASK) != PIIX4_STOP_BREAK_MASK) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu: PIIX4: enabling IRQs to generate Stop Break\n"));
                val |= PIIX4_STOP_BREAK_MASK;
                pci_write_config(acpi_dev, PIIX4_DEVACTB_REG, val, 4);
            }
            status = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_RLD, &val);
            if (ACPI_SUCCESS(status) && val != 0) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu: PIIX4: reset BRLD_EN_BM\n"));
                AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
            }
            break;
        default:
            break;
        }
    }
#endif
}

static int
acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    struct sbuf sb;
    char buf[128];
    int i;
    uintmax_t fract, sum, whole;

    sc = (struct acpi_cpu_softc *)arg1;
    sum = 0;
    for (i = 0; i < sc->cpu_cx_count; i++)
        sum += sc->cpu_cx_stats[i];
    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
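    /*
     * Report each state's share of the total as a percentage with two
     * decimal places, using only integer arithmetic.
     */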
    for (i = 0; i < sc->cpu_cx_count; i++) {
        if (sum > 0) {
            whole = (uintmax_t)sc->cpu_cx_stats[i] * 100;
            fract = (whole % sum) * 100;
            sbuf_printf(&sb, "%u.%02u%% ", (u_int)(whole / sum),
                (u_int)(fract / sum));
        } else
            sbuf_printf(&sb, "0.00%% ");
    }
    sbuf_printf(&sb, "last %dus", sc->cpu_prev_sleep);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
    sbuf_delete(&sb);

    return (0);
}

/*
 * XXX TODO: actually add support to count each entry/exit
 * from the Cx states.
 */
static int
acpi_cpu_usage_counters_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    struct sbuf sb;
    char buf[128];
    int i;

    sc = (struct acpi_cpu_softc *)arg1;

    /* Print out the raw counters */
    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);

    for (i = 0; i < sc->cpu_cx_count; i++) {
        sbuf_printf(&sb, "%u ", sc->cpu_cx_stats[i]);
    }

    sbuf_trim(&sb);
    sbuf_finish(&sb);
    sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
    sbuf_delete(&sb);

    return (0);
}

#if defined(__i386__) || defined(__amd64__)
static int
acpi_cpu_method_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    struct acpi_cx *cx;
    struct sbuf sb;
    char buf[128];
    int i;

    sc = (struct acpi_cpu_softc *)arg1;
    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
    for (i = 0; i < sc->cpu_cx_count; i++) {
        cx = &sc->cpu_cx_states[i];
        sbuf_printf(&sb, "C%d/", i + 1);
        if (cx->do_mwait) {
            sbuf_cat(&sb, "mwait");
            if (cx->mwait_hw_coord)
                sbuf_cat(&sb, "/hwc");
            if (cx->mwait_bm_avoidance)
                sbuf_cat(&sb, "/bma");
        } else if (cx->type == ACPI_STATE_C1) {
            sbuf_cat(&sb, "hlt");
        } else {
            sbuf_cat(&sb, "io");
        }
        if (cx->type == ACPI_STATE_C1 && cx->p_lvlx != NULL)
            sbuf_cat(&sb, "/iohlt");
        sbuf_putc(&sb, ' ');
    }
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
    sbuf_delete(&sb);
    return (0);
}
#endif

static int
acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc)
{
    int i;

    ACPI_SERIAL_ASSERT(cpu);
    sc->cpu_cx_lowest = min(sc->cpu_cx_lowest_lim, sc->cpu_cx_count - 1);

    /* If not disabling, cache the new lowest non-C3 state. */
    sc->cpu_non_c3 = 0;
    for (i = sc->cpu_cx_lowest; i >= 0; i--) {
        if (sc->cpu_cx_states[i].type < ACPI_STATE_C3) {
            sc->cpu_non_c3 = i;
            break;
        }
    }

    /* Reset the statistics counters. */
    bzero(sc->cpu_cx_stats, sizeof(sc->cpu_cx_stats));
    return (0);
}

static int
acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    char state[8];
    int val, error;

    sc = (struct acpi_cpu_softc *)arg1;
    snprintf(state, sizeof(state), "C%d", sc->cpu_cx_lowest_lim + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
        return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
        return (EINVAL);
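    /*
     * "Cmax" requests the deepest state available; the limit is clamped
     * to the actual state count in acpi_cpu_set_cx_lowest().
     */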
    if (strcasecmp(state, "Cmax") == 0)
        val = MAX_CX_STATES;
    else {
        val = (int)strtol(state + 1, NULL, 10);
        if (val < 1 || val > MAX_CX_STATES)
            return (EINVAL);
    }

    ACPI_SERIAL_BEGIN(cpu);
    sc->cpu_cx_lowest_lim = val - 1;
    acpi_cpu_set_cx_lowest(sc);
    ACPI_SERIAL_END(cpu);

    return (0);
}

static int
acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    char state[8];
    int val, error, i;

    snprintf(state, sizeof(state), "C%d", cpu_cx_lowest_lim + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
        return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
        return (EINVAL);
    if (strcasecmp(state, "Cmax") == 0)
        val = MAX_CX_STATES;
    else {
        val = (int)strtol(state + 1, NULL, 10);
        if (val < 1 || val > MAX_CX_STATES)
            return (EINVAL);
    }

    /* Update the new lowest usable Cx state for all CPUs. */
    ACPI_SERIAL_BEGIN(cpu);
    cpu_cx_lowest_lim = val - 1;
    for (i = 0; i < cpu_ndevices; i++) {
        sc = device_get_softc(cpu_devices[i]);
        sc->cpu_cx_lowest_lim = cpu_cx_lowest_lim;
        acpi_cpu_set_cx_lowest(sc);
    }
    ACPI_SERIAL_END(cpu);

    return (0);
}