/*-
 * Copyright (c) 2003-2005 Nate Lawson (SDG)
 * Copyright (c) 2001 Michael Smith
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/pcpu.h>
#include <sys/power.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/smp.h>

#include <dev/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#if defined(__amd64__) || defined(__i386__)
#include <machine/clock.h>
#include <machine/specialreg.h>
#include <machine/md_var.h>
#endif
#include <sys/rman.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>

#include <dev/acpica/acpivar.h>

/*
 * Support for ACPI Processor devices, including C[1-3] sleep states.
 */

/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT	ACPI_PROCESSOR
ACPI_MODULE_NAME("PROCESSOR")

struct acpi_cx {
    struct resource	*p_lvlx;	/* Register to read to enter state. */
    uint32_t		 type;		/* C1-3 (C4 and up treated as C3). */
    uint32_t		 trans_lat;	/* Transition latency (usec). */
    uint32_t		 power;		/* Power consumed (mW). */
    int			 res_type;	/* Resource type for p_lvlx. */
    int			 res_rid;	/* Resource ID for p_lvlx. */
    bool		 do_mwait;
    uint32_t		 mwait_hint;
    bool		 mwait_hw_coord;
    bool		 mwait_bm_avoidance;
};
#define MAX_CX_STATES	 8

struct acpi_cpu_softc {
    device_t		 cpu_dev;
    ACPI_HANDLE		 cpu_handle;
    struct pcpu		*cpu_pcpu;
    uint32_t		 cpu_acpi_id;	/* ACPI processor id */
    uint32_t		 cpu_p_blk;	/* ACPI P_BLK location */
    uint32_t		 cpu_p_blk_len;	/* P_BLK length (must be 6). */
    struct acpi_cx	 cpu_cx_states[MAX_CX_STATES];
    int			 cpu_cx_count;	/* Number of valid Cx states. */
    int			 cpu_prev_sleep;/* Last idle sleep duration. */
    int			 cpu_features;	/* Child driver supported features. */
    /* Runtime state. */
    int			 cpu_non_c2;	/* Index of lowest non-C2 state. */
    int			 cpu_non_c3;	/* Index of lowest non-C3 state. */
    u_int		 cpu_cx_stats[MAX_CX_STATES];/* Cx usage history. */
    /* Values for sysctl. */
    struct sysctl_ctx_list cpu_sysctl_ctx;
    struct sysctl_oid	*cpu_sysctl_tree;
    int			 cpu_cx_lowest;
    int			 cpu_cx_lowest_lim;
    int			 cpu_disable_idle; /* Disable entry to idle function */
    char		 cpu_cx_supported[64];
};

struct acpi_cpu_device {
    struct resource_list	ad_rl;
};

#define CPU_GET_REG(reg, width)					\
    (bus_space_read_ ## width(rman_get_bustag((reg)),		\
		      rman_get_bushandle((reg)), 0))
#define CPU_SET_REG(reg, width, val)				\
    (bus_space_write_ ## width(rman_get_bustag((reg)),		\
		       rman_get_bushandle((reg)), 0, (val)))
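
/*
 * These helpers perform a single access at offset 0 of an allocated
 * P_LVLx resource; e.g. CPU_GET_REG(cx->p_lvlx, 1) issues the one-byte
 * read that asks the chipset to enter the corresponding Cx state.
 */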

#define ACPI_NOTIFY_CX_STATES	0x81	/* _CST changed. */

#define CPU_QUIRK_NO_C3		(1<<0)	/* C3-type states are not usable. */
#define CPU_QUIRK_NO_BM_CTRL	(1<<2)	/* No bus mastering control. */

#define PCI_VENDOR_INTEL	0x8086
#define PCI_DEVICE_82371AB_3	0x7113	/* PIIX4 chipset for quirks. */
#define PCI_REVISION_A_STEP	0
#define PCI_REVISION_B_STEP	1
#define PCI_REVISION_4E		2
#define PCI_REVISION_4M		3
#define PIIX4_DEVACTB_REG	0x58
#define PIIX4_BRLD_EN_IRQ0	(1<<0)
#define PIIX4_BRLD_EN_IRQ	(1<<1)
#define PIIX4_BRLD_EN_IRQ8	(1<<5)
#define PIIX4_STOP_BREAK_MASK	(PIIX4_BRLD_EN_IRQ0 | PIIX4_BRLD_EN_IRQ | PIIX4_BRLD_EN_IRQ8)
#define PIIX4_PCNTRL_BST_EN	(1<<10)

#define CST_FFH_VENDOR_INTEL	1
#define CST_FFH_INTEL_CL_C1IO	1
#define CST_FFH_INTEL_CL_MWAIT	2
#define CST_FFH_MWAIT_HW_COORD	0x0001
#define CST_FFH_MWAIT_BM_AVOID	0x0002

#define CPUDEV_DEVICE_ID	"ACPI0007"

/* Knob to disable acpi_cpu devices */
bool acpi_cpu_disabled = false;

/* Platform hardware resource information. */
static uint32_t		 cpu_smi_cmd;	/* Value to write to SMI_CMD. */
static uint8_t		 cpu_cst_cnt;	/* Indicate we are _CST aware. */
static int		 cpu_quirks;	/* Indicate any hardware bugs. */

/* Values for sysctl. */
static struct sysctl_ctx_list cpu_sysctl_ctx;
static struct sysctl_oid *cpu_sysctl_tree;
static int		 cpu_cx_generic;
static int		 cpu_cx_lowest_lim;
#if defined(__i386__) || defined(__amd64__)
static bool		 cppc_notify;
#endif

static struct acpi_cpu_softc **cpu_softc;
ACPI_SERIAL_DECL(cpu, "ACPI CPU");

static int	acpi_cpu_probe(device_t dev);
static int	acpi_cpu_attach(device_t dev);
static int	acpi_cpu_suspend(device_t dev);
static int	acpi_cpu_resume(device_t dev);
static int	acpi_pcpu_get_id(device_t dev, uint32_t acpi_id,
		    u_int *cpu_id);
static struct resource_list *acpi_cpu_get_rlist(device_t dev, device_t child);
static device_t	acpi_cpu_add_child(device_t dev, u_int order, const char *name,
		    int unit);
static int	acpi_cpu_read_ivar(device_t dev, device_t child, int index,
		    uintptr_t *result);
static int	acpi_cpu_shutdown(device_t dev);
static void	acpi_cpu_cx_probe(struct acpi_cpu_softc *sc);
static void	acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc);
static int	acpi_cpu_cx_cst(struct acpi_cpu_softc *sc);
static void	acpi_cpu_startup(void *arg);
static void	acpi_cpu_startup_cx(struct acpi_cpu_softc *sc);
static void	acpi_cpu_cx_list(struct acpi_cpu_softc *sc);
#if defined(__i386__) || defined(__amd64__)
static void	acpi_cpu_idle(sbintime_t sbt);
#endif
static void	acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context);
static void	acpi_cpu_quirks(void);
static void	acpi_cpu_quirks_piix4(void);
static int	acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_usage_counters_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc);
static int	acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
#if defined(__i386__) || defined(__amd64__)
static int	acpi_cpu_method_sysctl(SYSCTL_HANDLER_ARGS);
#endif

static device_method_t acpi_cpu_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,	acpi_cpu_probe),
    DEVMETHOD(device_attach,	acpi_cpu_attach),
    DEVMETHOD(device_detach,	bus_generic_detach),
    DEVMETHOD(device_shutdown,	acpi_cpu_shutdown),
    DEVMETHOD(device_suspend,	acpi_cpu_suspend),
    DEVMETHOD(device_resume,	acpi_cpu_resume),

    /* Bus interface */
    DEVMETHOD(bus_add_child,	acpi_cpu_add_child),
    DEVMETHOD(bus_read_ivar,	acpi_cpu_read_ivar),
    DEVMETHOD(bus_get_resource_list, acpi_cpu_get_rlist),
    DEVMETHOD(bus_get_resource,	bus_generic_rl_get_resource),
    DEVMETHOD(bus_set_resource,	bus_generic_rl_set_resource),
    DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource),
    DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
    DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
    DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
    DEVMETHOD(bus_setup_intr,	bus_generic_setup_intr),
    DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),

    DEVMETHOD_END
};

static driver_t acpi_cpu_driver = {
    "cpu",
    acpi_cpu_methods,
    sizeof(struct acpi_cpu_softc),
};

DRIVER_MODULE(cpu, acpi, acpi_cpu_driver, 0, 0);
MODULE_DEPEND(cpu, acpi, 1, 1, 1);

static int
acpi_cpu_probe(device_t dev)
{
    static char		    *cpudev_ids[] = { CPUDEV_DEVICE_ID, NULL };
    int			    acpi_id, cpu_id;
    ACPI_BUFFER		    buf;
    ACPI_HANDLE		    handle;
    ACPI_OBJECT		    *obj;
    ACPI_STATUS		    status;
    ACPI_OBJECT_TYPE	    type;

    if (acpi_disabled("cpu") || acpi_cpu_disabled)
	return (ENXIO);
    type = acpi_get_type(dev);
    if (type != ACPI_TYPE_PROCESSOR && type != ACPI_TYPE_DEVICE)
	return (ENXIO);
    if (type == ACPI_TYPE_DEVICE &&
	ACPI_ID_PROBE(device_get_parent(dev), dev, cpudev_ids, NULL) >= 0)
	return (ENXIO);

    handle = acpi_get_handle(dev);
    if (cpu_softc == NULL)
	cpu_softc = malloc(sizeof(struct acpi_cpu_softc *) *
	    (mp_maxid + 1), M_TEMP /* XXX */, M_WAITOK | M_ZERO);

    if (type == ACPI_TYPE_PROCESSOR) {
	/* Get our Processor object. */
	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_BUFFER;
	status = AcpiEvaluateObject(handle, NULL, NULL, &buf);
	if (ACPI_FAILURE(status)) {
	    device_printf(dev, "probe failed to get Processor obj - %s\n",
		AcpiFormatException(status));
	    return (ENXIO);
	}
	obj = (ACPI_OBJECT *)buf.Pointer;
	if (obj->Type != ACPI_TYPE_PROCESSOR) {
	    device_printf(dev, "Processor object has bad type %d\n",
		obj->Type);
	    AcpiOsFree(obj);
	    return (ENXIO);
	}

	/*
	 * Find the processor associated with our unit.  We could use the
	 * ProcId as a key, however, some boxes do not have the same values
	 * in their Processor object as the ProcId values in the MADT.
	 */
	acpi_id = obj->Processor.ProcId;
	AcpiOsFree(obj);
    } else {
	status = acpi_GetInteger(handle, "_UID", &acpi_id);
	if (ACPI_FAILURE(status)) {
	    device_printf(dev, "Device object has bad value - %s\n",
		AcpiFormatException(status));
	    return (ENXIO);
	}
    }
    if (acpi_pcpu_get_id(dev, acpi_id, &cpu_id) != 0) {
	if (bootverbose && (type != ACPI_TYPE_PROCESSOR || acpi_id != 255))
	    printf("ACPI: Processor %s (ACPI ID %u) ignored\n",
		acpi_name(acpi_get_handle(dev)), acpi_id);
	return (ENXIO);
    }

    if (device_set_unit(dev, cpu_id) != 0)
	return (ENXIO);

    device_set_desc(dev, "ACPI CPU");

    if (!bootverbose && device_get_unit(dev) != 0) {
	device_quiet(dev);
	device_quiet_children(dev);
    }

    return (BUS_PROBE_DEFAULT);
}

static int
acpi_cpu_attach(device_t dev)
{
    ACPI_BUFFER		   buf;
    ACPI_OBJECT		   arg, *obj;
    ACPI_OBJECT_LIST	   arglist;
    struct pcpu		   *pcpu_data;
    struct acpi_cpu_softc *sc;
    struct acpi_softc	  *acpi_sc;
    ACPI_STATUS		   status;
    u_int		   features;
    int			   cpu_id, drv_count, i;
    driver_t		 **drivers;
    uint32_t		   cap_set[3];

    /* UUID needed by _OSC evaluation */
    static uint8_t cpu_oscuuid[16] = { 0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29,
				       0xBE, 0x47, 0x9E, 0xBD, 0xD8, 0x70,
				       0x58, 0x71, 0x39, 0x53 };
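    /*
     * The bytes above are UUID 4077A616-290C-47BE-9EBD-D87058713953
     * in the mixed-endian byte order that _OSC expects.
     */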

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = device_get_softc(dev);
    sc->cpu_dev = dev;
    sc->cpu_handle = acpi_get_handle(dev);
    cpu_id = device_get_unit(dev);
    cpu_softc[cpu_id] = sc;
    pcpu_data = pcpu_find(cpu_id);
    pcpu_data->pc_device = dev;
    sc->cpu_pcpu = pcpu_data;
    cpu_smi_cmd = AcpiGbl_FADT.SmiCommand;
    cpu_cst_cnt = AcpiGbl_FADT.CstControl;

    if (acpi_get_type(dev) == ACPI_TYPE_PROCESSOR) {
	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_BUFFER;
	status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf);
	if (ACPI_FAILURE(status)) {
	    device_printf(dev, "attach failed to get Processor obj - %s\n",
		AcpiFormatException(status));
	    return (ENXIO);
	}
	obj = (ACPI_OBJECT *)buf.Pointer;
	sc->cpu_p_blk = obj->Processor.PblkAddress;
	sc->cpu_p_blk_len = obj->Processor.PblkLength;
	sc->cpu_acpi_id = obj->Processor.ProcId;
	AcpiOsFree(obj);
    } else {
	KASSERT(acpi_get_type(dev) == ACPI_TYPE_DEVICE,
	    ("Unexpected ACPI object"));
	status = acpi_GetInteger(sc->cpu_handle, "_UID", &sc->cpu_acpi_id);
	if (ACPI_FAILURE(status)) {
	    device_printf(dev, "Device object has bad value - %s\n",
		AcpiFormatException(status));
	    return (ENXIO);
	}
	sc->cpu_p_blk = 0;
	sc->cpu_p_blk_len = 0;
    }
    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_BLK at %#x/%d\n",
		     device_get_unit(dev), sc->cpu_p_blk, sc->cpu_p_blk_len));

    /*
     * If this is the first cpu we attach, create and initialize the generic
     * resources that will be used by all acpi cpu devices.
     */
    if (device_get_unit(dev) == 0) {
	/* Assume we won't be using generic Cx mode by default */
	cpu_cx_generic = FALSE;

	/* Install hw.acpi.cpu sysctl tree */
	acpi_sc = acpi_device_get_parent_softc(dev);
	sysctl_ctx_init(&cpu_sysctl_ctx);
	cpu_sysctl_tree = SYSCTL_ADD_NODE(&cpu_sysctl_ctx,
	    SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree), OID_AUTO, "cpu",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "node for CPU children");

#if defined(__i386__) || defined(__amd64__)
	/* Add sysctl handler to control registering for CPPC notifications */
	cppc_notify = 1;
	SYSCTL_ADD_BOOL(&cpu_sysctl_ctx, SYSCTL_CHILDREN(cpu_sysctl_tree),
	    OID_AUTO, "cppc_notify", CTLFLAG_RDTUN | CTLFLAG_MPSAFE,
	    &cppc_notify, 0, "Register for CPPC Notifications");
#endif
    }

    /*
     * Before calling any CPU methods, collect child driver feature hints
     * and notify ACPI of them.  We support unified SMP power control
     * so advertise this ourselves.  Note this is not the same as independent
     * SMP control where each CPU can have different settings.
     */
    sc->cpu_features = ACPI_CAP_SMP_SAME | ACPI_CAP_SMP_SAME_C3 |
	ACPI_CAP_C1_IO_HALT;

#if defined(__i386__) || defined(__amd64__)
    /*
     * Ask for MWAIT modes if not disabled and interrupts work
     * reasonably with MWAIT.
     */
    if (!acpi_disabled("mwait") && cpu_mwait_usable())
	sc->cpu_features |= ACPI_CAP_SMP_C1_NATIVE | ACPI_CAP_SMP_C3_NATIVE;

    /*
     * Work around a lingering SMM bug which leads to freezes when handling
     * CPPC notifications.  Tell the SMM we will handle any CPPC notifications.
     */
    if ((cpu_power_eax & CPUTPM1_HWP_NOTIFICATION) && cppc_notify)
	sc->cpu_features |= ACPI_CAP_INTR_CPPC;
#endif

    if (devclass_get_drivers(device_get_devclass(dev), &drivers,
	&drv_count) == 0) {
	for (i = 0; i < drv_count; i++) {
	    if (ACPI_GET_FEATURES(drivers[i], &features) == 0)
		sc->cpu_features |= features;
	}
	free(drivers, M_TEMP);
    }

    /*
     * CPU capabilities are specified in
     * Intel Processor Vendor-Specific ACPI Interface Specification.
     */
    if (sc->cpu_features) {
	cap_set[1] = sc->cpu_features;
	status = acpi_EvaluateOSC(sc->cpu_handle, cpu_oscuuid, 1, 2, cap_set,
	    cap_set, false);
	if (ACPI_SUCCESS(status)) {
	    if (cap_set[0] != 0)
		device_printf(dev, "_OSC returned status %#x\n", cap_set[0]);
	}
	else {
	    arglist.Pointer = &arg;
	    arglist.Count = 1;
	    arg.Type = ACPI_TYPE_BUFFER;
	    arg.Buffer.Length = sizeof(cap_set);
	    arg.Buffer.Pointer = (uint8_t *)cap_set;
	    cap_set[0] = 1; /* revision */
	    cap_set[1] = 1; /* number of capabilities integers */
	    cap_set[2] = sc->cpu_features;
	    AcpiEvaluateObject(sc->cpu_handle, "_PDC", &arglist, NULL);
	}
    }

    /* Probe for Cx state support. */
    acpi_cpu_cx_probe(sc);

    return (0);
}

static void
acpi_cpu_postattach(void *unused __unused)
{
    struct acpi_cpu_softc *sc;
    int attached = 0, i;

    if (cpu_softc == NULL)
	return;

    bus_topo_lock();
    CPU_FOREACH(i) {
	if ((sc = cpu_softc[i]) != NULL)
	    bus_generic_probe(sc->cpu_dev);
    }
    CPU_FOREACH(i) {
	if ((sc = cpu_softc[i]) != NULL) {
	    bus_generic_attach(sc->cpu_dev);
	    attached = 1;
	}
    }
    bus_topo_unlock();

    if (attached) {
#ifdef EARLY_AP_STARTUP
	acpi_cpu_startup(NULL);
#else
	/* Queue post cpu-probing task handler */
	AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cpu_startup, NULL);
#endif
    }
}

SYSINIT(acpi_cpu, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE,
    acpi_cpu_postattach, NULL);

static void
disable_idle(struct acpi_cpu_softc *sc)
{
    cpuset_t cpuset;

    CPU_SETOF(sc->cpu_pcpu->pc_cpuid, &cpuset);
    sc->cpu_disable_idle = TRUE;

    /*
     * Ensure that the CPU is not in an idle state or in acpi_cpu_idle().
     * Note that this code relies on the fact that the rendezvous IPI
     * cannot be delivered to a context where interrupts are disabled:
     * acpi_cpu_idle() runs in such a context and only re-enables
     * interrupts right before it returns.
     */
    smp_rendezvous_cpus(cpuset, smp_no_rendezvous_barrier, NULL,
	smp_no_rendezvous_barrier, NULL);
}

static void
enable_idle(struct acpi_cpu_softc *sc)
{

    sc->cpu_disable_idle = FALSE;
}

#if defined(__i386__) || defined(__amd64__)
static int
is_idle_disabled(struct acpi_cpu_softc *sc)
{

    return (sc->cpu_disable_idle);
}
#endif

/*
 * Disable any entry to the idle function during suspend and re-enable it
 * during resume.
 */
static int
acpi_cpu_suspend(device_t dev)
{
    int error;

    error = bus_generic_suspend(dev);
    if (error)
	return (error);
    disable_idle(device_get_softc(dev));
    return (0);
}

static int
acpi_cpu_resume(device_t dev)
{

    enable_idle(device_get_softc(dev));
    return (bus_generic_resume(dev));
}

/*
 * Find the processor associated with a given ACPI ID.
 */
static int
acpi_pcpu_get_id(device_t dev, uint32_t acpi_id, u_int *cpu_id)
{
    struct pcpu	*pc;
    u_int	 i;

    CPU_FOREACH(i) {
	pc = pcpu_find(i);
	if (pc->pc_acpi_id == acpi_id) {
	    *cpu_id = pc->pc_cpuid;
	    return (0);
	}
    }

    /*
     * If pc_acpi_id for CPU 0 is not initialized (e.g. a non-APIC
     * UP box) use the ACPI ID from the first processor we find.
     */
    if (mp_ncpus == 1) {
	pc = pcpu_find(0);
	if (pc->pc_acpi_id == 0xffffffff)
	    pc->pc_acpi_id = acpi_id;
	*cpu_id = 0;
	return (0);
    }

    return (ESRCH);
}

static struct resource_list *
acpi_cpu_get_rlist(device_t dev, device_t child)
{
    struct acpi_cpu_device *ad;

    ad = device_get_ivars(child);
    if (ad == NULL)
	return (NULL);
    return (&ad->ad_rl);
}

static device_t
acpi_cpu_add_child(device_t dev, u_int order, const char *name, int unit)
{
    struct acpi_cpu_device *ad;
    device_t child;

    if ((ad = malloc(sizeof(*ad), M_TEMP, M_NOWAIT | M_ZERO)) == NULL)
	return (NULL);

    resource_list_init(&ad->ad_rl);

    child = device_add_child_ordered(dev, order, name, unit);
    if (child != NULL)
	device_set_ivars(child, ad);
    else
	free(ad, M_TEMP);
    return (child);
}

static int
acpi_cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
    struct acpi_cpu_softc *sc;

    sc = device_get_softc(dev);
    switch (index) {
    case ACPI_IVAR_HANDLE:
	*result = (uintptr_t)sc->cpu_handle;
	break;
    case CPU_IVAR_PCPU:
	*result = (uintptr_t)sc->cpu_pcpu;
	break;
#if defined(__amd64__) || defined(__i386__)
    case CPU_IVAR_NOMINAL_MHZ:
	if (tsc_is_invariant) {
	    *result = (uintptr_t)(atomic_load_acq_64(&tsc_freq) / 1000000);
	    break;
	}
	/* FALLTHROUGH */
#endif
    default:
	return (ENOENT);
    }
    return (0);
}

static int
acpi_cpu_shutdown(device_t dev)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Allow children to shutdown first. */
    bus_generic_shutdown(dev);

    /*
     * Disable any entry to the idle function.
     */
    disable_idle(device_get_softc(dev));

    /*
     * CPU devices are not truly detached and remain referenced,
     * so their resources are not freed.
     */

    return_VALUE (0);
}

static void
acpi_cpu_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cpu_prev_sleep = 1000000;
    sc->cpu_cx_lowest = 0;
    sc->cpu_cx_lowest_lim = 0;

    /*
     * Check for the ACPI 2.0 _CST sleep states object.  If we can't find
     * any, we'll revert to the generic FADT/P_BLK Cx control method, which
     * is handled by acpi_cpu_startup.  Probing for generic Cx states must
     * be deferred until all CPUs in the system have been probed, since we
     * may already have found CPUs with valid _CST packages.
     */
    if (!cpu_cx_generic && acpi_cpu_cx_cst(sc) != 0) {
	/*
	 * We were unable to find a _CST package for this cpu or there
	 * was an error parsing it.  Switch back to generic mode.
	 */
	cpu_cx_generic = TRUE;
	if (bootverbose)
	    device_printf(sc->cpu_dev, "switching to generic Cx mode\n");
    }

    /*
     * TODO: _CSD Package should be checked here.
     */
}

static void
acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_GENERIC_ADDRESS	 gas;
    struct acpi_cx		*cx_ptr;

    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cpu_prev_sleep = 1000000;

    /* C1 has been required since just after ACPI 1.0 */
    cx_ptr->type = ACPI_STATE_C1;
    cx_ptr->trans_lat = 0;
    cx_ptr++;
    sc->cpu_non_c2 = sc->cpu_cx_count;
    sc->cpu_non_c3 = sc->cpu_cx_count;
    sc->cpu_cx_count++;

    /*
     * The spec says P_BLK must be 6 bytes long.  However, some systems
     * use it to indicate a partial set of features, so we treat a length
     * of 5 as meaning C2 is present.  Some may also use a value of 7 to
     * indicate another C3, but most use _CST for this (as required) and
     * having "only" C1-C3 is not a hardship.
     */
    if (sc->cpu_p_blk_len < 5)
	return;

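    /*
     * P_BLK register layout per the ACPI specification: P_CNT at offset 0
     * (4 bytes), then the one-byte P_LVL2 and P_LVL3 trigger registers at
     * offsets 4 and 5, which is where the addresses below come from.
     */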
    /* Validate and allocate resources for C2 (P_LVL2). */
    gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
    gas.BitWidth = 8;
    if (AcpiGbl_FADT.C2Latency <= 100) {
	gas.Address = sc->cpu_p_blk + 4;
	cx_ptr->res_rid = 0;
	acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &cx_ptr->res_rid,
	    &gas, &cx_ptr->p_lvlx, RF_SHAREABLE);
	if (cx_ptr->p_lvlx != NULL) {
	    cx_ptr->type = ACPI_STATE_C2;
	    cx_ptr->trans_lat = AcpiGbl_FADT.C2Latency;
	    cx_ptr++;
	    sc->cpu_non_c3 = sc->cpu_cx_count;
	    sc->cpu_cx_count++;
	}
    }
    if (sc->cpu_p_blk_len < 6)
	return;

    /* Validate and allocate resources for C3 (P_LVL3). */
    if (AcpiGbl_FADT.C3Latency <= 1000 && !(cpu_quirks & CPU_QUIRK_NO_C3)) {
	gas.Address = sc->cpu_p_blk + 5;
	cx_ptr->res_rid = 1;
	acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &cx_ptr->res_rid,
	    &gas, &cx_ptr->p_lvlx, RF_SHAREABLE);
	if (cx_ptr->p_lvlx != NULL) {
	    cx_ptr->type = ACPI_STATE_C3;
	    cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency;
	    cx_ptr++;
	    sc->cpu_cx_count++;
	}
    }
}

#if defined(__i386__) || defined(__amd64__)
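/*
 * Decode an Intel FFH _CST entry: the low 32 bits of the GAS address hold
 * the MWAIT hint, and the access-size field carries the CST_FFH_MWAIT_*
 * hardware-coordination and BM-avoidance flag bits.
 */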
static void
acpi_cpu_cx_cst_mwait(struct acpi_cx *cx_ptr, uint64_t address, int accsize)
{

    cx_ptr->do_mwait = true;
    cx_ptr->mwait_hint = address & 0xffffffff;
    cx_ptr->mwait_hw_coord = (accsize & CST_FFH_MWAIT_HW_COORD) != 0;
    cx_ptr->mwait_bm_avoidance = (accsize & CST_FFH_MWAIT_BM_AVOID) != 0;
}
#endif

static void
acpi_cpu_cx_cst_free_plvlx(device_t cpu_dev, struct acpi_cx *cx_ptr)
{

    if (cx_ptr->p_lvlx == NULL)
	return;
    bus_release_resource(cpu_dev, cx_ptr->res_type, cx_ptr->res_rid,
	cx_ptr->p_lvlx);
    cx_ptr->p_lvlx = NULL;
}

/*
 * Parse a _CST package and set up its Cx states.  Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
 */
static int
acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
{
    struct	 acpi_cx *cx_ptr;
    ACPI_STATUS	 status;
    ACPI_BUFFER	 buf;
    ACPI_OBJECT	*top;
    ACPI_OBJECT	*pkg;
    uint32_t	 count;
    int		 i;
#if defined(__i386__) || defined(__amd64__)
    uint64_t	 address;
    int		 vendor, class, accsize;
#endif

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, "_CST", NULL, &buf);
    if (ACPI_FAILURE(status))
	return (ENXIO);

    /* _CST is a package with a count and at least one Cx package. */
    top = (ACPI_OBJECT *)buf.Pointer;
    if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) {
	device_printf(sc->cpu_dev, "invalid _CST package\n");
	AcpiOsFree(buf.Pointer);
	return (ENXIO);
    }
    if (count != top->Package.Count - 1) {
	device_printf(sc->cpu_dev, "invalid _CST state count (%d != %d)\n",
	    count, top->Package.Count - 1);
	count = top->Package.Count - 1;
    }
    if (count > MAX_CX_STATES) {
	device_printf(sc->cpu_dev, "_CST has too many states (%d)\n", count);
	count = MAX_CX_STATES;
    }

    sc->cpu_non_c2 = 0;
    sc->cpu_non_c3 = 0;
    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;

    /*
     * C1 has been required since just after ACPI 1.0.
     * Reserve the first slot for it.
     */
    cx_ptr->type = ACPI_STATE_C0;
    cx_ptr++;
    sc->cpu_cx_count++;

    /* Set up all valid states. */
    for (i = 0; i < count; i++) {
	pkg = &top->Package.Elements[i + 1];
	if (!ACPI_PKG_VALID(pkg, 4) ||
	    acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 ||
	    acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 ||
	    acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) {
	    device_printf(sc->cpu_dev, "skipping invalid Cx state package\n");
	    continue;
	}

	/* Validate the state to see if we should use it. */
	switch (cx_ptr->type) {
	case ACPI_STATE_C1:
	    acpi_cpu_cx_cst_free_plvlx(sc->cpu_dev, cx_ptr);
#if defined(__i386__) || defined(__amd64__)
	    if (acpi_PkgFFH_IntelCpu(pkg, 0, &vendor, &class, &address,
		&accsize) == 0 && vendor == CST_FFH_VENDOR_INTEL) {
		if (class == CST_FFH_INTEL_CL_C1IO) {
		    /* C1 I/O then Halt */
		    cx_ptr->res_rid = sc->cpu_cx_count;
		    bus_set_resource(sc->cpu_dev, SYS_RES_IOPORT,
			cx_ptr->res_rid, address, 1);
		    cx_ptr->p_lvlx = bus_alloc_resource_any(sc->cpu_dev,
			SYS_RES_IOPORT, &cx_ptr->res_rid, RF_ACTIVE |
			RF_SHAREABLE);
		    if (cx_ptr->p_lvlx == NULL) {
			bus_delete_resource(sc->cpu_dev, SYS_RES_IOPORT,
			    cx_ptr->res_rid);
			device_printf(sc->cpu_dev,
			    "C1 I/O failed to allocate port %d, "
			    "degrading to C1 Halt\n", (int)address);
		    }
		} else if (class == CST_FFH_INTEL_CL_MWAIT) {
		    acpi_cpu_cx_cst_mwait(cx_ptr, address, accsize);
		}
	    }
#endif
	    if (sc->cpu_cx_states[0].type == ACPI_STATE_C0) {
		/* This is the first C1 state.  Use the reserved slot. */
		sc->cpu_cx_states[0] = *cx_ptr;
	    } else {
		sc->cpu_non_c2 = sc->cpu_cx_count;
		sc->cpu_non_c3 = sc->cpu_cx_count;
		cx_ptr++;
		sc->cpu_cx_count++;
	    }
	    continue;
	case ACPI_STATE_C2:
	    sc->cpu_non_c3 = sc->cpu_cx_count;
	    break;
	case ACPI_STATE_C3:
	default:
	    if ((cpu_quirks & CPU_QUIRK_NO_C3) != 0) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "acpi_cpu%d: C3[%d] not available.\n",
		    device_get_unit(sc->cpu_dev), i));
		continue;
	    }
	    break;
	}

	/* Free up any previous register. */
	acpi_cpu_cx_cst_free_plvlx(sc->cpu_dev, cx_ptr);

	/* Allocate the control register for C2 or C3. */
#if defined(__i386__) || defined(__amd64__)
	if (acpi_PkgFFH_IntelCpu(pkg, 0, &vendor, &class, &address,
	    &accsize) == 0 && vendor == CST_FFH_VENDOR_INTEL &&
	    class == CST_FFH_INTEL_CL_MWAIT) {
	    /* Native C State Instruction use (mwait) */
	    acpi_cpu_cx_cst_mwait(cx_ptr, address, accsize);
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"acpi_cpu%d: Got C%d/mwait - %d latency\n",
		device_get_unit(sc->cpu_dev), cx_ptr->type, cx_ptr->trans_lat));
	    cx_ptr++;
	    sc->cpu_cx_count++;
	} else
#endif
	{
	    cx_ptr->res_rid = sc->cpu_cx_count;
	    acpi_PkgGas(sc->cpu_dev, pkg, 0, &cx_ptr->res_type,
		&cx_ptr->res_rid, &cx_ptr->p_lvlx, RF_SHAREABLE);
	    if (cx_ptr->p_lvlx) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "acpi_cpu%d: Got C%d - %d latency\n",
		    device_get_unit(sc->cpu_dev), cx_ptr->type,
		    cx_ptr->trans_lat));
		cx_ptr++;
		sc->cpu_cx_count++;
	    }
	}
    }
    AcpiOsFree(buf.Pointer);

    /* If C1 state was not found, we need one now. */
    cx_ptr = sc->cpu_cx_states;
    if (cx_ptr->type == ACPI_STATE_C0) {
	cx_ptr->type = ACPI_STATE_C1;
	cx_ptr->trans_lat = 0;
    }

    return (0);
}

/*
 * Call this *after* all CPUs have been attached.
 */
static void
acpi_cpu_startup(void *arg)
{
    struct acpi_cpu_softc *sc;
    int i;

    /*
     * Set up any quirks that might be necessary now that we have probed
     * all the CPUs.
     */
    acpi_cpu_quirks();

    if (cpu_cx_generic) {
	/*
	 * We are using generic Cx mode, probe for available Cx states
	 * for all processors.
	 */
	CPU_FOREACH(i) {
	    if ((sc = cpu_softc[i]) != NULL)
		acpi_cpu_generic_cx_probe(sc);
	}
    } else {
	/*
	 * We are using _CST mode, remove C3 state if necessary.
	 * As we now know for sure that we will be using _CST mode
	 * install our notify handler.
	 */
	CPU_FOREACH(i) {
	    if ((sc = cpu_softc[i]) == NULL)
		continue;
	    if (cpu_quirks & CPU_QUIRK_NO_C3) {
		sc->cpu_cx_count = min(sc->cpu_cx_count, sc->cpu_non_c3 + 1);
	    }
	    AcpiInstallNotifyHandler(sc->cpu_handle, ACPI_DEVICE_NOTIFY,
		acpi_cpu_notify, sc);
	}
    }

    /* Perform Cx final initialization. */
    CPU_FOREACH(i) {
	if ((sc = cpu_softc[i]) != NULL)
	    acpi_cpu_startup_cx(sc);
    }

    /* Add a sysctl handler to handle global Cx lowest setting */
    SYSCTL_ADD_PROC(&cpu_sysctl_ctx, SYSCTL_CHILDREN(cpu_sysctl_tree),
	OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
	NULL, 0, acpi_cpu_global_cx_lowest_sysctl, "A",
	"Global lowest Cx sleep state to use");

    /* Take over idling from cpu_idle_default(). */
    cpu_cx_lowest_lim = 0;
    CPU_FOREACH(i) {
	if ((sc = cpu_softc[i]) != NULL)
	    enable_idle(sc);
    }
#if defined(__i386__) || defined(__amd64__)
    cpu_idle_hook = acpi_cpu_idle;
#endif
}

static void
acpi_cpu_cx_list(struct acpi_cpu_softc *sc)
{
    struct sbuf sb;
    int i;

    /*
     * Set up the list of Cx states
     */
    sbuf_new(&sb, sc->cpu_cx_supported, sizeof(sc->cpu_cx_supported),
	SBUF_FIXEDLEN);
    for (i = 0; i < sc->cpu_cx_count; i++)
	sbuf_printf(&sb, "C%d/%d/%d ", i + 1, sc->cpu_cx_states[i].type,
	    sc->cpu_cx_states[i].trans_lat);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
}

static void
acpi_cpu_startup_cx(struct acpi_cpu_softc *sc)
{
    acpi_cpu_cx_list(sc);

    SYSCTL_ADD_STRING(&sc->cpu_sysctl_ctx,
		      SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
		      OID_AUTO, "cx_supported", CTLFLAG_RD,
		      sc->cpu_cx_supported, 0,
		      "Cx/microsecond values for supported Cx states");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
	SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)), OID_AUTO,
	"cx_lowest", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
	(void *)sc, 0, acpi_cpu_cx_lowest_sysctl, "A",
	"lowest Cx sleep state to use");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
	SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)), OID_AUTO,
	"cx_usage", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	(void *)sc, 0, acpi_cpu_usage_sysctl, "A",
	"percent usage for each Cx state");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
	SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)), OID_AUTO,
	"cx_usage_counters", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	(void *)sc, 0, acpi_cpu_usage_counters_sysctl, "A",
	"Cx sleep state counters");
#if defined(__i386__) || defined(__amd64__)
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
	SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)), OID_AUTO,
	"cx_method", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	(void *)sc, 0, acpi_cpu_method_sysctl, "A", "Cx entrance methods");
#endif

    /* Signal platform that we can handle _CST notification. */
    if (!cpu_cx_generic && cpu_cst_cnt != 0) {
	ACPI_LOCK(acpi);
	AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8);
	ACPI_UNLOCK(acpi);
    }
}
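
/*
 * Illustrative userland view of the knobs created above (example values
 * only; the actual states and latencies vary by machine):
 *
 *   # sysctl dev.cpu.0.cx_supported
 *   dev.cpu.0.cx_supported: C1/1/1 C2/2/41 C3/3/96
 *   # sysctl dev.cpu.0.cx_lowest=C3
 *   # sysctl hw.acpi.cpu.cx_lowest=C2
 */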

#if defined(__i386__) || defined(__amd64__)
/*
 * Idle the CPU in the lowest state possible.  This function is called with
 * interrupts disabled.  Note that once it re-enables interrupts, a task
 * switch can occur so do not access shared data (i.e. the softc) after
 * interrupts are re-enabled.
 */
static void
acpi_cpu_idle(sbintime_t sbt)
{
    struct	acpi_cpu_softc *sc;
    struct	acpi_cx *cx_next;
    uint64_t	start_ticks, end_ticks;
    uint32_t	start_time, end_time;
    ACPI_STATUS	status;
    int		bm_active, cx_next_idx, i, us;

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no ACPI processor object for this CPU.  This occurs
     * for logical CPUs in the HTT case.
     */
    sc = cpu_softc[PCPU_GET(cpuid)];
    if (sc == NULL) {
	acpi_cpu_c1();
	return;
    }

    /* If disabled, take the safe path. */
    if (is_idle_disabled(sc)) {
	acpi_cpu_c1();
	return;
    }

    /* Find the lowest state that has small enough latency. */
    us = sc->cpu_prev_sleep;
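    /*
     * sbt is 32.32 fixed-point seconds until the next timer event.
     * Since 2^32 / 10^6 is roughly 4096 (2^12), sbt >> 12 is a cheap
     * approximation of that interval in microseconds.
     */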
    if (sbt >= 0 && us > (sbt >> 12))
	us = (sbt >> 12);
    cx_next_idx = 0;
    if (cpu_disable_c2_sleep)
	i = min(sc->cpu_cx_lowest, sc->cpu_non_c2);
    else if (cpu_disable_c3_sleep)
	i = min(sc->cpu_cx_lowest, sc->cpu_non_c3);
    else
	i = sc->cpu_cx_lowest;
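    /*
     * Starting from the deepest candidate, pick the first state whose
     * transition latency is at most one third of the expected sleep
     * time; fall back to C1 (index 0) if none qualifies.
     */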
    for (; i >= 0; i--) {
	if (sc->cpu_cx_states[i].trans_lat * 3 <= us) {
	    cx_next_idx = i;
	    break;
	}
    }

    /*
     * Check for bus master activity.  If there was activity, clear
     * the bit and use the lowest non-C3 state.  Note that the USB
     * driver polling for new devices keeps this bit set all the
     * time if USB is loaded.
     */
    cx_next = &sc->cpu_cx_states[cx_next_idx];
    if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0 &&
	cx_next_idx > sc->cpu_non_c3 &&
	(!cx_next->do_mwait || cx_next->mwait_bm_avoidance)) {
	status = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active);
	if (ACPI_SUCCESS(status) && bm_active != 0) {
	    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	    cx_next_idx = sc->cpu_non_c3;
	    cx_next = &sc->cpu_cx_states[cx_next_idx];
	}
    }

    /* Select the next state and update statistics. */
    sc->cpu_cx_stats[cx_next_idx]++;
    KASSERT(cx_next->type != ACPI_STATE_C0, ("acpi_cpu_idle: C0 sleep"));

    /*
     * Execute HLT (or equivalent) and wait for an interrupt.  We can't
     * precisely calculate the time spent in C1 since the place we wake up
     * is an ISR.  Assume we slept no more than half of a quantum, unless
     * we are called inside a critical section, delaying the context switch.
     */
    if (cx_next->type == ACPI_STATE_C1) {
	start_ticks = cpu_ticks();
	if (cx_next->p_lvlx != NULL) {
	    /* C1 I/O then Halt */
	    CPU_GET_REG(cx_next->p_lvlx, 1);
	}
	if (cx_next->do_mwait)
	    acpi_cpu_idle_mwait(cx_next->mwait_hint);
	else
	    acpi_cpu_c1();
	end_ticks = cpu_ticks();
	/* acpi_cpu_c1() returns with interrupts enabled. */
	if (cx_next->do_mwait)
	    ACPI_ENABLE_IRQS();
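	/*
	 * Convert raw CPU ticks to roughly microseconds: ticks divided
	 * by cpu_tickrate() is seconds, and scaling by 2^20 (~1.05M)
	 * gives an inexpensive approximation of microseconds.
	 */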
	end_time = ((end_ticks - start_ticks) << 20) / cpu_tickrate();
	if (!cx_next->do_mwait && curthread->td_critnest == 0)
	    end_time = min(end_time, 500000 / hz);
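	/* Track recent sleep time as an EWMA: 3/4 old estimate, 1/4 new sample. */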
	sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + end_time) / 4;
	return;
    }

    /*
     * For C3, disable bus master arbitration and enable bus master wake
     * if BM control is available, otherwise flush the CPU cache.
     */
    if (cx_next->type == ACPI_STATE_C3) {
	if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
	    AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);
	    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
	} else
	    ACPI_FLUSH_CPU_CACHE();
    }

    /*
     * Read from P_LVLx to enter C2(+), checking time spent asleep.
     * Use the ACPI timer for measuring sleep time.  Since we need to
     * get the time very close to the CPU start/stop clock logic, this
     * is the only reliable time source.
     */
    if (cx_next->type == ACPI_STATE_C3) {
	AcpiGetTimer(&start_time);
	start_ticks = 0;
    } else {
	start_time = 0;
	start_ticks = cpu_ticks();
    }
    if (cx_next->do_mwait) {
	acpi_cpu_idle_mwait(cx_next->mwait_hint);
    } else {
	CPU_GET_REG(cx_next->p_lvlx, 1);
	/*
	 * Read the end time twice.  Since it may take an arbitrary time
	 * to enter the idle state, the first read may be executed before
	 * the processor has stopped.  Doing it again provides enough
	 * margin that we are certain to have a correct value.
	 */
	AcpiGetTimer(&end_time);
    }

    if (cx_next->type == ACPI_STATE_C3)
	AcpiGetTimer(&end_time);
    else
	end_ticks = cpu_ticks();

    /* Enable bus master arbitration and disable bus master wakeup. */
    if (cx_next->type == ACPI_STATE_C3 &&
	(cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
	AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);
	AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
    }
    ACPI_ENABLE_IRQS();

    if (cx_next->type == ACPI_STATE_C3)
	AcpiGetTimerDuration(start_time, end_time, &end_time);
    else
	end_time = ((end_ticks - start_ticks) << 20) / cpu_tickrate();
    sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + end_time) / 4;
}
#endif

/*
 * Re-evaluate the _CST object when we are notified that it changed.
 */
static void
acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context)
{
    struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)context;

    if (notify != ACPI_NOTIFY_CX_STATES)
	return;

    /*
     * C-state data for the target CPU is in flux while we execute
     * acpi_cpu_cx_cst, so disable entry to acpi_cpu_idle.
     * Multiple ACPI taskqueues may also execute notifications for the
     * same CPU concurrently; ACPI_SERIAL protects against that.
     */
    ACPI_SERIAL_BEGIN(cpu);
    disable_idle(sc);

    /* Update the list of Cx states. */
    acpi_cpu_cx_cst(sc);
    acpi_cpu_cx_list(sc);
    acpi_cpu_set_cx_lowest(sc);

    enable_idle(sc);
    ACPI_SERIAL_END(cpu);

    acpi_UserNotify("PROCESSOR", sc->cpu_handle, notify);
}

static void
acpi_cpu_quirks(void)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /*
     * Bus mastering arbitration control is needed to keep caches coherent
     * while sleeping in C3.  If it's not present but a working flush cache
     * instruction is present, flush the caches before entering C3 instead.
     * Otherwise, just disable C3 completely.
     */
    if (AcpiGbl_FADT.Pm2ControlBlock == 0 ||
	AcpiGbl_FADT.Pm2ControlLength == 0) {
	if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) &&
	    (AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0) {
	    cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"acpi_cpu: no BM control, using flush cache method\n"));
	} else {
	    cpu_quirks |= CPU_QUIRK_NO_C3;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"acpi_cpu: no BM control, C3 not available\n"));
	}
    }

    /*
     * If we are using generic Cx mode, C3 on multiple CPUs requires using
     * the expensive flush cache instruction.
     */
    if (cpu_cx_generic && mp_ncpus > 1) {
	cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
	    "acpi_cpu: SMP, using flush cache mode for C3\n"));
    }

    /* Look for various quirks of the PIIX4 part. */
    acpi_cpu_quirks_piix4();
}

static void
acpi_cpu_quirks_piix4(void)
{
#ifdef __i386__
    device_t acpi_dev;
    uint32_t val;
    ACPI_STATUS status;

    acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
    if (acpi_dev != NULL) {
	switch (pci_get_revid(acpi_dev)) {
	/*
	 * Disable C3 support for all PIIX4 chipsets.  Some of these parts
	 * do not report the BMIDE status to the BM status register and
	 * others have a livelock bug if Type-F DMA is enabled.  Linux
	 * works around the BMIDE bug by reading the BM status directly
	 * but we take the simpler approach of disabling C3 for these
	 * parts.
	 *
	 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
	 * Livelock") from the January 2002 PIIX4 specification update.
	 * Applies to all PIIX4 models.
	 *
	 * Also, make sure that all interrupts cause a "Stop Break"
	 * event to exit from C2 state.
	 * Also, BRLD_EN_BM (ACPI_BITREG_BUS_MASTER_RLD in ACPI-speak)
	 * should be set to zero, otherwise it causes C2 to short-sleep.
	 * PIIX4 doesn't properly support C3 and bus master activity
	 * need not break out of C2.
	 */
	case PCI_REVISION_A_STEP:
	case PCI_REVISION_B_STEP:
	case PCI_REVISION_4E:
	case PCI_REVISION_4M:
	    cpu_quirks |= CPU_QUIRK_NO_C3;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"acpi_cpu: working around PIIX4 bug, disabling C3\n"));

	    val = pci_read_config(acpi_dev, PIIX4_DEVACTB_REG, 4);
	    if ((val & PIIX4_STOP_BREAK_MASK) != PIIX4_STOP_BREAK_MASK) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "acpi_cpu: PIIX4: enabling IRQs to generate Stop Break\n"));
		val |= PIIX4_STOP_BREAK_MASK;
		pci_write_config(acpi_dev, PIIX4_DEVACTB_REG, val, 4);
	    }
	    status = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_RLD, &val);
	    if (ACPI_SUCCESS(status) && val != 0) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "acpi_cpu: PIIX4: reset BRLD_EN_BM\n"));
		AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
	    }
	    break;
	default:
	    break;
	}
    }
#endif
}

static int
acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)arg1;
    struct sbuf	 sb;
    char	 buf[128];
    int		 error, i;
    uintmax_t	 fract, sum, whole;

    sbuf_new_for_sysctl(&sb, buf, sizeof(buf), req);
    sum = 0;
    for (i = 0; i < sc->cpu_cx_count; i++)
	sum += sc->cpu_cx_stats[i];
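    /*
     * Fixed-point percentage: whole / sum is the integer percent for
     * this state, and fract / sum recovers two more decimal digits.
     */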
    for (i = 0; i < sc->cpu_cx_count; i++) {
	if (sum > 0) {
	    whole = (uintmax_t)sc->cpu_cx_stats[i] * 100;
	    fract = (whole % sum) * 100;
	    sbuf_printf(&sb, "%u.%02u%% ", (u_int)(whole / sum),
		(u_int)(fract / sum));
	} else
	    sbuf_printf(&sb, "0.00%% ");
    }
    sbuf_printf(&sb, "last %dus", sc->cpu_prev_sleep);
    error = sbuf_finish(&sb);
    sbuf_delete(&sb);
    return (error);
}

/*
 * XXX TODO: actually add support to count each entry/exit
 * from the Cx states.
 */
static int
acpi_cpu_usage_counters_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)arg1;
    struct sbuf	 sb;
    char	 buf[128];
    int		 error, i;

    sbuf_new_for_sysctl(&sb, buf, sizeof(buf), req);
    for (i = 0; i < sc->cpu_cx_count; i++) {
	if (i > 0)
	    sbuf_putc(&sb, ' ');
	sbuf_printf(&sb, "%u", sc->cpu_cx_stats[i]);
    }
    error = sbuf_finish(&sb);
    sbuf_delete(&sb);
    return (error);
}

#if defined(__i386__) || defined(__amd64__)
static int
acpi_cpu_method_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)arg1;
    struct acpi_cx *cx;
    struct sbuf sb;
    char buf[128];
    int error, i;

    sbuf_new_for_sysctl(&sb, buf, sizeof(buf), req);
    for (i = 0; i < sc->cpu_cx_count; i++) {
	cx = &sc->cpu_cx_states[i];
	if (i > 0)
	    sbuf_putc(&sb, ' ');
	sbuf_printf(&sb, "C%d/", i + 1);
	if (cx->do_mwait) {
	    sbuf_cat(&sb, "mwait");
	    if (cx->mwait_hw_coord)
		sbuf_cat(&sb, "/hwc");
	    if (cx->mwait_bm_avoidance)
		sbuf_cat(&sb, "/bma");
	} else if (cx->type == ACPI_STATE_C1) {
	    sbuf_cat(&sb, "hlt");
	} else {
	    sbuf_cat(&sb, "io");
	}
	if (cx->type == ACPI_STATE_C1 && cx->p_lvlx != NULL)
	    sbuf_cat(&sb, "/iohlt");
    }
    error = sbuf_finish(&sb);
    sbuf_delete(&sb);
    return (error);
}
#endif

static int
acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc)
{
    int i;

    ACPI_SERIAL_ASSERT(cpu);
    sc->cpu_cx_lowest = min(sc->cpu_cx_lowest_lim, sc->cpu_cx_count - 1);

    /* If not disabling, cache the new lowest non-C3 state. */
    sc->cpu_non_c3 = 0;
    for (i = sc->cpu_cx_lowest; i >= 0; i--) {
	if (sc->cpu_cx_states[i].type < ACPI_STATE_C3) {
	    sc->cpu_non_c3 = i;
	    break;
	}
    }

    /* Reset the statistics counters. */
    bzero(sc->cpu_cx_stats, sizeof(sc->cpu_cx_stats));
    return (0);
}

static int
acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct	 acpi_cpu_softc *sc;
    char	 state[8];
    int		 val, error;

    sc = (struct acpi_cpu_softc *) arg1;
    snprintf(state, sizeof(state), "C%d", sc->cpu_cx_lowest_lim + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
	return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
	return (EINVAL);
    if (strcasecmp(state, "Cmax") == 0)
	val = MAX_CX_STATES;
    else {
	val = (int) strtol(state + 1, NULL, 10);
	if (val < 1 || val > MAX_CX_STATES)
	    return (EINVAL);
    }

    ACPI_SERIAL_BEGIN(cpu);
    sc->cpu_cx_lowest_lim = val - 1;
    acpi_cpu_set_cx_lowest(sc);
    ACPI_SERIAL_END(cpu);

    return (0);
}

static int
acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct	acpi_cpu_softc *sc;
    char	state[8];
    int		val, error, i;

    snprintf(state, sizeof(state), "C%d", cpu_cx_lowest_lim + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
	return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
	return (EINVAL);
    if (strcasecmp(state, "Cmax") == 0)
	val = MAX_CX_STATES;
    else {
	val = (int) strtol(state + 1, NULL, 10);
	if (val < 1 || val > MAX_CX_STATES)
	    return (EINVAL);
    }

    /* Update the new lowest usable Cx state for all CPUs. */
    ACPI_SERIAL_BEGIN(cpu);
    cpu_cx_lowest_lim = val - 1;
    CPU_FOREACH(i) {
	if ((sc = cpu_softc[i]) == NULL)
	    continue;
	sc->cpu_cx_lowest_lim = cpu_cx_lowest_lim;
	acpi_cpu_set_cx_lowest(sc);
    }
    ACPI_SERIAL_END(cpu);

    return (0);
}