1 /*-
2 * Copyright (c) 2003-2005 Nate Lawson (SDG)
3 * Copyright (c) 2001 Michael Smith
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30
31 #include "opt_acpi.h"
32 #include <sys/param.h>
33 #include <sys/bus.h>
34 #include <sys/cpu.h>
35 #include <sys/kernel.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/rman.h>
39
40 #include <machine/bus.h>
41
42 #include <contrib/dev/acpica/include/acpi.h>
43
44 #include <dev/acpica/acpivar.h>
45 #include <dev/pci/pcivar.h>
46
47 #include "cpufreq_if.h"
48
49 /*
50 * Throttling provides relative frequency control. It involves modulating
51 * the clock so that the CPU is active for only a fraction of the normal
52 * clock cycle. It does not change voltage and so is less efficient than
53 * other mechanisms. Since it is relative, it can be used in addition to
54 * absolute cpufreq drivers. We support the ACPI 2.0 specification.
55 */
56
/* Per-CPU instance state for the ACPI throttling driver. */
struct acpi_throttle_softc {
	device_t	 cpu_dev;	/* Our device_t (child of cpu). */
	ACPI_HANDLE	 cpu_handle;	/* ACPI handle for the processor. */
	uint32_t	 cpu_p_blk;	/* ACPI P_BLK location */
	uint32_t	 cpu_p_blk_len;	/* P_BLK length (must be 6). */
	struct resource	*cpu_p_cnt;	/* Throttling control register */
	int		 cpu_p_type;	/* Resource type for cpu_p_cnt. */
	uint32_t	 cpu_thr_state;	/* Current throttle setting. */
};
66
/*
 * 32-bit accessors for the P_CNT register mapped into cpu_p_cnt by
 * acpi_throttle_evaluate(); the register lives at offset 0 of the
 * allocated bus resource.
 */
#define THR_GET_REG(reg) 					\
	(bus_space_read_4(rman_get_bustag((reg)), 		\
			  rman_get_bushandle((reg)), 0))
#define THR_SET_REG(reg, val)					\
	(bus_space_write_4(rman_get_bustag((reg)), 		\
			   rman_get_bushandle((reg)), 0, (val)))

/*
 * Speeds are stored in counts, from 1 to CPU_MAX_SPEED, and
 * reported to the user in hundredths of a percent.
 */
#define CPU_MAX_SPEED		(1 << cpu_duty_width)
#define CPU_SPEED_PERCENT(x)	((10000 * (x)) / CPU_MAX_SPEED)
#define CPU_SPEED_PRINTABLE(x)	(CPU_SPEED_PERCENT(x) / 10), \
				(CPU_SPEED_PERCENT(x) % 10)
#define CPU_P_CNT_THT_EN	(1<<4)	/* THT_EN: throttling enable bit. */
#define CPU_QUIRK_NO_THROTTLE	(1<<1)	/* Throttling is not usable. */

/* PCI IDs used to recognize the PIIX4 southbridge for quirk checks. */
#define PCI_VENDOR_INTEL	0x8086
#define PCI_DEVICE_82371AB_3	0x7113	/* PIIX4 chipset for quirks. */
#define PCI_REVISION_A_STEP	0
#define PCI_REVISION_B_STEP	1

/*
 * Driver-wide state shared by all CPUs.  The duty offset/width come
 * from the FADT and are latched by unit 0 in acpi_throttle_evaluate().
 */
static uint32_t	cpu_duty_offset;	/* Offset in P_CNT of throttle val. */
static uint32_t	cpu_duty_width;		/* Bit width of throttle value. */
static int	thr_rid;		/* Driver-wide resource id. */
static int	thr_quirks;		/* Indicate any hardware bugs. */
94
/* Forward declarations for the device and cpufreq driver methods. */
static void	acpi_throttle_identify(driver_t *driver, device_t parent);
static int	acpi_throttle_probe(device_t dev);
static int	acpi_throttle_attach(device_t dev);
static int	acpi_throttle_evaluate(struct acpi_throttle_softc *sc);
static void	acpi_throttle_quirks(struct acpi_throttle_softc *sc);
static int	acpi_thr_settings(device_t dev, struct cf_setting *sets,
		    int *count);
static int	acpi_thr_set(device_t dev, const struct cf_setting *set);
static int	acpi_thr_get(device_t dev, struct cf_setting *set);
static int	acpi_thr_type(device_t dev, int *type);

static device_method_t acpi_throttle_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	acpi_throttle_identify),
	DEVMETHOD(device_probe,		acpi_throttle_probe),
	DEVMETHOD(device_attach,	acpi_throttle_attach),

	/* cpufreq interface */
	DEVMETHOD(cpufreq_drv_set,	acpi_thr_set),
	DEVMETHOD(cpufreq_drv_get,	acpi_thr_get),
	DEVMETHOD(cpufreq_drv_type,	acpi_thr_type),
	DEVMETHOD(cpufreq_drv_settings,	acpi_thr_settings),
	DEVMETHOD_END
};

static driver_t acpi_throttle_driver = {
	"acpi_throttle",
	acpi_throttle_methods,
	sizeof(struct acpi_throttle_softc),
};

/* Attach one instance under each cpu(4) device. */
DRIVER_MODULE(acpi_throttle, cpu, acpi_throttle_driver, 0, 0);
127
/*
 * Bus identify method: add an acpi_throttle child under each ACPI
 * Processor device, provided the FADT advertises a non-zero duty
 * width and the processor has either a plausible P_BLK or a _PTC
 * method.
 */
static void
acpi_throttle_identify(driver_t *driver, device_t parent)
{
	ACPI_BUFFER buf;
	ACPI_HANDLE handle;
	ACPI_OBJECT *obj;

	/* Make sure we're not being doubly invoked. */
	if (device_find_child(parent, "acpi_throttle", -1))
		return;

	/* Check for a valid duty width and parent CPU type. */
	handle = acpi_get_handle(parent);
	if (handle == NULL)
		return;
	if (AcpiGbl_FADT.DutyWidth == 0 ||
	    acpi_get_type(parent) != ACPI_TYPE_PROCESSOR)
		return;

	/*
	 * Add a child if there's a non-NULL P_BLK and correct length, or
	 * if the _PTC method is present.
	 */
	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_BUFFER;
	/* NULL method name evaluates the Processor object itself. */
	if (ACPI_FAILURE(AcpiEvaluateObject(handle, NULL, NULL, &buf)))
		return;
	obj = (ACPI_OBJECT *)buf.Pointer;
	if ((obj->Processor.PblkAddress && obj->Processor.PblkLength >= 4) ||
	    ACPI_SUCCESS(AcpiEvaluateObject(handle, "_PTC", NULL, NULL))) {
		if (BUS_ADD_CHILD(parent, 0, "acpi_throttle",
		    device_get_unit(parent)) == NULL)
			device_printf(parent, "add throttle child failed\n");
	}
	/* Free the ACPICA-allocated result buffer. */
	AcpiOsFree(obj);
}
164
165 static int
166 acpi_throttle_probe(device_t dev)
167 {
168
169 if (resource_disabled("acpi_throttle", 0))
170 return (ENXIO);
171
172 /*
173 * On i386 platforms at least, ACPI throttling is accomplished by
174 * the chipset modulating the STPCLK# pin based on the duty cycle.
175 * Since p4tcc uses the same mechanism (but internal to the CPU),
176 * we disable acpi_throttle when p4tcc is also present.
177 */
178 if (device_find_child(device_get_parent(dev), "p4tcc", -1) &&
179 !resource_disabled("p4tcc", 0))
180 return (ENXIO);
181
182 device_set_desc(dev, "ACPI CPU Throttling");
183 return (0);
184 }
185
/*
 * Attach method.  Caches the Processor object's P_BLK address/length,
 * checks for chipset quirks (first unit only), maps the P_CNT
 * register, forces the CPU to full speed, and registers with
 * cpufreq(4).  Returns 0 on success or ENXIO/evaluate error.
 */
static int
acpi_throttle_attach(device_t dev)
{
	struct acpi_throttle_softc *sc;
	struct cf_setting set;
	ACPI_BUFFER buf;
	ACPI_OBJECT *obj;
	ACPI_STATUS status;
	int error;

	sc = device_get_softc(dev);
	sc->cpu_dev = dev;
	sc->cpu_handle = acpi_get_handle(dev);

	/* Evaluate the Processor object to fetch its P_BLK fields. */
	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_BUFFER;
	status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf);
	if (ACPI_FAILURE(status)) {
		device_printf(dev, "attach failed to get Processor obj - %s\n",
		    AcpiFormatException(status));
		return (ENXIO);
	}
	obj = (ACPI_OBJECT *)buf.Pointer;
	sc->cpu_p_blk = obj->Processor.PblkAddress;
	sc->cpu_p_blk_len = obj->Processor.PblkLength;
	AcpiOsFree(obj);

	/* If this is the first device probed, check for quirks. */
	if (device_get_unit(dev) == 0)
		acpi_throttle_quirks(sc);

	/* Attempt to attach the actual throttling register. */
	error = acpi_throttle_evaluate(sc);
	if (error)
		return (error);

	/*
	 * Set our initial frequency to the highest since some systems
	 * seem to boot with this at the lowest setting.
	 */
	set.freq = 10000;	/* 100.00%, in hundredths of a percent. */
	acpi_thr_set(dev, &set);

	/* Everything went ok, register with cpufreq(4). */
	cpufreq_register(dev);
	return (0);
}
233
/*
 * Locate and map the P_CNT (processor control) register, preferring
 * the address supplied by the _PTC method over the Processor object's
 * P_BLK, after validating the FADT duty-cycle field parameters.
 * Returns 0 on success or ENXIO if throttling is unusable.
 */
static int
acpi_throttle_evaluate(struct acpi_throttle_softc *sc)
{
	uint32_t duty_end;
	ACPI_BUFFER buf;
	ACPI_OBJECT obj;
	ACPI_GENERIC_ADDRESS gas;
	ACPI_STATUS status;

	/* Get throttling parameters from the FADT.  0 means not supported. */
	if (device_get_unit(sc->cpu_dev) == 0) {
		cpu_duty_offset = AcpiGbl_FADT.DutyOffset;
		cpu_duty_width = AcpiGbl_FADT.DutyWidth;
	}
	if (cpu_duty_width == 0 || (thr_quirks & CPU_QUIRK_NO_THROTTLE) != 0)
		return (ENXIO);

	/* Validate the duty offset/width. */
	duty_end = cpu_duty_offset + cpu_duty_width - 1;
	if (duty_end > 31) {
		device_printf(sc->cpu_dev,
		    "CLK_VAL field overflows P_CNT register\n");
		return (ENXIO);
	}
	/* Bit 4 of P_CNT is THT_EN; the CLK_VAL field must not cover it. */
	if (cpu_duty_offset <= 4 && duty_end >= 4) {
		device_printf(sc->cpu_dev,
		    "CLK_VAL field overlaps THT_EN bit\n");
		return (ENXIO);
	}

	/*
	 * If not present, fall back to using the processor's P_BLK to find
	 * the P_CNT register.
	 *
	 * Note that some systems seem to duplicate the P_BLK pointer
	 * across multiple CPUs, so not getting the resource is not fatal.
	 */
	buf.Pointer = &obj;
	buf.Length = sizeof(obj);
	status = AcpiEvaluateObject(sc->cpu_handle, "_PTC", NULL, &buf);
	if (ACPI_SUCCESS(status)) {
		/* _PTC returns a buffer with a GAS at byte offset 3. */
		if (obj.Buffer.Length < sizeof(ACPI_GENERIC_ADDRESS) + 3) {
			device_printf(sc->cpu_dev, "_PTC buffer too small\n");
			return (ENXIO);
		}
		memcpy(&gas, obj.Buffer.Pointer + 3, sizeof(gas));
		acpi_bus_alloc_gas(sc->cpu_dev, &sc->cpu_p_type, &thr_rid,
		    &gas, &sc->cpu_p_cnt, 0);
		if (sc->cpu_p_cnt != NULL && bootverbose) {
			device_printf(sc->cpu_dev, "P_CNT from _PTC %#jx\n",
			    gas.Address);
		}
	}

	/* If _PTC not present or other failure, try the P_BLK. */
	if (sc->cpu_p_cnt == NULL) {
		/*
		 * The spec says P_BLK must be 6 bytes long.  However, some
		 * systems use it to indicate a fractional set of features
		 * present so we take anything >= 4.
		 */
		if (sc->cpu_p_blk_len < 4)
			return (ENXIO);
		/*
		 * NOTE(review): only Address, SpaceId, and BitWidth are
		 * initialized on this path; the remaining gas fields hold
		 * stack garbage.  Presumably acpi_bus_alloc_gas() only
		 * needs these three for SYSTEM_IO space -- confirm.
		 */
		gas.Address = sc->cpu_p_blk;
		gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
		gas.BitWidth = 32;
		acpi_bus_alloc_gas(sc->cpu_dev, &sc->cpu_p_type, &thr_rid,
		    &gas, &sc->cpu_p_cnt, 0);
		if (sc->cpu_p_cnt != NULL) {
			if (bootverbose)
				device_printf(sc->cpu_dev,
				    "P_CNT from P_BLK %#x\n", sc->cpu_p_blk);
		} else {
			device_printf(sc->cpu_dev, "failed to attach P_CNT\n");
			return (ENXIO);
		}
	}
	/* Consume the driver-wide resource id for the next CPU's mapping. */
	thr_rid++;

	return (0);
}
315
/*
 * Probe the system (from the first attached CPU only) for chipsets
 * whose throttling support is known to be broken.  Currently this
 * flags the Intel PIIX4 A- and B-step parts as unusable.
 */
static void
acpi_throttle_quirks(struct acpi_throttle_softc *sc)
{
#ifdef __i386__
	device_t piix4;
	uint8_t revid;

	/* Look for various quirks of the PIIX4 part. */
	piix4 = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
	if (piix4 == NULL)
		return;

	/*
	 * Disable throttling control on PIIX4 A and B-step.
	 * See specification changes #13 ("Manual Throttle Duty Cycle")
	 * and #14 ("Enabling and Disabling Manual Throttle"), plus
	 * erratum #5 ("STPCLK# Deassertion Time") from the January
	 * 2002 PIIX4 specification update.  Note that few (if any)
	 * mobile systems ever used this part.
	 */
	revid = pci_get_revid(piix4);
	if (revid == PCI_REVISION_A_STEP || revid == PCI_REVISION_B_STEP)
		thr_quirks |= CPU_QUIRK_NO_THROTTLE;
#endif
}
344
345 static int
346 acpi_thr_settings(device_t dev, struct cf_setting *sets, int *count)
347 {
348 int i, speed;
349
350 if (sets == NULL || count == NULL)
351 return (EINVAL);
352 if (*count < CPU_MAX_SPEED)
353 return (E2BIG);
354
355 /* Return a list of valid settings for this driver. */
356 memset(sets, CPUFREQ_VAL_UNKNOWN, sizeof(*sets) * CPU_MAX_SPEED);
357 for (i = 0, speed = CPU_MAX_SPEED; speed != 0; i++, speed--) {
358 sets[i].freq = CPU_SPEED_PERCENT(speed);
359 sets[i].dev = dev;
360 }
361 *count = CPU_MAX_SPEED;
362
363 return (0);
364 }
365
/*
 * cpufreq method: set the throttle duty cycle to the requested
 * relative frequency (hundredths of a percent; 10000 == full speed).
 * The P_CNT sequence first clears THT_EN, then rewrites CLK_VAL, and
 * only then re-enables throttling, so the duty value is never changed
 * while throttling is active.
 */
static int
acpi_thr_set(device_t dev, const struct cf_setting *set)
{
	struct acpi_throttle_softc *sc;
	uint32_t clk_val, p_cnt, speed;

	if (set == NULL)
		return (EINVAL);
	sc = device_get_softc(dev);

	/*
	 * Validate requested state converts to a duty cycle that is an
	 * integer from [1 .. CPU_MAX_SPEED].
	 */
	speed = set->freq * CPU_MAX_SPEED / 10000;
	if (speed * 10000 != set->freq * CPU_MAX_SPEED ||
	    speed < 1 || speed > CPU_MAX_SPEED)
		return (EINVAL);

	/* If we're at this setting, don't bother applying it again. */
	if (speed == sc->cpu_thr_state)
		return (0);

	/* Get the current P_CNT value and disable throttling */
	p_cnt = THR_GET_REG(sc->cpu_p_cnt);
	p_cnt &= ~CPU_P_CNT_THT_EN;
	THR_SET_REG(sc->cpu_p_cnt, p_cnt);

	/* If we're at maximum speed, that's all */
	if (speed < CPU_MAX_SPEED) {
		/* Mask the old CLK_VAL off and OR in the new value */
		clk_val = (CPU_MAX_SPEED - 1) << cpu_duty_offset;
		p_cnt &= ~clk_val;
		p_cnt |= (speed << cpu_duty_offset);

		/* Write the new P_CNT value and then enable throttling */
		THR_SET_REG(sc->cpu_p_cnt, p_cnt);
		p_cnt |= CPU_P_CNT_THT_EN;
		THR_SET_REG(sc->cpu_p_cnt, p_cnt);
	}
	sc->cpu_thr_state = speed;

	return (0);
}
410
/*
 * cpufreq method: report the current relative speed by reading the
 * CLK_VAL field back from P_CNT.
 *
 * NOTE(review): if throttling has never been enabled (THT_EN clear),
 * CLK_VAL may read as 0 and we report 0.00% even though the CPU runs
 * at full speed -- confirm against chipset behavior.
 */
static int
acpi_thr_get(device_t dev, struct cf_setting *set)
{
	struct acpi_throttle_softc *sc;
	uint32_t p_cnt, clk_val;

	if (set == NULL)
		return (EINVAL);
	sc = device_get_softc(dev);

	/* Get the current throttling setting from P_CNT. */
	p_cnt = THR_GET_REG(sc->cpu_p_cnt);
	clk_val = (p_cnt >> cpu_duty_offset) & (CPU_MAX_SPEED - 1);
	/* Cache the duty count so acpi_thr_set() can skip no-op writes. */
	sc->cpu_thr_state = clk_val;

	/* Mark all fields unknown (byte-wise fill), then set freq/dev. */
	memset(set, CPUFREQ_VAL_UNKNOWN, sizeof(*set));
	set->freq = CPU_SPEED_PERCENT(clk_val);
	set->dev = dev;

	return (0);
}
432
433 static int
434 acpi_thr_type(device_t dev, int *type)
435 {
436
437 if (type == NULL)
438 return (EINVAL);
439
440 *type = CPUFREQ_TYPE_RELATIVE;
441 return (0);
442 }
Cache object: a2244c22eb9cba57d6616324dfcaae80
|