1 /*-
2 * Copyright (c) 2005 Nate Lawson
3 * Copyright (c) 2004 Colin Percival
4 * Copyright (c) 2004-2005 Bruno Durcot
5 * Copyright (c) 2004 FUKUDA Nobuhiko
6 * Copyright (c) 2009 Michael Reifenberger
7 * Copyright (c) 2009 Norikatsu Shigemura
8 * Copyright (c) 2008-2009 Gen Otsuji
9 *
10 * This code is depending on kern_cpu.c, est.c, powernow.c, p4tcc.c, smist.c
11 * in various parts. The authors of these files are Nate Lawson,
12 * Colin Percival, Bruno Durcot, and FUKUDA Nobuhiko.
13 * This code contains patches by Michael Reifenberger and Norikatsu Shigemura.
14 * Thank you.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted providing that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
26 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
27 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
29 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
33 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
34 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * For more info:
40 * BIOS and Kernel Developer's Guide(BKDG) for AMD Family 10h Processors
41 * 31116 Rev 3.20 February 04, 2009
42 * BIOS and Kernel Developer's Guide(BKDG) for AMD Family 11h Processors
43 * 41256 Rev 3.00 - July 07, 2008
44 */
45
46 #include <sys/cdefs.h>
47 __FBSDID("$FreeBSD$");
48
49 #include <sys/param.h>
50 #include <sys/bus.h>
51 #include <sys/cpu.h>
52 #include <sys/kernel.h>
53 #include <sys/module.h>
54 #include <sys/malloc.h>
55 #include <sys/proc.h>
56 #include <sys/pcpu.h>
57 #include <sys/smp.h>
58 #include <sys/sched.h>
59
60 #include <machine/md_var.h>
61 #include <machine/cputypes.h>
62 #include <machine/specialreg.h>
63
64 #include <contrib/dev/acpica/include/acpi.h>
65
66 #include <dev/acpica/acpivar.h>
67
68 #include "acpi_if.h"
69 #include "cpufreq_if.h"
70
#define MSR_AMD_10H_11H_LIMIT 0xc0010061	/* P-state Current Limit */
#define MSR_AMD_10H_11H_CONTROL 0xc0010062	/* P-state Control: write index to switch */
#define MSR_AMD_10H_11H_STATUS 0xc0010063	/* P-state Status: current index */
#define MSR_AMD_10H_11H_CONFIG 0xc0010064	/* P-state 0 definition; P1..Pn follow */

/*
 * Size of the per-softc settings table.  The 3-bit PstateMaxVal field
 * below limits the hardware to 8 P-states; 16 leaves ample headroom.
 */
#define AMD_10H_11H_MAX_STATES 16

/* for MSR_AMD_10H_11H_LIMIT C001_0061 */
#define AMD_10H_11H_GET_PSTATE_MAX_VAL(msr) (((msr) >> 4) & 0x7)
#define AMD_10H_11H_GET_PSTATE_LIMIT(msr) (((msr)) & 0x7)
/* for MSR_AMD_10H_11H_CONFIG 10h:C001_0064:68 / 11h:C001_0064:6B */
#define AMD_10H_11H_CUR_VID(msr) (((msr) >> 9) & 0x7F)
#define AMD_10H_11H_CUR_DID(msr) (((msr) >> 6) & 0x07)
#define AMD_10H_11H_CUR_FID(msr) ((msr) & 0x3F)

/* Family 17h P-state definition MSRs use wider FID/DID fields. */
#define AMD_17H_CUR_VID(msr) (((msr) >> 14) & 0xFF)
#define AMD_17H_CUR_DID(msr) (((msr) >> 8) & 0x3F)
#define AMD_17H_CUR_FID(msr) ((msr) & 0xFF)

/* Debug printf, gated by the debug.hwpstate_verbose sysctl/tunable. */
#define HWPSTATE_DEBUG(dev, msg...) \
do { \
	if (hwpstate_verbose) \
		device_printf(dev, msg); \
} while (0)

/* One table entry per hardware P-state. */
struct hwpstate_setting {
	int freq; /* CPU clock in Mhz or 100ths of a percent. */
	int volts; /* Voltage in mV. */
	int power; /* Power consumed in mW. */
	int lat; /* Transition latency in us. */
	int pstate_id; /* P-State id */
};

struct hwpstate_softc {
	device_t dev;
	struct hwpstate_setting hwpstate_settings[AMD_10H_11H_MAX_STATES];
	int cfnum; /* Number of valid entries in hwpstate_settings. */
};

static void hwpstate_identify(driver_t *driver, device_t parent);
static int hwpstate_probe(device_t dev);
static int hwpstate_attach(device_t dev);
static int hwpstate_detach(device_t dev);
static int hwpstate_set(device_t dev, const struct cf_setting *cf);
static int hwpstate_get(device_t dev, struct cf_setting *cf);
static int hwpstate_settings(device_t dev, struct cf_setting *sets, int *count);
static int hwpstate_type(device_t dev, int *type);
static int hwpstate_shutdown(device_t dev);
static int hwpstate_features(driver_t *driver, u_int *features);
static int hwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev);
static int hwpstate_get_info_from_msr(device_t dev);
static int hwpstate_goto_pstate(device_t dev, int pstate_id);

/* debug.hwpstate_verbose: enable HWPSTATE_DEBUG output. */
static int hwpstate_verbose;
SYSCTL_INT(_debug, OID_AUTO, hwpstate_verbose, CTLFLAG_RWTUN,
    &hwpstate_verbose, 0, "Debug hwpstate");

/* debug.hwpstate_verify: read back the status MSR after each transition. */
static int hwpstate_verify;
SYSCTL_INT(_debug, OID_AUTO, hwpstate_verify, CTLFLAG_RWTUN,
    &hwpstate_verify, 0, "Verify P-state after setting");

static device_method_t hwpstate_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify, hwpstate_identify),
	DEVMETHOD(device_probe, hwpstate_probe),
	DEVMETHOD(device_attach, hwpstate_attach),
	DEVMETHOD(device_detach, hwpstate_detach),
	DEVMETHOD(device_shutdown, hwpstate_shutdown),

	/* cpufreq interface */
	DEVMETHOD(cpufreq_drv_set, hwpstate_set),
	DEVMETHOD(cpufreq_drv_get, hwpstate_get),
	DEVMETHOD(cpufreq_drv_settings, hwpstate_settings),
	DEVMETHOD(cpufreq_drv_type, hwpstate_type),

	/* ACPI interface */
	DEVMETHOD(acpi_get_features, hwpstate_features),

	{0, 0}
};

static devclass_t hwpstate_devclass;
static driver_t hwpstate_driver = {
	"hwpstate",
	hwpstate_methods,
	sizeof(struct hwpstate_softc),
};

DRIVER_MODULE(hwpstate, cpu, hwpstate_driver, hwpstate_devclass, 0, 0);
160
161 /*
162 * Go to Px-state on all cpus considering the limit.
163 */
164 static int
165 hwpstate_goto_pstate(device_t dev, int id)
166 {
167 sbintime_t sbt;
168 uint64_t msr;
169 int cpu, i, j, limit;
170
171 /* get the current pstate limit */
172 msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
173 limit = AMD_10H_11H_GET_PSTATE_LIMIT(msr);
174 if (limit > id)
175 id = limit;
176
177 cpu = curcpu;
178 HWPSTATE_DEBUG(dev, "setting P%d-state on cpu%d\n", id, cpu);
179 /* Go To Px-state */
180 wrmsr(MSR_AMD_10H_11H_CONTROL, id);
181
182 /*
183 * We are going to the same Px-state on all cpus.
184 * Probably should take _PSD into account.
185 */
186 CPU_FOREACH(i) {
187 if (i == cpu)
188 continue;
189
190 /* Bind to each cpu. */
191 thread_lock(curthread);
192 sched_bind(curthread, i);
193 thread_unlock(curthread);
194 HWPSTATE_DEBUG(dev, "setting P%d-state on cpu%d\n", id, i);
195 /* Go To Px-state */
196 wrmsr(MSR_AMD_10H_11H_CONTROL, id);
197 }
198
199 /*
200 * Verify whether each core is in the requested P-state.
201 */
202 if (hwpstate_verify) {
203 CPU_FOREACH(i) {
204 thread_lock(curthread);
205 sched_bind(curthread, i);
206 thread_unlock(curthread);
207 /* wait loop (100*100 usec is enough ?) */
208 for (j = 0; j < 100; j++) {
209 /* get the result. not assure msr=id */
210 msr = rdmsr(MSR_AMD_10H_11H_STATUS);
211 if (msr == id)
212 break;
213 sbt = SBT_1MS / 10;
214 tsleep_sbt(dev, PZERO, "pstate_goto", sbt,
215 sbt >> tc_precexp, 0);
216 }
217 HWPSTATE_DEBUG(dev, "result: P%d-state on cpu%d\n",
218 (int)msr, i);
219 if (msr != id) {
220 HWPSTATE_DEBUG(dev,
221 "error: loop is not enough.\n");
222 return (ENXIO);
223 }
224 }
225 }
226
227 return (0);
228 }
229
230 static int
231 hwpstate_set(device_t dev, const struct cf_setting *cf)
232 {
233 struct hwpstate_softc *sc;
234 struct hwpstate_setting *set;
235 int i;
236
237 if (cf == NULL)
238 return (EINVAL);
239 sc = device_get_softc(dev);
240 set = sc->hwpstate_settings;
241 for (i = 0; i < sc->cfnum; i++)
242 if (CPUFREQ_CMP(cf->freq, set[i].freq))
243 break;
244 if (i == sc->cfnum)
245 return (EINVAL);
246
247 return (hwpstate_goto_pstate(dev, set[i].pstate_id));
248 }
249
250 static int
251 hwpstate_get(device_t dev, struct cf_setting *cf)
252 {
253 struct hwpstate_softc *sc;
254 struct hwpstate_setting set;
255 uint64_t msr;
256
257 sc = device_get_softc(dev);
258 if (cf == NULL)
259 return (EINVAL);
260 msr = rdmsr(MSR_AMD_10H_11H_STATUS);
261 if (msr >= sc->cfnum)
262 return (EINVAL);
263 set = sc->hwpstate_settings[msr];
264
265 cf->freq = set.freq;
266 cf->volts = set.volts;
267 cf->power = set.power;
268 cf->lat = set.lat;
269 cf->dev = dev;
270 return (0);
271 }
272
273 static int
274 hwpstate_settings(device_t dev, struct cf_setting *sets, int *count)
275 {
276 struct hwpstate_softc *sc;
277 struct hwpstate_setting set;
278 int i;
279
280 if (sets == NULL || count == NULL)
281 return (EINVAL);
282 sc = device_get_softc(dev);
283 if (*count < sc->cfnum)
284 return (E2BIG);
285 for (i = 0; i < sc->cfnum; i++, sets++) {
286 set = sc->hwpstate_settings[i];
287 sets->freq = set.freq;
288 sets->volts = set.volts;
289 sets->power = set.power;
290 sets->lat = set.lat;
291 sets->dev = dev;
292 }
293 *count = sc->cfnum;
294
295 return (0);
296 }
297
298 static int
299 hwpstate_type(device_t dev, int *type)
300 {
301
302 if (type == NULL)
303 return (EINVAL);
304
305 *type = CPUFREQ_TYPE_ABSOLUTE;
306 return (0);
307 }
308
/*
 * Bus identify method: add one hwpstate child under the cpu device when
 * the CPU is AMD family 10h+ and advertises hardware P-state support.
 */
static void
hwpstate_identify(driver_t *driver, device_t parent)
{

	/* Only one hwpstate child per CPU device. */
	if (device_find_child(parent, "hwpstate", -1) != NULL)
		return;

	/* Hardware P-states exist on AMD family 10h and newer only. */
	if (cpu_vendor_id != CPU_VENDOR_AMD || CPUID_TO_FAMILY(cpu_id) < 0x10)
		return;

	/*
	 * Check if hardware pstate enable bit (CPUID power-management
	 * info, AMDPM_HW_PSTATE) is set.
	 */
	if ((amd_pminfo & AMDPM_HW_PSTATE) == 0) {
		HWPSTATE_DEBUG(parent, "hwpstate enable bit is not set.\n");
		return;
	}

	/* Honor an administrator hint disabling the driver. */
	if (resource_disabled("hwpstate", 0))
		return;

	if (BUS_ADD_CHILD(parent, 10, "hwpstate", -1) == NULL)
		device_printf(parent, "hwpstate: add child failed\n");
}
333
/*
 * Probe: gather the P-state table, preferring ACPI _PSS data (via an
 * INFO_ONLY acpi_perf sibling) and falling back to raw MSR probing.
 *
 * NOTE: "error" doubles as a sentinel here — it stays nonzero (TRUE)
 * until P-state info has been fetched successfully from acpi_perf,
 * which routes control into the MSR fallback below.
 */
static int
hwpstate_probe(device_t dev)
{
	struct hwpstate_softc *sc;
	device_t perf_dev;
	uint64_t msr;
	int error, type;

	/*
	 * Only hwpstate0.
	 * It goes well with acpi_throttle.
	 */
	if (device_get_unit(dev) != 0)
		return (ENXIO);

	sc = device_get_softc(dev);
	sc->dev = dev;

	/*
	 * Check if acpi_perf has INFO only flag.
	 */
	perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1);
	error = TRUE;
	if (perf_dev && device_is_attached(perf_dev)) {
		error = CPUFREQ_DRV_TYPE(perf_dev, &type);
		if (error == 0) {
			if ((type & CPUFREQ_FLAG_INFO_ONLY) == 0) {
				/*
				 * If acpi_perf doesn't have INFO_ONLY flag,
				 * it will take care of pstate transitions.
				 */
				HWPSTATE_DEBUG(dev, "acpi_perf will take care of pstate transitions.\n");
				return (ENXIO);
			} else {
				/*
				 * If acpi_perf has INFO_ONLY flag, (_PCT has FFixedHW)
				 * we can get _PSS info from acpi_perf
				 * without going into ACPI.
				 */
				HWPSTATE_DEBUG(dev, "going to fetch info from acpi_perf\n");
				error = hwpstate_get_info_from_acpi_perf(dev, perf_dev);
			}
		}
	}

	if (error == 0) {
		/*
		 * We got _PSS info from acpi_perf without error.
		 * Cross-check its state count against the limit MSR;
		 * on mismatch, distrust ACPI and fall back to MSRs.
		 */
		msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
		if (sc->cfnum != 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr)) {
			HWPSTATE_DEBUG(dev, "MSR (%jd) and ACPI _PSS (%d)"
			    " count mismatch\n", (intmax_t)msr, sc->cfnum);
			error = TRUE;
		}
	}

	/*
	 * If we cannot get info from acpi_perf,
	 * Let's get info from MSRs.
	 */
	if (error)
		error = hwpstate_get_info_from_msr(dev);
	if (error)
		return (error);

	device_set_desc(dev, "Cool`n'Quiet 2.0");
	return (0);
}
404
405 static int
406 hwpstate_attach(device_t dev)
407 {
408
409 return (cpufreq_register(dev));
410 }
411
/*
 * Build the P-state table directly from the per-state definition MSRs
 * (MSR_AMD_10H_11H_CONFIG + i).  Used when ACPI _PSS data is missing
 * or inconsistent.  Voltage/power/latency are left unknown.
 */
static int
hwpstate_get_info_from_msr(device_t dev)
{
	struct hwpstate_softc *sc;
	struct hwpstate_setting *hwpstate_set;
	uint64_t msr;
	int family, i, fid, did;

	family = CPUID_TO_FAMILY(cpu_id);
	sc = device_get_softc(dev);
	/* Get pstate count: PstateMaxVal is the highest valid state index. */
	msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
	sc->cfnum = 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr);
	hwpstate_set = sc->hwpstate_settings;
	for (i = 0; i < sc->cfnum; i++) {
		msr = rdmsr(MSR_AMD_10H_11H_CONFIG + i);
		/* Bit 63 (PstateEn) must be set for a valid entry. */
		if ((msr & ((uint64_t)1 << 63)) == 0) {
			HWPSTATE_DEBUG(dev, "msr is not valid.\n");
			return (ENXIO);
		}
		did = AMD_10H_11H_CUR_DID(msr);
		fid = AMD_10H_11H_CUR_FID(msr);

		/*
		 * Convert fid/did to frequency.  The FID/DID encoding is
		 * family-specific; see the per-family BKDG/PPR.
		 */
		switch (family) {
		case 0x11:
			/* 11h: freq = 100 MHz * (FID + 08h) / 2^DID */
			hwpstate_set[i].freq = (100 * (fid + 0x08)) >> did;
			break;
		case 0x10:
		case 0x12:
		case 0x15:
		case 0x16:
			/* freq = 100 MHz * (FID + 10h) / 2^DID */
			hwpstate_set[i].freq = (100 * (fid + 0x10)) >> did;
			break;
		case 0x17:
			/*
			 * 17h uses wider FID/DID fields; the 10h/11h values
			 * read above are discarded here.  freq is computed
			 * as 200 MHz * FID / DID; DID == 0 would divide by
			 * zero, so treat it as 1 with a debug warning.
			 */
			did = AMD_17H_CUR_DID(msr);
			if (did == 0) {
				HWPSTATE_DEBUG(dev, "unexpected did: 0\n");
				did = 1;
			}
			fid = AMD_17H_CUR_FID(msr);
			hwpstate_set[i].freq = (200 * fid) / did;
			break;
		default:
			HWPSTATE_DEBUG(dev, "get_info_from_msr: AMD family"
			    " 0x%02x CPUs are not supported yet\n", family);
			return (ENXIO);
		}
		hwpstate_set[i].pstate_id = i;
		/* There was volts calculation, but deleted it. */
		hwpstate_set[i].volts = CPUFREQ_VAL_UNKNOWN;
		hwpstate_set[i].power = CPUFREQ_VAL_UNKNOWN;
		hwpstate_set[i].lat = CPUFREQ_VAL_UNKNOWN;
	}
	return (0);
}
468
469 static int
470 hwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev)
471 {
472 struct hwpstate_softc *sc;
473 struct cf_setting *perf_set;
474 struct hwpstate_setting *hwpstate_set;
475 int count, error, i;
476
477 perf_set = malloc(MAX_SETTINGS * sizeof(*perf_set), M_TEMP, M_NOWAIT);
478 if (perf_set == NULL) {
479 HWPSTATE_DEBUG(dev, "nomem\n");
480 return (ENOMEM);
481 }
482 /*
483 * Fetch settings from acpi_perf.
484 * Now it is attached, and has info only flag.
485 */
486 count = MAX_SETTINGS;
487 error = CPUFREQ_DRV_SETTINGS(perf_dev, perf_set, &count);
488 if (error) {
489 HWPSTATE_DEBUG(dev, "error: CPUFREQ_DRV_SETTINGS.\n");
490 goto out;
491 }
492 sc = device_get_softc(dev);
493 sc->cfnum = count;
494 hwpstate_set = sc->hwpstate_settings;
495 for (i = 0; i < count; i++) {
496 if (i == perf_set[i].spec[0]) {
497 hwpstate_set[i].pstate_id = i;
498 hwpstate_set[i].freq = perf_set[i].freq;
499 hwpstate_set[i].volts = perf_set[i].volts;
500 hwpstate_set[i].power = perf_set[i].power;
501 hwpstate_set[i].lat = perf_set[i].lat;
502 } else {
503 HWPSTATE_DEBUG(dev, "ACPI _PSS object mismatch.\n");
504 error = ENXIO;
505 goto out;
506 }
507 }
508 out:
509 if (perf_set)
510 free(perf_set, M_TEMP);
511 return (error);
512 }
513
514 static int
515 hwpstate_detach(device_t dev)
516 {
517
518 hwpstate_goto_pstate(dev, 0);
519 return (cpufreq_unregister(dev));
520 }
521
/*
 * Shutdown: intentionally a no-op.  The transition back to P0 is
 * deliberately disabled (left commented out for reference).
 */
static int
hwpstate_shutdown(device_t dev)
{

	/* hwpstate_goto_pstate(dev, 0); */
	return (0);
}
529
530 static int
531 hwpstate_features(driver_t *driver, u_int *features)
532 {
533
534 /* Notify the ACPI CPU that we support direct access to MSRs */
535 *features = ACPI_CAP_PERF_MSRS;
536 return (0);
537 }
Cache object: 24ef5718b5b43854079e9d1951a4f4eb
|