1 /*-
2 * Copyright (c) 2005 Nate Lawson
3 * Copyright (c) 2004 Colin Percival
4 * Copyright (c) 2004-2005 Bruno Durcot
5 * Copyright (c) 2004 FUKUDA Nobuhiko
6 * Copyright (c) 2009 Michael Reifenberger
7 * Copyright (c) 2009 Norikatsu Shigemura
8 * Copyright (c) 2008-2009 Gen Otsuji
9 *
10 * This code is depending on kern_cpu.c, est.c, powernow.c, p4tcc.c, smist.c
11 * in various parts. The authors of these files are Nate Lawson,
12 * Colin Percival, Bruno Durcot, and FUKUDA Nobuhiko.
13 * This code contains patches by Michael Reifenberger and Norikatsu Shigemura.
14 * Thank you.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted providing that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
26 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
27 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
29 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
33 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
34 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * For more info:
40 * BIOS and Kernel Developer's Guide(BKDG) for AMD Family 10h Processors
41 * 31116 Rev 3.20 February 04, 2009
42 * BIOS and Kernel Developer's Guide(BKDG) for AMD Family 11h Processors
43 * 41256 Rev 3.00 - July 07, 2008
44 */
45
46 #include <sys/cdefs.h>
47 __FBSDID("$FreeBSD: releng/11.1/sys/x86/cpufreq/hwpstate.c 309443 2016-12-02 21:35:14Z jhb $");
48
49 #include <sys/param.h>
50 #include <sys/bus.h>
51 #include <sys/cpu.h>
52 #include <sys/kernel.h>
53 #include <sys/module.h>
54 #include <sys/malloc.h>
55 #include <sys/proc.h>
56 #include <sys/pcpu.h>
57 #include <sys/smp.h>
58 #include <sys/sched.h>
59
60 #include <machine/md_var.h>
61 #include <machine/cputypes.h>
62 #include <machine/specialreg.h>
63
64 #include <contrib/dev/acpica/include/acpi.h>
65
66 #include <dev/acpica/acpivar.h>
67
68 #include "acpi_if.h"
69 #include "cpufreq_if.h"
70
/*
 * AMD family 10h/11h P-state MSR addresses (see the BKDG documents cited
 * in the header comment above).
 */
#define MSR_AMD_10H_11H_LIMIT 0xc0010061
#define MSR_AMD_10H_11H_CONTROL 0xc0010062
#define MSR_AMD_10H_11H_STATUS 0xc0010063
#define MSR_AMD_10H_11H_CONFIG 0xc0010064

/* Size of the fixed per-softc settings table below. */
#define AMD_10H_11H_MAX_STATES 16

/* for MSR_AMD_10H_11H_LIMIT C001_0061 */
#define AMD_10H_11H_GET_PSTATE_MAX_VAL(msr) (((msr) >> 4) & 0x7)
#define AMD_10H_11H_GET_PSTATE_LIMIT(msr) (((msr)) & 0x7)
/* for MSR_AMD_10H_11H_CONFIG 10h:C001_0064:68 / 11h:C001_0064:6B */
#define AMD_10H_11H_CUR_VID(msr) (((msr) >> 9) & 0x7F)
#define AMD_10H_11H_CUR_DID(msr) (((msr) >> 6) & 0x07)
#define AMD_10H_11H_CUR_FID(msr) ((msr) & 0x3F)

/* Print only when the debug.hwpstate_verbose sysctl/tunable is set. */
#define HWPSTATE_DEBUG(dev, msg...)			\
	do{						\
		if(hwpstate_verbose)			\
			device_printf(dev, msg);	\
	}while(0)

/* One entry of the driver's P-state table, mirroring struct cf_setting. */
struct hwpstate_setting {
	int	freq;		/* CPU clock in Mhz or 100ths of a percent. */
	int	volts;		/* Voltage in mV. */
	int	power;		/* Power consumed in mW. */
	int	lat;		/* Transition latency in us. */
	int	pstate_id;	/* P-State id */
};

/* Per-device state: the P-state table and its valid entry count. */
struct hwpstate_softc {
	device_t		dev;
	struct hwpstate_setting	hwpstate_settings[AMD_10H_11H_MAX_STATES];
	int			cfnum;	/* Number of valid entries above. */
};
105
/* Device and cpufreq interface methods, implemented below. */
static void	hwpstate_identify(driver_t *driver, device_t parent);
static int	hwpstate_probe(device_t dev);
static int	hwpstate_attach(device_t dev);
static int	hwpstate_detach(device_t dev);
static int	hwpstate_set(device_t dev, const struct cf_setting *cf);
static int	hwpstate_get(device_t dev, struct cf_setting *cf);
static int	hwpstate_settings(device_t dev, struct cf_setting *sets, int *count);
static int	hwpstate_type(device_t dev, int *type);
static int	hwpstate_shutdown(device_t dev);
static int	hwpstate_features(driver_t *driver, u_int *features);
static int	hwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev);
static int	hwpstate_get_info_from_msr(device_t dev);
static int	hwpstate_goto_pstate(device_t dev, int pstate_id);

/* Debug knob: debug.hwpstate_verbose (also settable as a loader tunable). */
static int	hwpstate_verbose = 0;
SYSCTL_INT(_debug, OID_AUTO, hwpstate_verbose, CTLFLAG_RWTUN,
    &hwpstate_verbose, 0, "Debug hwpstate");

static device_method_t hwpstate_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	hwpstate_identify),
	DEVMETHOD(device_probe,		hwpstate_probe),
	DEVMETHOD(device_attach,	hwpstate_attach),
	DEVMETHOD(device_detach,	hwpstate_detach),
	DEVMETHOD(device_shutdown,	hwpstate_shutdown),

	/* cpufreq interface */
	DEVMETHOD(cpufreq_drv_set,	hwpstate_set),
	DEVMETHOD(cpufreq_drv_get,	hwpstate_get),
	DEVMETHOD(cpufreq_drv_settings,	hwpstate_settings),
	DEVMETHOD(cpufreq_drv_type,	hwpstate_type),

	/* ACPI interface */
	DEVMETHOD(acpi_get_features,	hwpstate_features),

	{0, 0}
};

static devclass_t hwpstate_devclass;
static driver_t hwpstate_driver = {
	"hwpstate",
	hwpstate_methods,
	sizeof(struct hwpstate_softc),
};

/* Attach one hwpstate child under each cpu device. */
DRIVER_MODULE(hwpstate, cpu, hwpstate_driver, hwpstate_devclass, 0, 0);
152
/*
 * Go to Px-state on all cpus considering the limit.
 *
 * The requested state is first clamped against the hardware-enforced
 * limit read from MSR_AMD_10H_11H_LIMIT (a numerically larger limit
 * restricts the machine to slower states).  The target state is then
 * written to the control MSR on every CPU, and in a second pass the
 * status MSR is polled on every CPU until the transition is observed.
 * Returns 0 on success, ENXIO if any CPU never reported the target
 * state within the poll window.
 */
static int
hwpstate_goto_pstate(device_t dev, int pstate)
{
	int i;
	uint64_t msr;
	int j;
	int limit;
	int id = pstate;
	int error;

	/* get the current pstate limit */
	msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
	limit = AMD_10H_11H_GET_PSTATE_LIMIT(msr);
	if(limit > id)
		id = limit;

	/*
	 * We are going to the same Px-state on all cpus.
	 * Probably should take _PSD into account.
	 */
	error = 0;
	CPU_FOREACH(i) {
		/* Bind to each cpu so wrmsr() hits that cpu's MSR. */
		thread_lock(curthread);
		sched_bind(curthread, i);
		thread_unlock(curthread);
		HWPSTATE_DEBUG(dev, "setting P%d-state on cpu%d\n",
		    id, PCPU_GET(cpuid));
		/* Go To Px-state */
		wrmsr(MSR_AMD_10H_11H_CONTROL, id);
	}
	CPU_FOREACH(i) {
		/* Bind to each cpu. */
		thread_lock(curthread);
		sched_bind(curthread, i);
		thread_unlock(curthread);
		/* wait loop (100*100 usec is enough ?) */
		for(j = 0; j < 100; j++){
			/* get the result. not assure msr=id */
			msr = rdmsr(MSR_AMD_10H_11H_STATUS);
			/*
			 * NOTE(review): this compares the entire 64-bit
			 * status MSR to id, which assumes the bits above
			 * CurPstate read as zero -- confirm against BKDG.
			 */
			if(msr == id){
				break;
			}
			DELAY(100);
		}
		HWPSTATE_DEBUG(dev, "result P%d-state on cpu%d\n",
		    (int)msr, PCPU_GET(cpuid));
		if (msr != id) {
			HWPSTATE_DEBUG(dev, "error: loop is not enough.\n");
			error = ENXIO;
		}
	}
	/* Release the binding left over from the last loop iteration. */
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
	return (error);
}
213
214 static int
215 hwpstate_set(device_t dev, const struct cf_setting *cf)
216 {
217 struct hwpstate_softc *sc;
218 struct hwpstate_setting *set;
219 int i;
220
221 if (cf == NULL)
222 return (EINVAL);
223 sc = device_get_softc(dev);
224 set = sc->hwpstate_settings;
225 for (i = 0; i < sc->cfnum; i++)
226 if (CPUFREQ_CMP(cf->freq, set[i].freq))
227 break;
228 if (i == sc->cfnum)
229 return (EINVAL);
230
231 return (hwpstate_goto_pstate(dev, set[i].pstate_id));
232 }
233
234 static int
235 hwpstate_get(device_t dev, struct cf_setting *cf)
236 {
237 struct hwpstate_softc *sc;
238 struct hwpstate_setting set;
239 uint64_t msr;
240
241 sc = device_get_softc(dev);
242 if (cf == NULL)
243 return (EINVAL);
244 msr = rdmsr(MSR_AMD_10H_11H_STATUS);
245 if(msr >= sc->cfnum)
246 return (EINVAL);
247 set = sc->hwpstate_settings[msr];
248
249 cf->freq = set.freq;
250 cf->volts = set.volts;
251 cf->power = set.power;
252 cf->lat = set.lat;
253 cf->dev = dev;
254 return (0);
255 }
256
257 static int
258 hwpstate_settings(device_t dev, struct cf_setting *sets, int *count)
259 {
260 struct hwpstate_softc *sc;
261 struct hwpstate_setting set;
262 int i;
263
264 if (sets == NULL || count == NULL)
265 return (EINVAL);
266 sc = device_get_softc(dev);
267 if (*count < sc->cfnum)
268 return (E2BIG);
269 for (i = 0; i < sc->cfnum; i++, sets++) {
270 set = sc->hwpstate_settings[i];
271 sets->freq = set.freq;
272 sets->volts = set.volts;
273 sets->power = set.power;
274 sets->lat = set.lat;
275 sets->dev = dev;
276 }
277 *count = sc->cfnum;
278
279 return (0);
280 }
281
282 static int
283 hwpstate_type(device_t dev, int *type)
284 {
285
286 if (type == NULL)
287 return (EINVAL);
288
289 *type = CPUFREQ_TYPE_ABSOLUTE;
290 return (0);
291 }
292
293 static void
294 hwpstate_identify(driver_t *driver, device_t parent)
295 {
296
297 if (device_find_child(parent, "hwpstate", -1) != NULL)
298 return;
299
300 if (cpu_vendor_id != CPU_VENDOR_AMD || CPUID_TO_FAMILY(cpu_id) < 0x10)
301 return;
302
303 /*
304 * Check if hardware pstate enable bit is set.
305 */
306 if ((amd_pminfo & AMDPM_HW_PSTATE) == 0) {
307 HWPSTATE_DEBUG(parent, "hwpstate enable bit is not set.\n");
308 return;
309 }
310
311 if (resource_disabled("hwpstate", 0))
312 return;
313
314 if (BUS_ADD_CHILD(parent, 10, "hwpstate", -1) == NULL)
315 device_printf(parent, "hwpstate: add child failed\n");
316 }
317
/*
 * Probe for hardware P-state (Cool`n'Quiet 2.0) support.
 *
 * Prefers populating the P-state table from acpi_perf (when attached
 * and flagged INFO_ONLY), then cross-checks the entry count against the
 * LIMIT MSR.  "error" doubles as a fallback flag here: any non-zero
 * value (including the initial TRUE) routes us to the MSR-based table
 * builder instead.
 */
static int
hwpstate_probe(device_t dev)
{
	struct hwpstate_softc *sc;
	device_t perf_dev;
	uint64_t msr;
	int error, type;

	/*
	 * Only hwpstate0.
	 * It goes well with acpi_throttle.
	 */
	if (device_get_unit(dev) != 0)
		return (ENXIO);

	sc = device_get_softc(dev);
	sc->dev = dev;

	/*
	 * Check if acpi_perf has INFO only flag.
	 */
	perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1);
	error = TRUE;
	if (perf_dev && device_is_attached(perf_dev)) {
		error = CPUFREQ_DRV_TYPE(perf_dev, &type);
		if (error == 0) {
			if ((type & CPUFREQ_FLAG_INFO_ONLY) == 0) {
				/*
				 * If acpi_perf doesn't have INFO_ONLY flag,
				 * it will take care of pstate transitions.
				 */
				HWPSTATE_DEBUG(dev, "acpi_perf will take care of pstate transitions.\n");
				return (ENXIO);
			} else {
				/*
				 * If acpi_perf has INFO_ONLY flag, (_PCT has FFixedHW)
				 * we can get _PSS info from acpi_perf
				 * without going into ACPI.
				 */
				HWPSTATE_DEBUG(dev, "going to fetch info from acpi_perf\n");
				error = hwpstate_get_info_from_acpi_perf(dev, perf_dev);
			}
		}
	}

	if (error == 0) {
		/*
		 * Now we get _PSS info from acpi_perf without error.
		 * Let's check it.
		 */
		msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
		/* The ACPI count must agree with the MSR-reported count. */
		if (sc->cfnum != 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr)) {
			HWPSTATE_DEBUG(dev, "msr and acpi _PSS count mismatch.\n");
			error = TRUE;
		}
	}

	/*
	 * If we cannot get info from acpi_perf,
	 * Let's get info from MSRs.
	 */
	if (error)
		error = hwpstate_get_info_from_msr(dev);
	if (error)
		return (error);

	device_set_desc(dev, "Cool`n'Quiet 2.0");
	return (0);
}
387
/*
 * Attach method: register this device with the cpufreq framework,
 * which will subsequently drive the set/get/settings/type methods.
 */
static int
hwpstate_attach(device_t dev)
{

	return (cpufreq_register(dev));
}
394
/*
 * Build the P-state table by decoding the per-state configuration MSRs
 * (MSR_AMD_10H_11H_CONFIG + i) directly, without ACPI help.
 *
 * Returns 0 on success, or ENXIO when a P-state MSR is not marked valid
 * or the CPU family's fid/did frequency encoding is not implemented.
 */
static int
hwpstate_get_info_from_msr(device_t dev)
{
	struct hwpstate_softc *sc;
	struct hwpstate_setting *hwpstate_set;
	uint64_t msr;
	int family, i, fid, did;

	family = CPUID_TO_FAMILY(cpu_id);
	sc = device_get_softc(dev);
	/* Get pstate count */
	msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
	sc->cfnum = 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr);
	hwpstate_set = sc->hwpstate_settings;
	for (i = 0; i < sc->cfnum; i++) {
		msr = rdmsr(MSR_AMD_10H_11H_CONFIG + i);
		/* Bit 63 marks the P-state entry as enabled/valid. */
		if ((msr & ((uint64_t)1 << 63)) == 0) {
			HWPSTATE_DEBUG(dev, "msr is not valid.\n");
			return (ENXIO);
		}
		did = AMD_10H_11H_CUR_DID(msr);
		fid = AMD_10H_11H_CUR_FID(msr);

		/* Convert fid/did to frequency. */
		switch(family) {
		case 0x11:
			/* Family 11h: freq = 100 * (fid + 8) / 2^did MHz. */
			hwpstate_set[i].freq = (100 * (fid + 0x08)) >> did;
			break;
		case 0x10:
		case 0x12:
		case 0x15:
		case 0x16:
			/* Families 10h/12h/15h/16h: 100 * (fid + 16) / 2^did. */
			hwpstate_set[i].freq = (100 * (fid + 0x10)) >> did;
			break;
		default:
			HWPSTATE_DEBUG(dev, "get_info_from_msr: AMD family 0x%02x CPU's are not implemented yet. sorry.\n", family);
			return (ENXIO);
		}
		hwpstate_set[i].pstate_id = i;
		/* There was volts calculation, but deleted it. */
		hwpstate_set[i].volts = CPUFREQ_VAL_UNKNOWN;
		hwpstate_set[i].power = CPUFREQ_VAL_UNKNOWN;
		hwpstate_set[i].lat = CPUFREQ_VAL_UNKNOWN;
	}
	return (0);
}
441
442 static int
443 hwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev)
444 {
445 struct hwpstate_softc *sc;
446 struct cf_setting *perf_set;
447 struct hwpstate_setting *hwpstate_set;
448 int count, error, i;
449
450 perf_set = malloc(MAX_SETTINGS * sizeof(*perf_set), M_TEMP, M_NOWAIT);
451 if (perf_set == NULL) {
452 HWPSTATE_DEBUG(dev, "nomem\n");
453 return (ENOMEM);
454 }
455 /*
456 * Fetch settings from acpi_perf.
457 * Now it is attached, and has info only flag.
458 */
459 count = MAX_SETTINGS;
460 error = CPUFREQ_DRV_SETTINGS(perf_dev, perf_set, &count);
461 if (error) {
462 HWPSTATE_DEBUG(dev, "error: CPUFREQ_DRV_SETTINGS.\n");
463 goto out;
464 }
465 sc = device_get_softc(dev);
466 sc->cfnum = count;
467 hwpstate_set = sc->hwpstate_settings;
468 for (i = 0; i < count; i++) {
469 if (i == perf_set[i].spec[0]) {
470 hwpstate_set[i].pstate_id = i;
471 hwpstate_set[i].freq = perf_set[i].freq;
472 hwpstate_set[i].volts = perf_set[i].volts;
473 hwpstate_set[i].power = perf_set[i].power;
474 hwpstate_set[i].lat = perf_set[i].lat;
475 } else {
476 HWPSTATE_DEBUG(dev, "ACPI _PSS object mismatch.\n");
477 error = ENXIO;
478 goto out;
479 }
480 }
481 out:
482 if (perf_set)
483 free(perf_set, M_TEMP);
484 return (error);
485 }
486
487 static int
488 hwpstate_detach(device_t dev)
489 {
490
491 hwpstate_goto_pstate(dev, 0);
492 return (cpufreq_unregister(dev));
493 }
494
/*
 * Shutdown method: intentionally a no-op (the P0 transition below was
 * disabled at some point and left as a reminder).
 */
static int
hwpstate_shutdown(device_t dev)
{

	/* hwpstate_goto_pstate(dev, 0); */
	return (0);
}
502
/*
 * ACPI features method: advertise that this driver manages P-states
 * through direct MSR access.
 */
static int
hwpstate_features(driver_t *driver, u_int *features)
{

	/* Notify the ACPI CPU that we support direct access to MSRs */
	*features = ACPI_CAP_PERF_MSRS;
	return (0);
}
Cache object: 91dac2b97033172eeedb3f479a41bd1e
|