1 /*-
2 * Copyright (c) 2005 Nate Lawson
3 * Copyright (c) 2004 Colin Percival
4 * Copyright (c) 2004-2005 Bruno Durcot
5 * Copyright (c) 2004 FUKUDA Nobuhiko
6 * Copyright (c) 2009 Michael Reifenberger
7 * Copyright (c) 2009 Norikatsu Shigemura
8 * Copyright (c) 2008-2009 Gen Otsuji
9 *
10 * This code is depending on kern_cpu.c, est.c, powernow.c, p4tcc.c, smist.c
11 * in various parts. The authors of these files are Nate Lawson,
12 * Colin Percival, Bruno Durcot, and FUKUDA Nobuhiko.
13 * This code contains patches by Michael Reifenberger and Norikatsu Shigemura.
14 * Thank you.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted providing that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
26 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
27 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
29 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
33 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
34 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * For more info:
40 * BIOS and Kernel Developer's Guide(BKDG) for AMD Family 10h Processors
41 * 31116 Rev 3.20 February 04, 2009
42 * BIOS and Kernel Developer's Guide(BKDG) for AMD Family 11h Processors
43 * 41256 Rev 3.00 - July 07, 2008
44 */
45
46 #include <sys/cdefs.h>
47 __FBSDID("$FreeBSD: releng/7.3/sys/i386/cpufreq/hwpstate.c 200773 2009-12-21 14:03:40Z avg $");
48
49 #include <sys/param.h>
50 #include <sys/bus.h>
51 #include <sys/cpu.h>
52 #include <sys/kernel.h>
53 #include <sys/module.h>
54 #include <sys/malloc.h>
55 #include <sys/proc.h>
56 #include <sys/pcpu.h>
57 #include <sys/smp.h>
58 #include <sys/sched.h>
59
60 #include <machine/md_var.h>
61 #include <machine/cputypes.h>
62 #include <machine/specialreg.h>
63
64 #include <contrib/dev/acpica/acpi.h>
65 #include <dev/acpica/acpivar.h>
66
67 #include "acpi_if.h"
68 #include "cpufreq_if.h"
69
/* P-state management MSRs shared by AMD family 10h and 11h processors. */
#define MSR_AMD_10H_11H_LIMIT 0xc0010061	/* P-state current limit */
#define MSR_AMD_10H_11H_CONTROL 0xc0010062	/* P-state control (select) */
#define MSR_AMD_10H_11H_STATUS 0xc0010063	/* P-state status (current) */
#define MSR_AMD_10H_11H_CONFIG 0xc0010064	/* first per-P-state config MSR */

/* Size of the per-softc settings table. */
#define AMD_10H_11H_MAX_STATES 16

/* for MSR_AMD_10H_11H_LIMIT C001_0061 */
#define AMD_10H_11H_GET_PSTATE_MAX_VAL(msr)	(((msr) >> 4) & 0x7)	/* highest valid P-state number */
#define AMD_10H_11H_GET_PSTATE_LIMIT(msr)	(((msr)) & 0x7)	/* current enforced P-state limit */
/* for MSR_AMD_10H_11H_CONFIG 10h:C001_0064:68 / 11h:C001_0064:6B */
#define AMD_10H_11H_CUR_VID(msr)		(((msr) >> 9) & 0x7F)	/* core voltage ID */
#define AMD_10H_11H_CUR_DID(msr)		(((msr) >> 6) & 0x07)	/* core divisor ID */
#define AMD_10H_11H_CUR_FID(msr)		((msr) & 0x3F)	/* core frequency ID */

/* Print only when the debug.hwpstate_verbose sysctl/tunable is non-zero. */
#define HWPSTATE_DEBUG(dev, msg...)		\
	do{					\
		if(hwpstate_verbose)		\
			device_printf(dev, msg);\
	}while(0)

/* One entry of the cached P-state table, in cpufreq's units. */
struct hwpstate_setting {
	int	freq;		/* CPU clock in Mhz or 100ths of a percent. */
	int	volts;		/* Voltage in mV. */
	int	power;		/* Power consumed in mW. */
	int	lat;		/* Transition latency in us. */
	int	pstate_id;	/* P-State id */
};

/* Per-device state: the cached settings table and its valid length. */
struct hwpstate_softc {
	device_t		dev;
	struct hwpstate_setting	hwpstate_settings[AMD_10H_11H_MAX_STATES];
	int			cfnum;	/* number of valid entries above */
};

static void	hwpstate_identify(driver_t *driver, device_t parent);
static int	hwpstate_probe(device_t dev);
static int	hwpstate_attach(device_t dev);
static int	hwpstate_detach(device_t dev);
static int	hwpstate_set(device_t dev, const struct cf_setting *cf);
static int	hwpstate_get(device_t dev, struct cf_setting *cf);
static int	hwpstate_settings(device_t dev, struct cf_setting *sets, int *count);
static int	hwpstate_type(device_t dev, int *type);
static int	hwpstate_shutdown(device_t dev);
static int	hwpstate_features(driver_t *driver, u_int *features);
static int	hwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev);
static int	hwpstate_get_info_from_msr(device_t dev);
static int	hwpstate_goto_pstate(device_t dev, int pstate_id);

/* debug.hwpstate_verbose: set non-zero to enable HWPSTATE_DEBUG output. */
static int	hwpstate_verbose = 0;
SYSCTL_INT(_debug, OID_AUTO, hwpstate_verbose, CTLFLAG_RDTUN,
    &hwpstate_verbose, 0, "Debug hwpstate");
122
/* Newbus device interface and cpufreq driver method dispatch table. */
static device_method_t hwpstate_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	hwpstate_identify),
	DEVMETHOD(device_probe,		hwpstate_probe),
	DEVMETHOD(device_attach,	hwpstate_attach),
	DEVMETHOD(device_detach,	hwpstate_detach),
	DEVMETHOD(device_shutdown,	hwpstate_shutdown),

	/* cpufreq interface */
	DEVMETHOD(cpufreq_drv_set,	hwpstate_set),
	DEVMETHOD(cpufreq_drv_get,	hwpstate_get),
	DEVMETHOD(cpufreq_drv_settings,	hwpstate_settings),
	DEVMETHOD(cpufreq_drv_type,	hwpstate_type),

	/* ACPI interface */
	DEVMETHOD(acpi_get_features,	hwpstate_features),

	{0, 0}
};

static devclass_t hwpstate_devclass;
static driver_t hwpstate_driver = {
	"hwpstate",
	hwpstate_methods,
	sizeof(struct hwpstate_softc),
};

/* Attach hwpstate as a child of each cpu device. */
DRIVER_MODULE(hwpstate, cpu, hwpstate_driver, hwpstate_devclass, 0, 0);
151
/*
 * Go to Px-state on all cpus considering the limit.
 *
 * The requested state is first clamped to the hardware's current P-state
 * limit, then written to the control MSR on every CPU in turn by binding
 * the current thread to each one.  Returns 0 on success, or ENXIO if a
 * CPU could not be found or a transition did not complete in time.
 */
static int
hwpstate_goto_pstate(device_t dev, int pstate)
{
	struct pcpu *pc;
	int i;
	uint64_t msr;
	int j;
	int limit;
	int id = pstate;
	int error;

	/* get the current pstate limit */
	msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
	limit = AMD_10H_11H_GET_PSTATE_LIMIT(msr);
	/* Clamp: never request a higher-performance state than the limit. */
	if(limit > id)
		id = limit;

	error = 0;
	/*
	 * We are going to the same Px-state on all cpus.
	 */
	for (i = 0; i < mp_ncpus; i++) {
		/* Find each cpu. */
		pc = pcpu_find(i);
		if (pc == NULL)
			return (ENXIO);
		thread_lock(curthread);
		/* Bind to each cpu, so the wrmsr below hits that core. */
		sched_bind(curthread, pc->pc_cpuid);
		thread_unlock(curthread);
		HWPSTATE_DEBUG(dev, "setting P%d-state on cpu%d\n",
		    id, PCPU_GET(cpuid));
		/* Go To Px-state */
		wrmsr(MSR_AMD_10H_11H_CONTROL, id);
		/* wait loop (100*100 usec is enough ?) */
		for(j = 0; j < 100; j++){
			msr = rdmsr(MSR_AMD_10H_11H_STATUS);
			if(msr == id){
				break;
			}
			DELAY(100);
		}
		/* get the result. not assure msr=id */
		msr = rdmsr(MSR_AMD_10H_11H_STATUS);
		HWPSTATE_DEBUG(dev, "result P%d-state on cpu%d\n",
		    (int)msr, PCPU_GET(cpuid));
		if (msr != id) {
			/* Transition did not finish within ~10ms of polling. */
			HWPSTATE_DEBUG(dev, "error: loop is not enough.\n");
			error = ENXIO;
		}
		thread_lock(curthread);
		sched_unbind(curthread);
		thread_unlock(curthread);
	}
	return (error);
}
211
212 static int
213 hwpstate_set(device_t dev, const struct cf_setting *cf)
214 {
215 struct hwpstate_softc *sc;
216 struct hwpstate_setting *set;
217 int i;
218
219 if (cf == NULL)
220 return (EINVAL);
221 sc = device_get_softc(dev);
222 set = sc->hwpstate_settings;
223 for (i = 0; i < sc->cfnum; i++)
224 if (CPUFREQ_CMP(cf->freq, set[i].freq))
225 break;
226 if (i == sc->cfnum)
227 return (EINVAL);
228
229 return (hwpstate_goto_pstate(dev, set[i].pstate_id));
230 }
231
232 static int
233 hwpstate_get(device_t dev, struct cf_setting *cf)
234 {
235 struct hwpstate_softc *sc;
236 struct hwpstate_setting set;
237 uint64_t msr;
238
239 sc = device_get_softc(dev);
240 if (cf == NULL)
241 return (EINVAL);
242 msr = rdmsr(MSR_AMD_10H_11H_STATUS);
243 if(msr >= sc->cfnum)
244 return (EINVAL);
245 set = sc->hwpstate_settings[msr];
246
247 cf->freq = set.freq;
248 cf->volts = set.volts;
249 cf->power = set.power;
250 cf->lat = set.lat;
251 cf->dev = dev;
252 return (0);
253 }
254
255 static int
256 hwpstate_settings(device_t dev, struct cf_setting *sets, int *count)
257 {
258 struct hwpstate_softc *sc;
259 struct hwpstate_setting set;
260 int i;
261
262 if (sets == NULL || count == NULL)
263 return (EINVAL);
264 sc = device_get_softc(dev);
265 if (*count < sc->cfnum)
266 return (E2BIG);
267 for (i = 0; i < sc->cfnum; i++, sets++) {
268 set = sc->hwpstate_settings[i];
269 sets->freq = set.freq;
270 sets->volts = set.volts;
271 sets->power = set.power;
272 sets->lat = set.lat;
273 sets->dev = dev;
274 }
275 *count = sc->cfnum;
276
277 return (0);
278 }
279
/*
 * cpufreq type method: frequencies reported by this driver are absolute
 * MHz values, not relative percentages.
 */
static int
hwpstate_type(device_t dev, int *type)
{

	if (type == NULL)
		return (EINVAL);

	*type = CPUFREQ_TYPE_ABSOLUTE;
	return (0);
}
290
/*
 * Identify: add a hwpstate child under each AMD family 10h+ cpu device
 * that advertises hardware P-state support, unless disabled by hints.
 */
static void
hwpstate_identify(driver_t *driver, device_t parent)
{

	/* Already have a child here?  Nothing to do. */
	if (device_find_child(parent, "hwpstate", -1) != NULL)
		return;

	/* These MSRs exist only on AMD family 10h and later. */
	if (cpu_vendor_id != CPU_VENDOR_AMD || CPUID_TO_FAMILY(cpu_id) < 0x10)
		return;

	/*
	 * Check if hardware pstate enable bit is set.
	 */
	if ((amd_pminfo & AMDPM_HW_PSTATE) == 0) {
		HWPSTATE_DEBUG(parent, "hwpstate enable bit is not set.\n");
		return;
	}

	/* Honor the hint.hwpstate.0.disabled tunable. */
	if (resource_disabled("hwpstate", 0))
		return;

	if (BUS_ADD_CHILD(parent, 10, "hwpstate", -1) == NULL)
		device_printf(parent, "hwpstate: add child failed\n");
}
315
/*
 * Probe: build the P-state table, preferring acpi_perf's _PSS data and
 * falling back to raw MSR data, then cross-check the count against the
 * hardware.  Returns 0 on success or ENXIO/errno on failure.
 */
static int
hwpstate_probe(device_t dev)
{
	struct hwpstate_softc *sc;
	device_t perf_dev;
	uint64_t msr;
	int error, type;

	/*
	 * Only hwpstate0.
	 * It goes well with acpi_throttle.
	 */
	if (device_get_unit(dev) != 0)
		return (ENXIO);

	sc = device_get_softc(dev);
	sc->dev = dev;

	/*
	 * Check if acpi_perf has INFO only flag.
	 */
	perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1);
	/* Nonzero error means "acpi_perf info not usable (yet)". */
	error = TRUE;
	if (perf_dev && device_is_attached(perf_dev)) {
		error = CPUFREQ_DRV_TYPE(perf_dev, &type);
		if (error == 0) {
			if ((type & CPUFREQ_FLAG_INFO_ONLY) == 0) {
				/*
				 * If acpi_perf doesn't have INFO_ONLY flag,
				 * it will take care of pstate transitions.
				 */
				HWPSTATE_DEBUG(dev, "acpi_perf will take care of pstate transitions.\n");
				return (ENXIO);
			} else {
				/*
				 * If acpi_perf has INFO_ONLY flag, (_PCT has FFixedHW)
				 * we can get _PSS info from acpi_perf
				 * without going into ACPI.
				 */
				HWPSTATE_DEBUG(dev, "going to fetch info from acpi_perf\n");
				error = hwpstate_get_info_from_acpi_perf(dev, perf_dev);
			}
		}
	}

	if (error == 0) {
		/*
		 * Now we get _PSS info from acpi_perf without error.
		 * Let's check its count against the hardware's.
		 */
		msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
		if (sc->cfnum != 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr)) {
			HWPSTATE_DEBUG(dev, "msr and acpi _PSS count mismatch.\n");
			error = TRUE;
		}
	}

	/*
	 * If we cannot get info from acpi_perf,
	 * Let's get info from MSRs.
	 */
	if (error)
		error = hwpstate_get_info_from_msr(dev);
	if (error)
		return (error);

	device_set_desc(dev, "Cool`n'Quiet 2.0");
	return (0);
}
385
/*
 * Attach: register with the cpufreq framework, which will query our
 * settings/set/get/type methods from here on.
 */
static int
hwpstate_attach(device_t dev)
{

	return (cpufreq_register(dev));
}
392
393 static int
394 hwpstate_get_info_from_msr(device_t dev)
395 {
396 struct hwpstate_softc *sc;
397 struct hwpstate_setting *hwpstate_set;
398 uint64_t msr;
399 int family, i, fid, did;
400
401 family = CPUID_TO_FAMILY(cpu_id);
402 sc = device_get_softc(dev);
403 /* Get pstate count */
404 msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
405 sc->cfnum = 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr);
406 hwpstate_set = sc->hwpstate_settings;
407 for (i = 0; i < sc->cfnum; i++) {
408 msr = rdmsr(MSR_AMD_10H_11H_CONFIG + i);
409 if ((msr & ((uint64_t)1 << 63)) != ((uint64_t)1 << 63)) {
410 HWPSTATE_DEBUG(dev, "msr is not valid.\n");
411 return (ENXIO);
412 }
413 did = AMD_10H_11H_CUR_DID(msr);
414 fid = AMD_10H_11H_CUR_FID(msr);
415 switch(family) {
416 case 0x11:
417 /* fid/did to frequency */
418 hwpstate_set[i].freq = 100 * (fid + 0x08) / (1 << did);
419 break;
420 case 0x10:
421 /* fid/did to frequency */
422 hwpstate_set[i].freq = 100 * (fid + 0x10) / (1 << did);
423 break;
424 default:
425 HWPSTATE_DEBUG(dev, "get_info_from_msr: AMD family %d CPU's are not implemented yet. sorry.\n", family);
426 return (ENXIO);
427 break;
428 }
429 hwpstate_set[i].pstate_id = i;
430 /* There was volts calculation, but deleted it. */
431 hwpstate_set[i].volts = CPUFREQ_VAL_UNKNOWN;
432 hwpstate_set[i].power = CPUFREQ_VAL_UNKNOWN;
433 hwpstate_set[i].lat = CPUFREQ_VAL_UNKNOWN;
434 }
435 return (0);
436 }
437
438 static int
439 hwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev)
440 {
441 struct hwpstate_softc *sc;
442 struct cf_setting *perf_set;
443 struct hwpstate_setting *hwpstate_set;
444 int count, error, i;
445
446 perf_set = malloc(MAX_SETTINGS * sizeof(*perf_set), M_TEMP, M_NOWAIT);
447 if (perf_set == NULL) {
448 HWPSTATE_DEBUG(dev, "nomem\n");
449 return (ENOMEM);
450 }
451 /*
452 * Fetch settings from acpi_perf.
453 * Now it is attached, and has info only flag.
454 */
455 count = MAX_SETTINGS;
456 error = CPUFREQ_DRV_SETTINGS(perf_dev, perf_set, &count);
457 if (error) {
458 HWPSTATE_DEBUG(dev, "error: CPUFREQ_DRV_SETTINGS.\n");
459 goto out;
460 }
461 sc = device_get_softc(dev);
462 sc->cfnum = count;
463 hwpstate_set = sc->hwpstate_settings;
464 for (i = 0; i < count; i++) {
465 if (i == perf_set[i].spec[0]) {
466 hwpstate_set[i].pstate_id = i;
467 hwpstate_set[i].freq = perf_set[i].freq;
468 hwpstate_set[i].volts = perf_set[i].volts;
469 hwpstate_set[i].power = perf_set[i].power;
470 hwpstate_set[i].lat = perf_set[i].lat;
471 } else {
472 HWPSTATE_DEBUG(dev, "ACPI _PSS object mismatch.\n");
473 error = ENXIO;
474 goto out;
475 }
476 }
477 out:
478 if (perf_set)
479 free(perf_set, M_TEMP);
480 return (error);
481 }
482
/*
 * Detach: return all CPUs to the highest-performance state (P0, subject
 * to the hardware limit) before unregistering from cpufreq.
 */
static int
hwpstate_detach(device_t dev)
{

	hwpstate_goto_pstate(dev, 0);
	return (cpufreq_unregister(dev));
}
490
/*
 * Shutdown: nothing to do.  Restoring P0 here was considered but is
 * intentionally left disabled.
 */
static int
hwpstate_shutdown(device_t dev)
{

	/* hwpstate_goto_pstate(dev, 0); */
	return (0);
}
498
/*
 * ACPI feature report: tell the ACPI CPU driver which capabilities this
 * driver provides.
 */
static int
hwpstate_features(driver_t *driver, u_int *features)
{

	/* Notify the ACPI CPU that we support direct access to MSRs */
	*features = ACPI_CAP_PERF_MSRS;
	return (0);
}
/* Cache object: 2ff2255d5d400a13544b08ddd1f704d2 */