1 /*-
2 * Copyright (c) 2005 Nate Lawson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
17 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
19 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
20 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
21 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
22 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27 /*
28 * Throttle clock frequency by using the thermal control circuit. This
29 * operates independently of SpeedStep and ACPI throttling and is supported
30 * on Pentium 4 and later models (feature TM).
31 *
32 * Reference: Intel Developer's manual v.3 #245472-012
33 *
34 * The original version of this driver was written by Ted Unangst for
35 * OpenBSD and imported by Maxim Sobolev. It was rewritten by Nate Lawson
36 * for use with the cpufreq framework.
37 */
38
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD$");
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/bus.h>
45 #include <sys/cpu.h>
46 #include <sys/kernel.h>
47 #include <sys/module.h>
48
49 #include <machine/md_var.h>
50 #include <machine/specialreg.h>
51
52 #include "cpufreq_if.h"
53
54 #include <contrib/dev/acpica/acpi.h>
55 #include <dev/acpica/acpivar.h>
56 #include "acpi_if.h"
57
/* Per-CPU driver state (one p4tcc child is attached under each cpu device). */
struct p4tcc_softc {
	device_t dev;		/* Our device handle. */
	int set_count;		/* Number of usable duty-cycle settings. */
	int lowest_val;		/* Lowest safe setting; errata may raise it. */
	int auto_mode;		/* TRUE while the TCC is in Automatic mode. */
};
64
/* The TCC duty-cycle field supports 8 steps of 12.5% each. */
#define TCC_NUM_SETTINGS	8

/* MSR_THERM_CONTROL bit 4: enable software (On-Demand) clock modulation. */
#define TCC_ENABLE_ONDEMAND	(1<<4)
/* The duty-cycle field starts at bit 1 of MSR_THERM_CONTROL. */
#define TCC_REG_OFFSET		1
/* Convert a step count to a relative frequency in 1/100ths of a percent. */
#define TCC_SPEED_PERCENT(x)	((10000 * (x)) / TCC_NUM_SETTINGS)
70
/* Forward declarations for the device and cpufreq interface methods below. */
static int	p4tcc_features(driver_t *driver, u_int *features);
static void	p4tcc_identify(driver_t *driver, device_t parent);
static int	p4tcc_probe(device_t dev);
static int	p4tcc_attach(device_t dev);
static int	p4tcc_settings(device_t dev, struct cf_setting *sets,
		    int *count);
static int	p4tcc_set(device_t dev, const struct cf_setting *set);
static int	p4tcc_get(device_t dev, struct cf_setting *set);
static int	p4tcc_type(device_t dev, int *type);
80
/* Method table wiring this driver into newbus, cpufreq, and ACPI. */
static device_method_t p4tcc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	p4tcc_identify),
	DEVMETHOD(device_probe,		p4tcc_probe),
	DEVMETHOD(device_attach,	p4tcc_attach),

	/* cpufreq interface */
	DEVMETHOD(cpufreq_drv_set,	p4tcc_set),
	DEVMETHOD(cpufreq_drv_get,	p4tcc_get),
	DEVMETHOD(cpufreq_drv_type,	p4tcc_type),
	DEVMETHOD(cpufreq_drv_settings,	p4tcc_settings),

	/* ACPI interface */
	DEVMETHOD(acpi_get_features,	p4tcc_features),

	{0, 0}
};
98
/* Driver description; softc is allocated per instance by newbus. */
static driver_t p4tcc_driver = {
	"p4tcc",
	p4tcc_methods,
	sizeof(struct p4tcc_softc),
};

static devclass_t p4tcc_devclass;
/* Attach under the cpu bus so one instance exists per processor. */
DRIVER_MODULE(p4tcc, cpu, p4tcc_driver, p4tcc_devclass, 0, 0);
107
108 static int
109 p4tcc_features(driver_t *driver, u_int *features)
110 {
111
112 /* Notify the ACPI CPU that we support direct access to MSRs */
113 *features = ACPI_CAP_THR_MSRS;
114 return (0);
115 }
116
117 static void
118 p4tcc_identify(driver_t *driver, device_t parent)
119 {
120
121 if ((cpu_feature & (CPUID_ACPI | CPUID_TM)) != (CPUID_ACPI | CPUID_TM))
122 return;
123
124 /* Make sure we're not being doubly invoked. */
125 if (device_find_child(parent, "p4tcc", -1) != NULL)
126 return;
127
128 /*
129 * We attach a p4tcc child for every CPU since settings need to
130 * be performed on every CPU in the SMP case. See section 13.15.3
131 * of the IA32 Intel Architecture Software Developer's Manual,
132 * Volume 3, for more info.
133 */
134 if (BUS_ADD_CHILD(parent, 10, "p4tcc", -1) == NULL)
135 device_printf(parent, "add p4tcc child failed\n");
136 }
137
138 static int
139 p4tcc_probe(device_t dev)
140 {
141
142 if (resource_disabled("p4tcc", 0))
143 return (ENXIO);
144
145 device_set_desc(dev, "CPU Frequency Thermal Control");
146 return (0);
147 }
148
/*
 * Attach: size the settings list (shrinking it for known CPU errata),
 * force the TCC to 100% so a BIOS-imposed throttle doesn't persist, and
 * register with the cpufreq framework.  Always returns 0.
 */
static int
p4tcc_attach(device_t dev)
{
	struct p4tcc_softc *sc;
	struct cf_setting set;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->set_count = TCC_NUM_SETTINGS;

	/*
	 * On boot, the TCC is usually in Automatic mode where reading the
	 * current performance level is likely to produce bogus results.
	 * We record that state here and don't trust the contents of the
	 * status MSR until we've set it ourselves.
	 */
	sc->auto_mode = TRUE;

	/*
	 * XXX: After a cursory glance at various Intel specification
	 * XXX: updates it seems like these tests for errata is bogus.
	 * XXX: As far as I can tell, the failure mode is benign, in
	 * XXX: that cpus with no errata will have their bottom two
	 * XXX: STPCLK# rates disabled, so rather than waste more time
	 * XXX: hunting down intel docs, just document it and punt. /phk
	 */
	/* Low byte of cpu_id is the stepping/model revision being matched. */
	switch (cpu_id & 0xff) {
	case 0x22:
	case 0x24:
	case 0x25:
	case 0x27:
	case 0x29:
		/*
		 * These CPU models hang when set to 12.5%.
		 * See Errata O50, P44, and Z21.
		 */
		sc->set_count -= 1;
		break;
	case 0x07:	/* errata N44 and P18 */
	case 0x0a:
	case 0x12:
	case 0x13:
		/*
		 * These CPU models hang when set to 12.5% or 25%.
		 * See Errata N44 and P18.
		 */
		sc->set_count -= 2;
		break;
	}
	/* Lowest usable duty-cycle step, after any errata reductions above. */
	sc->lowest_val = TCC_NUM_SETTINGS - sc->set_count + 1;

	/*
	 * Before we finish attach, switch to 100%.  It's possible the BIOS
	 * set us to a lower rate.  The user can override this after boot.
	 */
	set.freq = 10000;	/* p4tcc_set() only examines set->freq. */
	p4tcc_set(dev, &set);

	cpufreq_register(dev);
	return (0);
}
210
211 static int
212 p4tcc_settings(device_t dev, struct cf_setting *sets, int *count)
213 {
214 struct p4tcc_softc *sc;
215 int i, val;
216
217 sc = device_get_softc(dev);
218 if (sets == NULL || count == NULL)
219 return (EINVAL);
220 if (*count < sc->set_count)
221 return (E2BIG);
222
223 /* Return a list of valid settings for this driver. */
224 memset(sets, CPUFREQ_VAL_UNKNOWN, sizeof(*sets) * sc->set_count);
225 val = TCC_NUM_SETTINGS;
226 for (i = 0; i < sc->set_count; i++, val--) {
227 sets[i].freq = TCC_SPEED_PERCENT(val);
228 sets[i].dev = dev;
229 }
230 *count = sc->set_count;
231
232 return (0);
233 }
234
235 static int
236 p4tcc_set(device_t dev, const struct cf_setting *set)
237 {
238 struct p4tcc_softc *sc;
239 uint64_t mask, msr;
240 int val;
241
242 if (set == NULL)
243 return (EINVAL);
244 sc = device_get_softc(dev);
245
246 /*
247 * Validate requested state converts to a setting that is an integer
248 * from [sc->lowest_val .. TCC_NUM_SETTINGS].
249 */
250 val = set->freq * TCC_NUM_SETTINGS / 10000;
251 if (val * 10000 != set->freq * TCC_NUM_SETTINGS ||
252 val < sc->lowest_val || val > TCC_NUM_SETTINGS)
253 return (EINVAL);
254
255 /*
256 * Read the current register and mask off the old setting and
257 * On-Demand bit. If the new val is < 100%, set it and the On-Demand
258 * bit, otherwise just return to Automatic mode.
259 */
260 msr = rdmsr(MSR_THERM_CONTROL);
261 mask = (TCC_NUM_SETTINGS - 1) << TCC_REG_OFFSET;
262 msr &= ~(mask | TCC_ENABLE_ONDEMAND);
263 if (val < TCC_NUM_SETTINGS)
264 msr |= (val << TCC_REG_OFFSET) | TCC_ENABLE_ONDEMAND;
265 wrmsr(MSR_THERM_CONTROL, msr);
266
267 /*
268 * Record whether we're now in Automatic or On-Demand mode. We have
269 * to cache this since there is no reliable way to check if TCC is in
270 * Automatic mode (i.e., at 100% or possibly 50%). Reading bit 4 of
271 * the ACPI Thermal Monitor Control Register produces 0 no matter
272 * what the current mode.
273 */
274 if (msr & TCC_ENABLE_ONDEMAND)
275 sc->auto_mode = TRUE;
276 else
277 sc->auto_mode = FALSE;
278
279 return (0);
280 }
281
282 static int
283 p4tcc_get(device_t dev, struct cf_setting *set)
284 {
285 struct p4tcc_softc *sc;
286 uint64_t msr;
287 int val;
288
289 if (set == NULL)
290 return (EINVAL);
291 sc = device_get_softc(dev);
292
293 /*
294 * Read the current register and extract the current setting. If
295 * in automatic mode, assume we're at TCC_NUM_SETTINGS (100%).
296 *
297 * XXX This is not completely reliable since at high temperatures
298 * the CPU may be automatically throttling to 50% but it's the best
299 * we can do.
300 */
301 if (!sc->auto_mode) {
302 msr = rdmsr(MSR_THERM_CONTROL);
303 val = (msr >> TCC_REG_OFFSET) & (TCC_NUM_SETTINGS - 1);
304 } else
305 val = TCC_NUM_SETTINGS;
306
307 memset(set, CPUFREQ_VAL_UNKNOWN, sizeof(*set));
308 set->freq = TCC_SPEED_PERCENT(val);
309 set->dev = dev;
310
311 return (0);
312 }
313
314 static int
315 p4tcc_type(device_t dev, int *type)
316 {
317
318 if (type == NULL)
319 return (EINVAL);
320
321 *type = CPUFREQ_TYPE_RELATIVE;
322 return (0);
323 }