1 /*-
2 * Copyright (c) 2008 Joseph Koshy
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27 /*
28 * Common code for handling Intel CPUs.
29 */
30
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD: releng/7.3/sys/dev/hwpmc/hwpmc_intel.c 195197 2009-06-30 17:10:08Z avg $");
33
34 #include <sys/param.h>
35 #include <sys/pmc.h>
36 #include <sys/pmckern.h>
37 #include <sys/systm.h>
38
39 #include <machine/cpu.h>
40 #include <machine/cputypes.h>
41 #include <machine/md_var.h>
42 #include <machine/specialreg.h>
43
44 static int
45 intel_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
46 {
47 (void) pc;
48
49 PMCDBG(MDP,SWI,1, "pc=%p pp=%p enable-msr=%d", pc, pp,
50 pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS);
51
52 /* allow the RDPMC instruction if needed */
53 if (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS)
54 load_cr4(rcr4() | CR4_PCE);
55
56 PMCDBG(MDP,SWI,1, "cr4=0x%jx", (uintmax_t) rcr4());
57
58 return 0;
59 }
60
61 static int
62 intel_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
63 {
64 (void) pc;
65 (void) pp; /* can be NULL */
66
67 PMCDBG(MDP,SWO,1, "pc=%p pp=%p cr4=0x%jx", pc, pp,
68 (uintmax_t) rcr4());
69
70 /* always turn off the RDPMC instruction */
71 load_cr4(rcr4() & ~CR4_PCE);
72
73 return 0;
74 }
75
76 struct pmc_mdep *
77 pmc_intel_initialize(void)
78 {
79 struct pmc_mdep *pmc_mdep;
80 enum pmc_cputype cputype;
81 int error, model, nclasses, ncpus;
82
83 KASSERT(cpu_vendor_id == CPU_VENDOR_INTEL,
84 ("[intel,%d] Initializing non-intel processor", __LINE__));
85
86 PMCDBG(MDP,INI,0, "intel-initialize cpuid=0x%x", cpu_id);
87
88 cputype = -1;
89 nclasses = 2;
90
91 model = ((cpu_id & 0xF0000) >> 12) | ((cpu_id & 0xF0) >> 4);
92
93 switch (cpu_id & 0xF00) {
94 #if defined(__i386__)
95 case 0x500: /* Pentium family processors */
96 cputype = PMC_CPU_INTEL_P5;
97 break;
98 #endif
99 case 0x600: /* Pentium Pro, Celeron, Pentium II & III */
100 switch (model) {
101 #if defined(__i386__)
102 case 0x1:
103 cputype = PMC_CPU_INTEL_P6;
104 break;
105 case 0x3: case 0x5:
106 cputype = PMC_CPU_INTEL_PII;
107 break;
108 case 0x6: case 0x16:
109 cputype = PMC_CPU_INTEL_CL;
110 break;
111 case 0x7: case 0x8: case 0xA: case 0xB:
112 cputype = PMC_CPU_INTEL_PIII;
113 break;
114 case 0x9: case 0xD:
115 cputype = PMC_CPU_INTEL_PM;
116 break;
117 #endif
118 case 0xE:
119 cputype = PMC_CPU_INTEL_CORE;
120 break;
121 case 0xF:
122 cputype = PMC_CPU_INTEL_CORE2;
123 nclasses = 3;
124 break;
125 case 0x17:
126 cputype = PMC_CPU_INTEL_CORE2EXTREME;
127 nclasses = 3;
128 break;
129 case 0x1C: /* Per Intel document 320047-002. */
130 cputype = PMC_CPU_INTEL_ATOM;
131 nclasses = 3;
132 break;
133 case 0x1A:
134 cputype = PMC_CPU_INTEL_COREI7;
135 nclasses = 3;
136 break;
137 }
138 break;
139 #if defined(__i386__) || defined(__amd64__)
140 case 0xF00: /* P4 */
141 if (model >= 0 && model <= 6) /* known models */
142 cputype = PMC_CPU_INTEL_PIV;
143 break;
144 }
145 #endif
146
147 if ((int) cputype == -1) {
148 printf("pmc: Unknown Intel CPU.\n");
149 return (NULL);
150 }
151
152 pmc_mdep = malloc(sizeof(struct pmc_mdep) + nclasses *
153 sizeof(struct pmc_classdep), M_PMC, M_WAITOK|M_ZERO);
154
155 pmc_mdep->pmd_cputype = cputype;
156 pmc_mdep->pmd_nclass = nclasses;
157
158 pmc_mdep->pmd_switch_in = intel_switch_in;
159 pmc_mdep->pmd_switch_out = intel_switch_out;
160
161 ncpus = pmc_cpu_max();
162
163 error = pmc_tsc_initialize(pmc_mdep, ncpus);
164 if (error)
165 goto error;
166
167 switch (cputype) {
168 #if defined(__i386__) || defined(__amd64__)
169 /*
170 * Intel Core, Core 2 and Atom processors.
171 */
172 case PMC_CPU_INTEL_ATOM:
173 case PMC_CPU_INTEL_CORE:
174 case PMC_CPU_INTEL_CORE2:
175 case PMC_CPU_INTEL_CORE2EXTREME:
176 case PMC_CPU_INTEL_COREI7:
177 error = pmc_core_initialize(pmc_mdep, ncpus);
178 break;
179
180 /*
181 * Intel Pentium 4 Processors, and P4/EMT64 processors.
182 */
183
184 case PMC_CPU_INTEL_PIV:
185 error = pmc_p4_initialize(pmc_mdep, ncpus);
186
187 KASSERT(pmc_mdep->pmd_npmc == TSC_NPMCS + P4_NPMCS,
188 ("[intel,%d] incorrect npmc count %d", __LINE__,
189 pmc_mdep->pmd_npmc));
190 break;
191 #endif
192
193 #if defined(__i386__)
194 /*
195 * P6 Family Processors
196 */
197
198 case PMC_CPU_INTEL_P6:
199 case PMC_CPU_INTEL_CL:
200 case PMC_CPU_INTEL_PII:
201 case PMC_CPU_INTEL_PIII:
202 case PMC_CPU_INTEL_PM:
203 error = pmc_p6_initialize(pmc_mdep, ncpus);
204
205 KASSERT(pmc_mdep->pmd_npmc == TSC_NPMCS + P6_NPMCS,
206 ("[intel,%d] incorrect npmc count %d", __LINE__,
207 pmc_mdep->pmd_npmc));
208 break;
209
210 /*
211 * Intel Pentium PMCs.
212 */
213
214 case PMC_CPU_INTEL_P5:
215 error = pmc_p5_initialize(pmc_mdep, ncpus);
216
217 KASSERT(pmc_mdep->pmd_npmc == TSC_NPMCS + PENTIUM_NPMCS,
218 ("[intel,%d] incorrect npmc count %d", __LINE__,
219 pmc_mdep->pmd_npmc));
220 break;
221 #endif
222
223 default:
224 KASSERT(0, ("[intel,%d] Unknown CPU type", __LINE__));
225 }
226
227
228 error:
229 if (error) {
230 free(pmc_mdep, M_PMC);
231 pmc_mdep = NULL;
232 }
233
234 return (pmc_mdep);
235 }
236
237 void
238 pmc_intel_finalize(struct pmc_mdep *md)
239 {
240 pmc_tsc_finalize(md);
241
242 switch (md->pmd_cputype) {
243 #if defined(__i386__) || defined(__amd64__)
244 case PMC_CPU_INTEL_ATOM:
245 case PMC_CPU_INTEL_CORE:
246 case PMC_CPU_INTEL_CORE2:
247 case PMC_CPU_INTEL_CORE2EXTREME:
248 pmc_core_finalize(md);
249 break;
250
251 case PMC_CPU_INTEL_PIV:
252 pmc_p4_finalize(md);
253 break;
254 #endif
255 #if defined(__i386__)
256 case PMC_CPU_INTEL_P6:
257 case PMC_CPU_INTEL_CL:
258 case PMC_CPU_INTEL_PII:
259 case PMC_CPU_INTEL_PIII:
260 case PMC_CPU_INTEL_PM:
261 pmc_p6_finalize(md);
262 break;
263 case PMC_CPU_INTEL_P5:
264 pmc_p5_finalize(md);
265 break;
266 #endif
267 default:
268 KASSERT(0, ("[intel,%d] unknown CPU type", __LINE__));
269 }
270 }