1 /*-
2 * Copyright (c) 2014 Andrew Turner
3 * Copyright (c) 2014 The FreeBSD Foundation
4 * All rights reserved.
5 *
6 * Portions of this software were developed by Semihalf
7 * under sponsorship of the FreeBSD Foundation.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 */
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD: releng/11.2/sys/arm64/arm64/identcpu.c 305768 2016-09-13 14:14:39Z andrew $");
34
35 #include <sys/param.h>
36 #include <sys/pcpu.h>
37 #include <sys/sysctl.h>
38 #include <sys/systm.h>
39
40 #include <machine/atomic.h>
41 #include <machine/cpu.h>
42 #include <machine/cpufunc.h>
43
/*
 * Simple spinlock word serialising identify_cpu() on secondary CPUs;
 * taken with atomic_cmpset_acq_int()/wfe, released with
 * atomic_store_rel_int()/sev.
 */
static int ident_lock;

/* Machine class string exported through the hw.machine sysctl. */
char machine[] = "arm64";

SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0,
    "Machine class");

/*
 * Per-CPU affinity as provided in MPIDR_EL1
 * Indexed by CPU number in logical order selected by the system.
 * Relevant fields can be extracted using CPU_AFFn macros,
 * Aff3.Aff2.Aff1.Aff0 construct a unique CPU address in the system.
 *
 * Fields used by us:
 * Aff1 - Cluster number
 * Aff0 - CPU number in Aff1 cluster
 */
uint64_t __cpu_affinity[MAXCPU];
/*
 * Number of affinity levels that actually differ between CPUs; raised
 * by identify_cpu() on secondary CPUs and used by print_cpu_features()
 * to decide how many levels to print.
 */
static u_int cpu_aff_levels;
63
/*
 * Cached identification state for one CPU, filled in by identify_cpu()
 * running on that CPU and later consumed by print_cpu_features().
 */
struct cpu_desc {
	u_int		cpu_impl;	/* MIDR_EL1 implementer field */
	u_int		cpu_part_num;	/* MIDR_EL1 part number field */
	u_int		cpu_variant;	/* MIDR_EL1 variant field */
	u_int		cpu_revision;	/* MIDR_EL1 revision field */
	const char	*cpu_impl_name;	/* From cpu_implementers[] */
	const char	*cpu_part_name;	/* From the implementer's parts table */

	uint64_t	mpidr;		/* Raw MPIDR_EL1 value */
	uint64_t	id_aa64afr0;
	uint64_t	id_aa64afr1;
	uint64_t	id_aa64dfr0;
	uint64_t	id_aa64dfr1;
	uint64_t	id_aa64isar0;
	uint64_t	id_aa64isar1;
	uint64_t	id_aa64mmfr0;
	uint64_t	id_aa64mmfr1;
	uint64_t	id_aa64pfr0;
	uint64_t	id_aa64pfr1;
};

struct cpu_desc cpu_desc[MAXCPU];

/*
 * Bitmask of ID registers on which a secondary CPU differed from CPU 0;
 * print_cpu_features() re-prints only those registers for that CPU.
 */
static u_int cpu_print_regs;
#define	PRINT_ID_AA64_AFR0	0x00000001
#define	PRINT_ID_AA64_AFR1	0x00000002
#define	PRINT_ID_AA64_DFR0	0x00000004
#define	PRINT_ID_AA64_DFR1	0x00000008
#define	PRINT_ID_AA64_ISAR0	0x00000010
#define	PRINT_ID_AA64_ISAR1	0x00000020
#define	PRINT_ID_AA64_MMFR0	0x00000040
#define	PRINT_ID_AA64_MMFR1	0x00000080
#define	PRINT_ID_AA64_PFR0	0x00000100
#define	PRINT_ID_AA64_PFR1	0x00000200
97
/* One (part number, name) entry in an implementer's parts table. */
struct cpu_parts {
	u_int		part_id;
	const char	*part_name;
};
/* Sentinel terminating each parts table; part_id 0 always matches. */
#define	CPU_PART_NONE	{ 0, "Unknown Processor" }

struct cpu_implementers {
	u_int			impl_id;
	const char		*impl_name;
	/*
	 * Part number is implementation defined
	 * so each vendor will have its own set of values and names.
	 */
	const struct cpu_parts	*cpu_parts;
};
/* Sentinel terminating cpu_implementers[]; impl_id 0 always matches. */
#define	CPU_IMPLEMENTER_NONE	{ 0, "Unknown Implementer", cpu_parts_none }
114
/*
 * Per-implementer table of (PartNum, CPU Name) pairs.
 * Each table is terminated by a CPU_PART_NONE sentinel entry.
 */
/* ARM Ltd. */
static const struct cpu_parts cpu_parts_arm[] = {
	{ CPU_PART_FOUNDATION, "Foundation-Model" },
	{ CPU_PART_CORTEX_A53, "Cortex-A53" },
	{ CPU_PART_CORTEX_A57, "Cortex-A57" },
	CPU_PART_NONE,
};
/* Cavium */
static const struct cpu_parts cpu_parts_cavium[] = {
	{ CPU_PART_THUNDER, "Thunder" },
	CPU_PART_NONE,
};

/* Unknown */
static const struct cpu_parts cpu_parts_none[] = {
	CPU_PART_NONE,
};
135
/*
 * Implementers table, keyed by the MIDR_EL1 implementer field.
 * Terminated by a CPU_IMPLEMENTER_NONE sentinel (impl_id 0) so a
 * lookup always succeeds.
 */
const struct cpu_implementers cpu_implementers[] = {
	{ CPU_IMPL_ARM,		"ARM",		cpu_parts_arm },
	{ CPU_IMPL_BROADCOM,	"Broadcom",	cpu_parts_none },
	{ CPU_IMPL_CAVIUM,	"Cavium",	cpu_parts_cavium },
	{ CPU_IMPL_DEC,		"DEC",		cpu_parts_none },
	{ CPU_IMPL_INFINEON,	"IFX",		cpu_parts_none },
	{ CPU_IMPL_FREESCALE,	"Freescale",	cpu_parts_none },
	{ CPU_IMPL_NVIDIA,	"NVIDIA",	cpu_parts_none },
	{ CPU_IMPL_APM,		"APM",		cpu_parts_none },
	{ CPU_IMPL_QUALCOMM,	"Qualcomm",	cpu_parts_none },
	{ CPU_IMPL_MARVELL,	"Marvell",	cpu_parts_none },
	{ CPU_IMPL_INTEL,	"Intel",	cpu_parts_none },
	CPU_IMPLEMENTER_NONE,
};
153
/*
 * Print the implementer/part/revision, the in-use MPIDR affinity levels,
 * and a decoded view of the AArch64 ID registers for the given logical
 * CPU.  For CPU 0 all registers are printed; for secondary CPUs only the
 * registers flagged in cpu_print_regs (i.e. those that differed from
 * CPU 0) are printed.  Must be called after identify_cpu() has filled in
 * cpu_desc[cpu].
 */
void
print_cpu_features(u_int cpu)
{
	int printed;

	printf("CPU%3d: %s %s r%dp%d", cpu, cpu_desc[cpu].cpu_impl_name,
	    cpu_desc[cpu].cpu_part_name, cpu_desc[cpu].cpu_variant,
	    cpu_desc[cpu].cpu_revision);

	/* Print only the affinity levels that vary across the system. */
	printf(" affinity:");
	switch(cpu_aff_levels) {
	default:
	case 4:
		printf(" %2d", CPU_AFF3(cpu_desc[cpu].mpidr));
		/* FALLTHROUGH */
	case 3:
		printf(" %2d", CPU_AFF2(cpu_desc[cpu].mpidr));
		/* FALLTHROUGH */
	case 2:
		printf(" %2d", CPU_AFF1(cpu_desc[cpu].mpidr));
		/* FALLTHROUGH */
	case 1:
	case 0: /* On UP this will be zero */
		printf(" %2d", CPU_AFF0(cpu_desc[cpu].mpidr));
		break;
	}
	printf("\n");

	/*
	 * There is a hardware errata where, if one CPU is performing a TLB
	 * invalidation while another is performing a store-exclusive the
	 * store-exclusive may return the wrong status. A workaround seems
	 * to be to use an IPI to invalidate on each CPU, however given the
	 * limited number of affected units (pass 1.1 is the evaluation
	 * hardware revision), and the lack of information from Cavium
	 * this has not been implemented.
	 *
	 * At the time of writing this the only information is from:
	 * https://lkml.org/lkml/2016/8/4/722
	 */
	/*
	 * XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1 on it's own also
	 * triggers on pass 2.0+.
	 */
	if (cpu == 0 && CPU_VAR(PCPU_GET(midr)) == 0 &&
	    CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1)
		printf("WARNING: ThunderX Pass 1.1 detected.\nThis has known "
		    "hardware bugs that may cause the incorrect operation of "
		    "atomic operations.\n");

	/* Secondary CPUs: nothing more to print if all registers matched. */
	if (cpu != 0 && cpu_print_regs == 0)
		return;

	/* Emit "" before the first feature in a list, "," thereafter. */
#define SEP_STR	((printed++) == 0) ? "" : ","

	/* AArch64 Instruction Set Attribute Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR0) != 0) {
		printed = 0;
		printf(" Instruction Set Attributes 0 = <");

		switch (ID_AA64ISAR0_RDM(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_RDM_NONE:
			break;
		case ID_AA64ISAR0_RDM_IMPL:
			printf("%sRDM", SEP_STR);
			break;
		default:
			printf("%sUnknown RDM", SEP_STR);
		}

		switch (ID_AA64ISAR0_ATOMIC(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_ATOMIC_NONE:
			break;
		case ID_AA64ISAR0_ATOMIC_IMPL:
			printf("%sAtomic", SEP_STR);
			break;
		default:
			printf("%sUnknown Atomic", SEP_STR);
		}

		switch (ID_AA64ISAR0_AES(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_AES_NONE:
			break;
		case ID_AA64ISAR0_AES_BASE:
			printf("%sAES", SEP_STR);
			break;
		case ID_AA64ISAR0_AES_PMULL:
			printf("%sAES+PMULL", SEP_STR);
			break;
		default:
			printf("%sUnknown AES", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SHA1(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SHA1_NONE:
			break;
		case ID_AA64ISAR0_SHA1_BASE:
			printf("%sSHA1", SEP_STR);
			break;
		default:
			printf("%sUnknown SHA1", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SHA2(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SHA2_NONE:
			break;
		case ID_AA64ISAR0_SHA2_BASE:
			printf("%sSHA2", SEP_STR);
			break;
		default:
			printf("%sUnknown SHA2", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_CRC32(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_CRC32_NONE:
			break;
		case ID_AA64ISAR0_CRC32_BASE:
			printf("%sCRC32", SEP_STR);
			break;
		default:
			printf("%sUnknown CRC32", SEP_STR);
			break;
		}

		/* Print any bits we do not know how to decode. */
		if ((cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK) != 0)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK);

		printf(">\n");
	}

	/* AArch64 Instruction Set Attribute Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR1) != 0) {
		printf(" Instruction Set Attributes 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64isar1);
	}

	/* AArch64 Processor Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR0) != 0) {
		printed = 0;
		printf(" Processor Features 0 = <");
		switch (ID_AA64PFR0_GIC(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_GIC_CPUIF_NONE:
			break;
		case ID_AA64PFR0_GIC_CPUIF_EN:
			printf("%sGIC", SEP_STR);
			break;
		default:
			printf("%sUnknown GIC interface", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_ADV_SIMD(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_ADV_SIMD_NONE:
			break;
		case ID_AA64PFR0_ADV_SIMD_IMPL:
			printf("%sAdvSIMD", SEP_STR);
			break;
		default:
			printf("%sUnknown AdvSIMD", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_FP(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_FP_NONE:
			break;
		case ID_AA64PFR0_FP_IMPL:
			printf("%sFloat", SEP_STR);
			break;
		default:
			printf("%sUnknown Float", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL3(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL3_NONE:
			printf("%sNo EL3", SEP_STR);
			break;
		case ID_AA64PFR0_EL3_64:
			printf("%sEL3", SEP_STR);
			break;
		case ID_AA64PFR0_EL3_64_32:
			printf("%sEL3 32", SEP_STR);
			break;
		default:
			printf("%sUnknown EL3", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL2(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL2_NONE:
			printf("%sNo EL2", SEP_STR);
			break;
		case ID_AA64PFR0_EL2_64:
			printf("%sEL2", SEP_STR);
			break;
		case ID_AA64PFR0_EL2_64_32:
			printf("%sEL2 32", SEP_STR);
			break;
		default:
			printf("%sUnknown EL2", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL1(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL1_64:
			printf("%sEL1", SEP_STR);
			break;
		case ID_AA64PFR0_EL1_64_32:
			printf("%sEL1 32", SEP_STR);
			break;
		default:
			printf("%sUnknown EL1", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL0(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL0_64:
			printf("%sEL0", SEP_STR);
			break;
		case ID_AA64PFR0_EL0_64_32:
			printf("%sEL0 32", SEP_STR);
			break;
		default:
			printf("%sUnknown EL0", SEP_STR);
			break;
		}

		/* Print any bits we do not know how to decode. */
		if ((cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK) != 0)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK);

		printf(">\n");
	}

	/* AArch64 Processor Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR1) != 0) {
		printf(" Processor Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64pfr1);
	}

	/* AArch64 Memory Model Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR0) != 0) {
		printed = 0;
		printf(" Memory Model Features 0 = <");
		switch (ID_AA64MMFR0_TGRAN4(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGRAN4_NONE:
			break;
		case ID_AA64MMFR0_TGRAN4_IMPL:
			printf("%s4k Granule", SEP_STR);
			break;
		default:
			printf("%sUnknown 4k Granule", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_TGRAN16(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGRAN16_NONE:
			break;
		case ID_AA64MMFR0_TGRAN16_IMPL:
			printf("%s16k Granule", SEP_STR);
			break;
		default:
			printf("%sUnknown 16k Granule", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_TGRAN64(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGRAN64_NONE:
			break;
		case ID_AA64MMFR0_TGRAN64_IMPL:
			printf("%s64k Granule", SEP_STR);
			break;
		default:
			printf("%sUnknown 64k Granule", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_BIGEND(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_BIGEND_FIXED:
			break;
		case ID_AA64MMFR0_BIGEND_MIXED:
			printf("%sMixedEndian", SEP_STR);
			break;
		default:
			printf("%sUnknown Endian switching", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_BIGEND_EL0(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_BIGEND_EL0_FIXED:
			break;
		case ID_AA64MMFR0_BIGEND_EL0_MIXED:
			printf("%sEL0 MixEndian", SEP_STR);
			break;
		default:
			printf("%sUnknown EL0 Endian switching", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_S_NS_MEM(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_S_NS_MEM_NONE:
			break;
		case ID_AA64MMFR0_S_NS_MEM_DISTINCT:
			printf("%sS/NS Mem", SEP_STR);
			break;
		default:
			printf("%sUnknown S/NS Mem", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_ASID_BITS(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_ASID_BITS_8:
			printf("%s8bit ASID", SEP_STR);
			break;
		case ID_AA64MMFR0_ASID_BITS_16:
			printf("%s16bit ASID", SEP_STR);
			break;
		default:
			printf("%sUnknown ASID", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_PA_RANGE(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_PA_RANGE_4G:
			printf("%s4GB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_64G:
			printf("%s64GB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_1T:
			printf("%s1TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_4T:
			printf("%s4TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_16T:
			printf("%s16TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_256T:
			printf("%s256TB PA", SEP_STR);
			break;
		default:
			printf("%sUnknown PA Range", SEP_STR);
			break;
		}

		/* Print any bits we do not know how to decode. */
		if ((cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK) != 0)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK);
		printf(">\n");
	}

	/* AArch64 Memory Model Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR1) != 0) {
		printed = 0;
		printf(" Memory Model Features 1 = <");

		switch (ID_AA64MMFR1_PAN(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_PAN_NONE:
			break;
		case ID_AA64MMFR1_PAN_IMPL:
			printf("%sPAN", SEP_STR);
			break;
		default:
			printf("%sUnknown PAN", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_LO(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_LO_NONE:
			break;
		case ID_AA64MMFR1_LO_IMPL:
			printf("%sLO", SEP_STR);
			break;
		default:
			printf("%sUnknown LO", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_HPDS(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_HPDS_NONE:
			break;
		case ID_AA64MMFR1_HPDS_IMPL:
			printf("%sHPDS", SEP_STR);
			break;
		default:
			printf("%sUnknown HPDS", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_VH(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_VH_NONE:
			break;
		case ID_AA64MMFR1_VH_IMPL:
			printf("%sVHE", SEP_STR);
			break;
		default:
			printf("%sUnknown VHE", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_VMIDBITS(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_VMIDBITS_8:
			break;
		case ID_AA64MMFR1_VMIDBITS_16:
			printf("%s16 VMID bits", SEP_STR);
			break;
		default:
			printf("%sUnknown VMID bits", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_HAFDBS(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_HAFDBS_NONE:
			break;
		case ID_AA64MMFR1_HAFDBS_AF:
			printf("%sAF", SEP_STR);
			break;
		case ID_AA64MMFR1_HAFDBS_AF_DBS:
			printf("%sAF+DBS", SEP_STR);
			break;
		default:
			printf("%sUnknown Hardware update AF/DBS", SEP_STR);
			break;
		}

		/* Print any bits we do not know how to decode. */
		if ((cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK) != 0)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK);
		printf(">\n");
	}

	/* AArch64 Debug Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR0) != 0) {
		printed = 0;
		printf(" Debug Features 0 = <");
		printf("%s%lu CTX Breakpoints", SEP_STR,
		    ID_AA64DFR0_CTX_CMPS(cpu_desc[cpu].id_aa64dfr0));

		printf("%s%lu Watchpoints", SEP_STR,
		    ID_AA64DFR0_WRPS(cpu_desc[cpu].id_aa64dfr0));

		printf("%s%lu Breakpoints", SEP_STR,
		    ID_AA64DFR0_BRPS(cpu_desc[cpu].id_aa64dfr0));

		switch (ID_AA64DFR0_PMU_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_PMU_VER_NONE:
			break;
		case ID_AA64DFR0_PMU_VER_3:
			printf("%sPMUv3", SEP_STR);
			break;
		case ID_AA64DFR0_PMU_VER_3_1:
			printf("%sPMUv3+16 bit evtCount", SEP_STR);
			break;
		case ID_AA64DFR0_PMU_VER_IMPL:
			printf("%sImplementation defined PMU", SEP_STR);
			break;
		default:
			printf("%sUnknown PMU", SEP_STR);
			break;
		}

		switch (ID_AA64DFR0_TRACE_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_TRACE_VER_NONE:
			break;
		case ID_AA64DFR0_TRACE_VER_IMPL:
			printf("%sTrace", SEP_STR);
			break;
		default:
			printf("%sUnknown Trace", SEP_STR);
			break;
		}

		switch (ID_AA64DFR0_DEBUG_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_DEBUG_VER_8:
			printf("%sDebug v8", SEP_STR);
			break;
		case ID_AA64DFR0_DEBUG_VER_8_VHE:
			printf("%sDebug v8+VHE", SEP_STR);
			break;
		default:
			printf("%sUnknown Debug", SEP_STR);
			break;
		}

		/* Print any bits we do not know how to decode. */
		if (cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK);
		printf(">\n");
	}

	/* AArch64 Debug Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR1) != 0) {
		printf(" Debug Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64dfr1);
	}

	/* AArch64 Auxiliary Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR0) != 0) {
		printf(" Auxiliary Features 0 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64afr0);
	}

	/* AArch64 Auxiliary Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR1) != 0) {
		printf(" Auxiliary Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64afr1);
	}

#undef SEP_STR
}
669
670 void
671 identify_cpu(void)
672 {
673 u_int midr;
674 u_int impl_id;
675 u_int part_id;
676 u_int cpu;
677 size_t i;
678 const struct cpu_parts *cpu_partsp = NULL;
679
680 cpu = PCPU_GET(cpuid);
681 midr = get_midr();
682
683 /*
684 * Store midr to pcpu to allow fast reading
685 * from EL0, EL1 and assembly code.
686 */
687 PCPU_SET(midr, midr);
688
689 impl_id = CPU_IMPL(midr);
690 for (i = 0; i < nitems(cpu_implementers); i++) {
691 if (impl_id == cpu_implementers[i].impl_id ||
692 cpu_implementers[i].impl_id == 0) {
693 cpu_desc[cpu].cpu_impl = impl_id;
694 cpu_desc[cpu].cpu_impl_name = cpu_implementers[i].impl_name;
695 cpu_partsp = cpu_implementers[i].cpu_parts;
696 break;
697 }
698 }
699
700 part_id = CPU_PART(midr);
701 for (i = 0; &cpu_partsp[i] != NULL; i++) {
702 if (part_id == cpu_partsp[i].part_id ||
703 cpu_partsp[i].part_id == 0) {
704 cpu_desc[cpu].cpu_part_num = part_id;
705 cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name;
706 break;
707 }
708 }
709
710 cpu_desc[cpu].cpu_revision = CPU_REV(midr);
711 cpu_desc[cpu].cpu_variant = CPU_VAR(midr);
712
713 /* Save affinity for current CPU */
714 cpu_desc[cpu].mpidr = get_mpidr();
715 CPU_AFFINITY(cpu) = cpu_desc[cpu].mpidr & CPU_AFF_MASK;
716
717 cpu_desc[cpu].id_aa64dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
718 cpu_desc[cpu].id_aa64dfr1 = READ_SPECIALREG(id_aa64dfr1_el1);
719 cpu_desc[cpu].id_aa64isar0 = READ_SPECIALREG(id_aa64isar0_el1);
720 cpu_desc[cpu].id_aa64isar1 = READ_SPECIALREG(id_aa64isar1_el1);
721 cpu_desc[cpu].id_aa64mmfr0 = READ_SPECIALREG(id_aa64mmfr0_el1);
722 cpu_desc[cpu].id_aa64mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
723 cpu_desc[cpu].id_aa64pfr0 = READ_SPECIALREG(id_aa64pfr0_el1);
724 cpu_desc[cpu].id_aa64pfr1 = READ_SPECIALREG(id_aa64pfr1_el1);
725
726 if (cpu != 0) {
727 /*
728 * This code must run on one cpu at a time, but we are
729 * not scheduling on the current core so implement a
730 * simple spinlock.
731 */
732 while (atomic_cmpset_acq_int(&ident_lock, 0, 1) == 0)
733 __asm __volatile("wfe" ::: "memory");
734
735 switch (cpu_aff_levels) {
736 case 0:
737 if (CPU_AFF0(cpu_desc[cpu].mpidr) !=
738 CPU_AFF0(cpu_desc[0].mpidr))
739 cpu_aff_levels = 1;
740 /* FALLTHROUGH */
741 case 1:
742 if (CPU_AFF1(cpu_desc[cpu].mpidr) !=
743 CPU_AFF1(cpu_desc[0].mpidr))
744 cpu_aff_levels = 2;
745 /* FALLTHROUGH */
746 case 2:
747 if (CPU_AFF2(cpu_desc[cpu].mpidr) !=
748 CPU_AFF2(cpu_desc[0].mpidr))
749 cpu_aff_levels = 3;
750 /* FALLTHROUGH */
751 case 3:
752 if (CPU_AFF3(cpu_desc[cpu].mpidr) !=
753 CPU_AFF3(cpu_desc[0].mpidr))
754 cpu_aff_levels = 4;
755 break;
756 }
757
758 if (cpu_desc[cpu].id_aa64afr0 != cpu_desc[0].id_aa64afr0)
759 cpu_print_regs |= PRINT_ID_AA64_AFR0;
760 if (cpu_desc[cpu].id_aa64afr1 != cpu_desc[0].id_aa64afr1)
761 cpu_print_regs |= PRINT_ID_AA64_AFR1;
762
763 if (cpu_desc[cpu].id_aa64dfr0 != cpu_desc[0].id_aa64dfr0)
764 cpu_print_regs |= PRINT_ID_AA64_DFR0;
765 if (cpu_desc[cpu].id_aa64dfr1 != cpu_desc[0].id_aa64dfr1)
766 cpu_print_regs |= PRINT_ID_AA64_DFR1;
767
768 if (cpu_desc[cpu].id_aa64isar0 != cpu_desc[0].id_aa64isar0)
769 cpu_print_regs |= PRINT_ID_AA64_ISAR0;
770 if (cpu_desc[cpu].id_aa64isar1 != cpu_desc[0].id_aa64isar1)
771 cpu_print_regs |= PRINT_ID_AA64_ISAR1;
772
773 if (cpu_desc[cpu].id_aa64mmfr0 != cpu_desc[0].id_aa64mmfr0)
774 cpu_print_regs |= PRINT_ID_AA64_MMFR0;
775 if (cpu_desc[cpu].id_aa64mmfr1 != cpu_desc[0].id_aa64mmfr1)
776 cpu_print_regs |= PRINT_ID_AA64_MMFR1;
777
778 if (cpu_desc[cpu].id_aa64pfr0 != cpu_desc[0].id_aa64pfr0)
779 cpu_print_regs |= PRINT_ID_AA64_PFR0;
780 if (cpu_desc[cpu].id_aa64pfr1 != cpu_desc[0].id_aa64pfr1)
781 cpu_print_regs |= PRINT_ID_AA64_PFR1;
782
783 /* Wake up the other CPUs */
784 atomic_store_rel_int(&ident_lock, 0);
785 __asm __volatile("sev" ::: "memory");
786 }
787 }
Cache object: a0884e699a3c149f7cde37f757af5c0d
|