/*-
 * Copyright (c) 2015 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>

#include <machine/pmc_mdep.h>
#include <machine/cpu.h>

static int armv7_npmcs;

struct armv7_event_code_map {
	enum pmc_event	pe_ev;
	uint8_t		pe_code;
};

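/*
 * Event selector value that this driver treats as a request for the
 * dedicated cycle counter (PMCCNTR) rather than one of the generic
 * event counters.  The cycle counter is controlled through bit 31 of
 * the count enable, interrupt enable and overflow status registers
 * (PMCNTENSET/CLR, PMINTENSET/CLR, PMOVSR).
 */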
#define	PMC_EV_CPU_CYCLES	0xFF

/*
 * Per-processor information.
 */
struct armv7_cpu {
	struct pmc_hw   *pc_armv7pmcs;
};

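/*
 * Per-CPU soft state, indexed by CPU number.  The array itself is
 * allocated in pmc_armv7_initialize() and each slot is filled in by
 * armv7_pcpu_init() when the corresponding CPU is set up.
 */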
static struct armv7_cpu **armv7_pcpu;

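/*
 * The enable/disable helpers below write a one-bit-per-counter mask to
 * the corresponding CP15 set/clear register pair: writing a 1 to a SET
 * register enables that counter (or its overflow interrupt), writing a
 * 1 to the matching CLR register disables it.  Bits not set in the mask
 * are left untouched, so no read-modify-write is needed.
 */
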
/*
 * Interrupt Enable Set Register (PMINTENSET)
 */
static __inline void
armv7_interrupt_enable(uint32_t pmc)
{
	uint32_t reg;

	reg = (1 << pmc);
	cp15_pminten_set(reg);
}

/*
 * Interrupt Enable Clear Register (PMINTENCLR)
 */
static __inline void
armv7_interrupt_disable(uint32_t pmc)
{
	uint32_t reg;

	reg = (1 << pmc);
	cp15_pminten_clr(reg);
}

/*
 * Count Enable Set Register (PMCNTENSET)
 */
static __inline void
armv7_counter_enable(unsigned int pmc)
{
	uint32_t reg;

	reg = (1 << pmc);
	cp15_pmcnten_set(reg);
}

/*
 * Count Enable Clear Register (PMCNTENCLR)
 */
static __inline void
armv7_counter_disable(unsigned int pmc)
{
	uint32_t reg;

	reg = (1 << pmc);
	cp15_pmcnten_clr(reg);
}

/*
 * Event counter read/write.  The generic event counters are accessed
 * indirectly: PMSELR selects a counter, after which PMXEVCNTR reads or
 * writes the selected counter's value.  The cycle counter has its own
 * register, PMCCNTR, and is handled separately.
 */
static uint32_t
armv7_pmcn_read(unsigned int pmc, uint32_t evsel)
{

	if (evsel == PMC_EV_CPU_CYCLES)
		return ((uint32_t)cp15_pmccntr_get());

	KASSERT(pmc < armv7_npmcs, ("%s: illegal PMC number %d", __func__,
	    pmc));

	cp15_pmselr_set(pmc);
	return (cp15_pmxevcntr_get());
}

static uint32_t
armv7_pmcn_write(unsigned int pmc, uint32_t reg)
{

	KASSERT(pmc < armv7_npmcs, ("%s: illegal PMC number %d", __func__,
	    pmc));

	cp15_pmselr_set(pmc);
	cp15_pmxevcntr_set(reg);

	return (reg);
}

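/*
 * Allocation only records the requested event number (masked with
 * EVENT_ID_MASK) in the per-PMC MD state; the hardware is not touched
 * until armv7_start_pmc() programs the event type and enables the
 * counter.
 */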
static int
armv7_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	enum pmc_event pe;
	uint32_t config;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[armv7,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < armv7_npmcs,
	    ("[armv7,%d] illegal row index %d", __LINE__, ri));

	if (a->pm_class != PMC_CLASS_ARMV7)
		return (EINVAL);
	pe = a->pm_ev;

	config = (pe & EVENT_ID_MASK);
	pm->pm_md.pm_armv7.pm_armv7_evsel = config;

	PMCDBG2(MDP, ALL, 2, "armv7-allocate ri=%d -> config=0x%x", ri, config);

	return (0);
}

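/*
 * Read the current value of a PMC.  The hardware counters are only 32
 * bits wide; the driver widens them to 64 bits in software by counting
 * overflows in pps_overflowcnt.  A pending overflow flag is folded in
 * here and the counter is re-read, so a wrap that happens between the
 * read and the overflow check is not lost.
 */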
static int
armv7_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	pmc_value_t tmp;
	struct pmc *pm;
	register_t s;
	u_int reg;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[armv7,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < armv7_npmcs,
	    ("[armv7,%d] illegal row index %d", __LINE__, ri));

	pm = armv7_pcpu[cpu]->pc_armv7pmcs[ri].phw_pmc;

	s = intr_disable();
	tmp = armv7_pmcn_read(ri, pm->pm_md.pm_armv7.pm_armv7_evsel);

	/* Check if counter has overflowed */
	if (pm->pm_md.pm_armv7.pm_armv7_evsel == PMC_EV_CPU_CYCLES)
		reg = (1u << 31);
	else
		reg = (1u << ri);

	if ((cp15_pmovsr_get() & reg) != 0) {
		/* Clear the overflow flag (PMOVSR is write-1-to-clear). */
		cp15_pmovsr_set(reg);
		pm->pm_pcpu_state[cpu].pps_overflowcnt++;

		/* Reread counter in case we raced. */
		tmp = armv7_pmcn_read(ri, pm->pm_md.pm_armv7.pm_armv7_evsel);
	}
	tmp += 0x100000000llu * pm->pm_pcpu_state[cpu].pps_overflowcnt;
	intr_restore(s);

	PMCDBG2(MDP, REA, 2, "armv7-read id=%d -> %jd", ri, tmp);
	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
		/*
		 * Clamp value to 0 if the counter just overflowed,
		 * otherwise the returned reload count would wrap to a
		 * huge value.
		 */
		if ((tmp & (1ull << 63)) == 0)
			tmp = 0;
		else
			tmp = ARMV7_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
	}
	*v = tmp;

	return (0);
}

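/*
 * Write a value to a PMC.  For sampling PMCs the caller passes a
 * reload count; ARMV7_RELOAD_COUNT_TO_PERFCTR_VALUE() converts it to a
 * raw counter value intended to overflow (and raise the interrupt)
 * after that many events.  The upper 32 bits of the value are kept in
 * the software overflow counter.
 */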
static int
armv7_write_pmc(int cpu, int ri, pmc_value_t v)
{
	struct pmc *pm;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[armv7,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < armv7_npmcs,
	    ("[armv7,%d] illegal row-index %d", __LINE__, ri));

	pm = armv7_pcpu[cpu]->pc_armv7pmcs[ri].phw_pmc;

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		v = ARMV7_RELOAD_COUNT_TO_PERFCTR_VALUE(v);

	PMCDBG3(MDP, WRI, 1, "armv7-write cpu=%d ri=%d v=%jx", cpu, ri, v);

	pm->pm_pcpu_state[cpu].pps_overflowcnt = v >> 32;
	if (pm->pm_md.pm_armv7.pm_armv7_evsel == PMC_EV_CPU_CYCLES)
		cp15_pmccntr_set(v);
	else
		armv7_pmcn_write(ri, v);

	return (0);
}

static int
armv7_config_pmc(int cpu, int ri, struct pmc *pm)
{
	struct pmc_hw *phw;

	PMCDBG3(MDP, CFG, 1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[armv7,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < armv7_npmcs,
	    ("[armv7,%d] illegal row-index %d", __LINE__, ri));

	phw = &armv7_pcpu[cpu]->pc_armv7pmcs[ri];

	KASSERT(pm == NULL || phw->phw_pmc == NULL,
	    ("[armv7,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
	    __LINE__, pm, phw->phw_pmc));

	phw->phw_pmc = pm;

	return (0);
}

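/*
 * Start a PMC.  Generic counters have their event type programmed
 * through the PMSELR/PMXEVTYPER pair; the cycle counter needs no event
 * selection and is simply enabled.  In both cases the row index is the
 * bit position used with the enable and interrupt-enable registers,
 * which is why the cycle counter is remapped to bit 31 here.
 */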
static int
armv7_start_pmc(int cpu, int ri)
{
	struct pmc_hw *phw;
	uint32_t config;
	struct pmc *pm;

	phw = &armv7_pcpu[cpu]->pc_armv7pmcs[ri];
	pm = phw->phw_pmc;
	config = pm->pm_md.pm_armv7.pm_armv7_evsel;

	/*
	 * Configure the event selection.
	 */
	if (config != PMC_EV_CPU_CYCLES) {
		cp15_pmselr_set(ri);
		cp15_pmxevtyper_set(config);
	} else
		ri = 31;

	/*
	 * Enable the PMC.
	 */
	armv7_interrupt_enable(ri);
	armv7_counter_enable(ri);

	return (0);
}

static int
armv7_stop_pmc(int cpu, int ri)
{
	struct pmc_hw *phw;
	struct pmc *pm;
	uint32_t config;

	phw = &armv7_pcpu[cpu]->pc_armv7pmcs[ri];
	pm = phw->phw_pmc;
	config = pm->pm_md.pm_armv7.pm_armv7_evsel;
	if (config == PMC_EV_CPU_CYCLES)
		ri = 31;

	/*
	 * Disable the PMC.
	 */
	armv7_counter_disable(ri);
	armv7_interrupt_disable(ri);

	return (0);
}

static int
armv7_release_pmc(int cpu, int ri, struct pmc *pmc)
{
	struct pmc_hw *phw __diagused;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[armv7,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < armv7_npmcs,
	    ("[armv7,%d] illegal row-index %d", __LINE__, ri));

	phw = &armv7_pcpu[cpu]->pc_armv7pmcs[ri];
	KASSERT(phw->phw_pmc == NULL,
	    ("[armv7,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));

	return (0);
}

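/*
 * PMU overflow interrupt handler.  Scan every row, claim any counter
 * whose overflow flag is set and bump its 64-bit software extension.
 * Samples are posted only for sampling-mode PMCs that are currently
 * running; those are also reloaded with their sampling count.  The
 * return value tells the caller whether at least one overflow was
 * claimed here.
 */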
static int
armv7_intr(struct trapframe *tf)
{
	int retval, ri;
	struct pmc *pm;
	int error;
	int reg, cpu;

	cpu = curcpu;
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[armv7,%d] CPU %d out of range", __LINE__, cpu));

	retval = 0;

	for (ri = 0; ri < armv7_npmcs; ri++) {
		pm = armv7_pcpu[cpu]->pc_armv7pmcs[ri].phw_pmc;
		if (pm == NULL)
			continue;

		/* Check if counter has overflowed */
		if (pm->pm_md.pm_armv7.pm_armv7_evsel == PMC_EV_CPU_CYCLES)
			reg = (1u << 31);
		else
			reg = (1u << ri);

		if ((cp15_pmovsr_get() & reg) == 0)
			continue;

		/* Clear the overflow flag (write-1-to-clear). */
		cp15_pmovsr_set(reg);

		retval = 1; /* Found an interrupting PMC. */

		pm->pm_pcpu_state[cpu].pps_overflowcnt += 1;

		if (!PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
			continue;

		if (pm->pm_state != PMC_STATE_RUNNING)
			continue;

		error = pmc_process_interrupt(PMC_HR, pm, tf);
		if (error)
			armv7_stop_pmc(cpu, ri);

		/* Reload sampling count */
		armv7_write_pmc(cpu, ri, pm->pm_sc.pm_reloadcount);
	}

	return (retval);
}

static int
armv7_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
	char armv7_name[PMC_NAME_MAX];
	struct pmc_hw *phw;
	int error;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[armv7,%d] illegal CPU %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < armv7_npmcs,
	    ("[armv7,%d] row-index %d out of range", __LINE__, ri));

	phw = &armv7_pcpu[cpu]->pc_armv7pmcs[ri];
	snprintf(armv7_name, sizeof(armv7_name), "ARMV7-%d", ri);
	if ((error = copystr(armv7_name, pi->pm_name, PMC_NAME_MAX,
	    NULL)) != 0)
		return (error);
	pi->pm_class = PMC_CLASS_ARMV7;
	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
		pi->pm_enabled = TRUE;
		*ppmc = phw->phw_pmc;
	} else {
		pi->pm_enabled = FALSE;
		*ppmc = NULL;
	}

	return (0);
}

static int
armv7_get_config(int cpu, int ri, struct pmc **ppm)
{

	*ppm = armv7_pcpu[cpu]->pc_armv7pmcs[ri].phw_pmc;

	return (0);
}

/*
 * XXX don't know what we should do here.
 */
static int
armv7_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
{

	return (0);
}

static int
armv7_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
{

	return (0);
}

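/*
 * Per-CPU initialization: allocate the soft state for this CPU, hook
 * its pmc_hw descriptors into the shared pmc_pcpu table at this
 * class's first row index, disable all counters and overflow
 * interrupts, clear any stale overflow flags and finally set the
 * global enable bit in PMCR.
 */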
static int
armv7_pcpu_init(struct pmc_mdep *md, int cpu)
{
	struct armv7_cpu *pac;
	struct pmc_hw *phw;
	struct pmc_cpu *pc;
	uint32_t pmnc;
	int first_ri;
	int i;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[armv7,%d] wrong cpu number %d", __LINE__, cpu));
	PMCDBG1(MDP, INI, 1, "armv7-init cpu=%d", cpu);

	armv7_pcpu[cpu] = pac = malloc(sizeof(struct armv7_cpu), M_PMC,
	    M_WAITOK|M_ZERO);

	pac->pc_armv7pmcs = malloc(sizeof(struct pmc_hw) * armv7_npmcs,
	    M_PMC, M_WAITOK|M_ZERO);
	pc = pmc_pcpu[cpu];
	first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_ARMV7].pcd_ri;
	KASSERT(pc != NULL, ("[armv7,%d] NULL per-cpu pointer", __LINE__));

	for (i = 0, phw = pac->pc_armv7pmcs; i < armv7_npmcs; i++, phw++) {
		phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
		    PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(i);
		phw->phw_pmc = NULL;
		pc->pc_hwpmcs[i + first_ri] = phw;
	}

	/* Disable all counters and interrupts, clear stale overflow flags. */
	pmnc = 0xffffffff;
	cp15_pmcnten_clr(pmnc);
	cp15_pminten_clr(pmnc);
	cp15_pmovsr_set(pmnc);

	/* Enable unit */
	pmnc = cp15_pmcr_get();
	pmnc |= ARMV7_PMNC_ENABLE;
	cp15_pmcr_set(pmnc);

	return (0);
}

static int
armv7_pcpu_fini(struct pmc_mdep *md, int cpu)
{
	uint32_t pmnc;

	/* Disable the unit, then mask and clear everything. */
	pmnc = cp15_pmcr_get();
	pmnc &= ~ARMV7_PMNC_ENABLE;
	cp15_pmcr_set(pmnc);

	pmnc = 0xffffffff;
	cp15_pmcnten_clr(pmnc);
	cp15_pminten_clr(pmnc);
	cp15_pmovsr_set(pmnc);

	return (0);
}

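/*
 * Machine-dependent initialization.  The number of generic event
 * counters is read from the N field of PMCR, and the ID code field is
 * used to pick the CPU type reported to the MI layer.  The counters
 * are 32 bits wide, hence pcd_width below.
 */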
struct pmc_mdep *
pmc_armv7_initialize(void)
{
	struct pmc_mdep *pmc_mdep;
	struct pmc_classdep *pcd;
	int idcode;
	int reg;

	reg = cp15_pmcr_get();
	armv7_npmcs = (reg >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
	idcode = (reg & ARMV7_IDCODE_MASK) >> ARMV7_IDCODE_SHIFT;

	PMCDBG1(MDP, INI, 1, "armv7-init npmcs=%d", armv7_npmcs);

	/*
	 * Allocate space for pointers to PMC HW descriptors and for
	 * the MDEP structure used by MI code.
	 */
	armv7_pcpu = malloc(sizeof(struct armv7_cpu *) * pmc_cpu_max(),
	    M_PMC, M_WAITOK | M_ZERO);

	/* Just one class */
	pmc_mdep = pmc_mdep_alloc(1);

	switch (idcode) {
	case ARMV7_IDCODE_CORTEX_A9:
		pmc_mdep->pmd_cputype = PMC_CPU_ARMV7_CORTEX_A9;
		break;
	default:
	case ARMV7_IDCODE_CORTEX_A8:
		/*
		 * Only the common (architectural) events are implemented
		 * for the Cortex-A8, so it also serves as the fallback
		 * for unrecognized ID codes.
		 */
		pmc_mdep->pmd_cputype = PMC_CPU_ARMV7_CORTEX_A8;
		break;
	}

	pcd = &pmc_mdep->pmd_classdep[PMC_MDEP_CLASS_INDEX_ARMV7];
	pcd->pcd_caps = ARMV7_PMC_CAPS;
	pcd->pcd_class = PMC_CLASS_ARMV7;
	pcd->pcd_num = armv7_npmcs;
	pcd->pcd_ri = pmc_mdep->pmd_npmc;
	pcd->pcd_width = 32;

	pcd->pcd_allocate_pmc = armv7_allocate_pmc;
	pcd->pcd_config_pmc = armv7_config_pmc;
	pcd->pcd_pcpu_fini = armv7_pcpu_fini;
	pcd->pcd_pcpu_init = armv7_pcpu_init;
	pcd->pcd_describe = armv7_describe;
	pcd->pcd_get_config = armv7_get_config;
	pcd->pcd_read_pmc = armv7_read_pmc;
	pcd->pcd_release_pmc = armv7_release_pmc;
	pcd->pcd_start_pmc = armv7_start_pmc;
	pcd->pcd_stop_pmc = armv7_stop_pmc;
	pcd->pcd_write_pmc = armv7_write_pmc;

	pmc_mdep->pmd_intr = armv7_intr;
	pmc_mdep->pmd_switch_in = armv7_switch_in;
	pmc_mdep->pmd_switch_out = armv7_switch_out;

	pmc_mdep->pmd_npmc += armv7_npmcs;

	return (pmc_mdep);
}

void
pmc_armv7_finalize(struct pmc_mdep *md)
{

}