/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2003-2008 Joseph Koshy
 * Copyright (c) 2007 The FreeBSD Foundation
 * Copyright (c) 2021 Ampere Computing LLC
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Support for ARM DMC-620 Memory Controller PMU */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/systm.h>

#include <dev/hwpmc/pmu_dmc620_reg.h>

#define	DMC620_TYPE_CLKDIV2	0
#define	DMC620_TYPE_CLK		1
#define	CLASS2TYPE(c)	((c) - PMC_CLASS_DMC620_PMU_CD2)

/*
 * The pmc_classdep method vectors do not carry a class argument, so for
 * each method create thin "_cd2" and "_c" wrappers that pass the
 * corresponding PMC class down to the shared implementation.
 */
#define	CLASSDEP_FN2(fn, t1, a1, t2, a2)				\
	static int fn(int class, t1 a1, t2 a2);				\
	static int fn ## _cd2(t1 a1, t2 a2)				\
	{								\
		return (fn(PMC_CLASS_DMC620_PMU_CD2, a1, a2));		\
	}								\
	static int fn ## _c(t1 a1, t2 a2)				\
	{								\
		return (fn(PMC_CLASS_DMC620_PMU_C, a1, a2));		\
	}								\
	static int fn(int class, t1 a1, t2 a2)

#define	CLASSDEP_FN3(fn, t1, a1, t2, a2, t3, a3)			\
	static int fn(int class, t1 a1, t2 a2, t3 a3);			\
	static int fn ## _cd2(t1 a1, t2 a2, t3 a3)			\
	{								\
		return (fn(PMC_CLASS_DMC620_PMU_CD2, a1, a2, a3));	\
	}								\
	static int fn ## _c(t1 a1, t2 a2, t3 a3)			\
	{								\
		return (fn(PMC_CLASS_DMC620_PMU_C, a1, a2, a3));	\
	}								\
	static int fn(int class, t1 a1, t2 a2, t3 a3)

#define	CLASSDEP_FN4(fn, t1, a1, t2, a2, t3, a3, t4, a4)		\
	static int fn(int class, t1 a1, t2 a2, t3 a3, t4 a4);		\
	static int fn ## _cd2(t1 a1, t2 a2, t3 a3, t4 a4)		\
	{								\
		return (fn(PMC_CLASS_DMC620_PMU_CD2, a1, a2, a3, a4));	\
	}								\
	static int fn ## _c(t1 a1, t2 a2, t3 a3, t4 a4)			\
	{								\
		return (fn(PMC_CLASS_DMC620_PMU_C, a1, a2, a3, a4));	\
	}								\
	static int fn(int class, t1 a1, t2 a2, t3 a3, t4 a4)

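/* Per-unit state: the register-access argument and the unit's NUMA domain. */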
struct dmc620_pmc {
	void	*arg;
	int	domain;
};

struct dmc620_descr {
	struct pmc_descr pd_descr;  /* "base class" */
	void		*pd_rw_arg; /* Argument to use with read/write */
	struct pmc	*pd_pmc;
	struct pmc_hw	*pd_phw;
	uint32_t	pd_config;
	uint32_t	pd_match;
	uint32_t	pd_mask;
	uint32_t	pd_evsel;   /* address of EVSEL register */
	uint32_t	pd_perfctr; /* address of PERFCTR register */
};

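/*
 * dmc620_pmcdesc is indexed by counter type (0 = CLKDIV2, 1 = CLK) and by
 * row index; dmc620_pmcs holds the per-unit registration state.
 */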
static struct dmc620_descr **dmc620_pmcdesc[2];
static struct dmc620_pmc dmc620_pmcs[DMC620_UNIT_MAX];
static int dmc620_npmcs = 0;

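/*
 * Record the register-access argument and NUMA domain for DMC-620 unit
 * 'unit'.  Units beyond DMC620_UNIT_MAX are currently ignored.
 */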
void
dmc620_pmc_register(int unit, void *arg, int domain)
{

	if (unit >= DMC620_UNIT_MAX) {
		/* TODO */
		return;
	}

	dmc620_pmcs[unit].arg = arg;
	dmc620_pmcs[unit].domain = domain;
	dmc620_npmcs++;
}

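/* Undo dmc620_pmc_register() for 'unit'. */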
void
dmc620_pmc_unregister(int unit)
{

	dmc620_pmcs[unit].arg = NULL;
	dmc620_npmcs--;
}

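/*
 * Report the number of PMC classes this driver exposes: two (CLKDIV2 and
 * CLK) once at least one DMC-620 unit has been registered.
 */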
int
pmc_dmc620_nclasses(void)
{

	if (dmc620_npmcs > 0)
		return (2);
	return (0);
}

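/* Look up the counter descriptor for row index 'ri' of the given class. */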
static inline struct dmc620_descr *
dmc620desc(int class, int cpu, int ri)
{
	int c;

	c = CLASS2TYPE(class);
	KASSERT((c & 0xfffffffe) == 0, ("[dmc620,%d] 'c' can only be 0 or 1. "
	    "now %d", __LINE__, c));

	return (dmc620_pmcdesc[c][ri]);
}

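/*
 * Convert a row index into the unit-local hardware counter number; the CLK
 * counters follow the CLKDIV2 counters in the register layout.
 */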
static inline int
cntr(int class, int ri)
{
	int c;

	c = CLASS2TYPE(class);
	KASSERT((c & 0xfffffffe) == 0, ("[dmc620,%d] 'c' can only be 0 or 1. "
	    "now %d", __LINE__, c));

	if (c == DMC620_TYPE_CLKDIV2)
		return (ri % DMC620_CLKDIV2_COUNTERS_N);
	return ((ri % DMC620_CLK_COUNTERS_N) + DMC620_CLKDIV2_COUNTERS_N);
}

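/* Translate a PMC class to its index in the pmc_mdep class table. */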
static inline int
class2mdep(int class)
{

	switch (class) {
	case PMC_CLASS_DMC620_PMU_CD2:
		return (PMC_MDEP_CLASS_INDEX_DMC620_CD2);
	case PMC_CLASS_DMC620_PMU_C:
		return (PMC_MDEP_CLASS_INDEX_DMC620_C);
	}
	return (-1);
}

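/* Translate a row index to the DMC-620 unit that owns the counter. */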
static inline int
class_ri2unit(int class, int ri)
{

	if (class == PMC_CLASS_DMC620_PMU_CD2)
		return (ri / DMC620_CLKDIV2_COUNTERS_N);
	else
		return (ri / DMC620_CLK_COUNTERS_N);
}

/*
 * read a pmc register
 */

CLASSDEP_FN3(dmc620_read_pmc, int, cpu, int, ri, pmc_value_t *, v)
{
	struct dmc620_descr *desc;
	struct pmc *pm;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[dmc620,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0, ("[dmc620,%d] row-index %d out of range", __LINE__,
	    ri));

	desc = dmc620desc(class, cpu, ri);
	pm = desc->pd_phw->phw_pmc;

	KASSERT(pm != NULL,
	    ("[dmc620,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__,
	    cpu, ri));

	PMCDBG3(MDP, REA, 1, "%s id=%d class=%d", __func__, ri, class);

	/*
	 * Emulate a 64-bit counter: the 32-bit hardware counter overflows
	 * faster than pmcstat's default period, so splice the software
	 * overflow count into the upper 32 bits.
	 */
	/* Always CPU0. Single controller for all CPUs. */
	*v = ((uint64_t)pm->pm_pcpu_state[0].pps_overflowcnt << 32) |
	    pmu_dmc620_rd4(desc->pd_rw_arg, cntr(class, ri),
	    DMC620_COUNTER_VALUE_LO);

	PMCDBG3(MDP, REA, 2, "%s id=%d -> %jd", __func__, ri, *v);

	return (0);
}

/*
 * Write a pmc register.
 */

CLASSDEP_FN3(dmc620_write_pmc, int, cpu, int, ri, pmc_value_t, v)
{
	struct dmc620_descr *desc;
	struct pmc *pm __diagused;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[dmc620,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0, ("[dmc620,%d] row-index %d out of range", __LINE__,
	    ri));

	desc = dmc620desc(class, cpu, ri);
	pm = desc->pd_phw->phw_pmc;

	KASSERT(pm != NULL,
	    ("[dmc620,%d] PMC not owned (cpu%d,pmc%d)", __LINE__,
	    cpu, ri));

	PMCDBG4(MDP, WRI, 1, "%s cpu=%d ri=%d v=%jx", __func__, cpu, ri, v);

	pmu_dmc620_wr4(desc->pd_rw_arg, cntr(class, ri),
	    DMC620_COUNTER_VALUE_LO, v);
	return (0);
}

/*
 * configure hardware pmc according to the configuration recorded in
 * pmc 'pm'.
 */

CLASSDEP_FN3(dmc620_config_pmc, int, cpu, int, ri, struct pmc *, pm)
{
	struct pmc_hw *phw;

	PMCDBG4(MDP, CFG, 1, "%s cpu=%d ri=%d pm=%p", __func__, cpu, ri, pm);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[dmc620,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0, ("[dmc620,%d] row-index %d out of range", __LINE__,
	    ri));

	phw = dmc620desc(class, cpu, ri)->pd_phw;

	KASSERT(pm == NULL || phw->phw_pmc == NULL,
	    ("[dmc620,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
	    __LINE__, pm, phw->phw_pmc));

	phw->phw_pmc = pm;
	return (0);
}

/*
 * Retrieve a configured PMC pointer from hardware state.
 */

CLASSDEP_FN3(dmc620_get_config, int, cpu, int, ri, struct pmc **, ppm)
{

	*ppm = dmc620desc(class, cpu, ri)->pd_phw->phw_pmc;

	return (0);
}

/*
 * Check if a given allocation is feasible.
 */

CLASSDEP_FN4(dmc620_allocate_pmc, int, cpu, int, ri, struct pmc *, pm,
    const struct pmc_op_pmcallocate *, a)
{
	const struct pmc_descr *pd;
	uint64_t caps, control;
	enum pmc_event pe;
	uint8_t e;

	(void) cpu;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[dmc620,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0, ("[dmc620,%d] row-index %d out of range", __LINE__,
	    ri));

	pd = &dmc620desc(class, cpu, ri)->pd_descr;
	if (dmc620_pmcs[class_ri2unit(class, ri)].domain !=
	    pcpu_find(cpu)->pc_domain)
		return (EINVAL);

	/* check class match */
	if (pd->pd_class != a->pm_class)
		return (EINVAL);

	caps = pm->pm_caps;

	PMCDBG3(MDP, ALL, 1, "%s ri=%d caps=0x%jx", __func__, ri,
	    (uintmax_t)caps);

	pe = a->pm_ev;
	if (class == PMC_CLASS_DMC620_PMU_CD2)
		e = pe - PMC_EV_DMC620_PMU_CD2_FIRST;
	else
		e = pe - PMC_EV_DMC620_PMU_C_FIRST;

	control = (e << DMC620_COUNTER_CONTROL_EVENT_SHIFT) &
	    DMC620_COUNTER_CONTROL_EVENT_MASK;

	if (caps & PMC_CAP_INVERT)
		control |= DMC620_COUNTER_CONTROL_INVERT;

	pm->pm_md.pm_dmc620.pm_control = control;
	pm->pm_md.pm_dmc620.pm_match = a->pm_md.pm_dmc620.pm_dmc620_match;
	pm->pm_md.pm_dmc620.pm_mask = a->pm_md.pm_dmc620.pm_dmc620_mask;

	PMCDBG3(MDP, ALL, 2, "%s ri=%d -> control=0x%jx", __func__, ri,
	    (uintmax_t)control);

	return (0);
}

/*
 * Release machine dependent state associated with a PMC. This is a
 * no-op on this architecture.
 */

/* ARGSUSED0 */
CLASSDEP_FN3(dmc620_release_pmc, int, cpu, int, ri, struct pmc *, pmc)
{
	struct pmc_hw *phw __diagused;

	(void) pmc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[dmc620,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0, ("[dmc620,%d] row-index %d out of range", __LINE__,
	    ri));

	phw = dmc620desc(class, cpu, ri)->pd_phw;

	KASSERT(phw->phw_pmc == NULL,
	    ("[dmc620,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));

	return (0);
}

/*
 * start a PMC.
 */

CLASSDEP_FN2(dmc620_start_pmc, int, cpu, int, ri)
{
	struct dmc620_descr *desc;
	struct pmc_hw *phw;
	uint64_t control;
	struct pmc *pm;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[dmc620,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0, ("[dmc620,%d] row-index %d out of range", __LINE__,
	    ri));

	desc = dmc620desc(class, cpu, ri);
	phw = desc->pd_phw;
	pm = phw->phw_pmc;

	KASSERT(pm != NULL,
	    ("[dmc620,%d] starting cpu%d,pmc%d with null pmc record", __LINE__,
	    cpu, ri));

	PMCDBG3(MDP, STA, 1, "%s cpu=%d ri=%d", __func__, cpu, ri);

	pmu_dmc620_wr4(desc->pd_rw_arg, cntr(class, ri),
	    DMC620_COUNTER_MASK_LO, pm->pm_md.pm_dmc620.pm_mask & 0xffffffff);
	pmu_dmc620_wr4(desc->pd_rw_arg, cntr(class, ri),
	    DMC620_COUNTER_MASK_HI, pm->pm_md.pm_dmc620.pm_mask >> 32);
	pmu_dmc620_wr4(desc->pd_rw_arg, cntr(class, ri),
	    DMC620_COUNTER_MATCH_LO, pm->pm_md.pm_dmc620.pm_match & 0xffffffff);
	pmu_dmc620_wr4(desc->pd_rw_arg, cntr(class, ri),
	    DMC620_COUNTER_MATCH_HI, pm->pm_md.pm_dmc620.pm_match >> 32);
	/* turn on the PMC ENABLE bit */
	control = pm->pm_md.pm_dmc620.pm_control |
	    DMC620_COUNTER_CONTROL_ENABLE;

	PMCDBG2(MDP, STA, 2, "%s control=0x%jx", __func__, (uintmax_t)control);

	pmu_dmc620_wr4(desc->pd_rw_arg, cntr(class, ri),
	    DMC620_COUNTER_CONTROL, control);
	return (0);
}

/*
 * Stop a PMC.
 */

CLASSDEP_FN2(dmc620_stop_pmc, int, cpu, int, ri)
{
	struct dmc620_descr *desc;
	struct pmc_hw *phw;
	struct pmc *pm;
	uint64_t control;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[dmc620,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0, ("[dmc620,%d] row-index %d out of range", __LINE__,
	    ri));

	desc = dmc620desc(class, cpu, ri);
	phw = desc->pd_phw;
	pm = phw->phw_pmc;

	KASSERT(pm != NULL,
	    ("[dmc620,%d] cpu%d,pmc%d no PMC to stop", __LINE__,
	    cpu, ri));

	PMCDBG2(MDP, STO, 1, "%s ri=%d", __func__, ri);

	/* turn off the PMC ENABLE bit */
	control = pm->pm_md.pm_dmc620.pm_control &
	    ~DMC620_COUNTER_CONTROL_ENABLE;
	pmu_dmc620_wr4(desc->pd_rw_arg, cntr(class, ri),
	    DMC620_COUNTER_CONTROL, control);

	return (0);
}

/*
 * describe a PMC
 */
CLASSDEP_FN4(dmc620_describe, int, cpu, int, ri, struct pmc_info *, pi,
    struct pmc **, ppmc)
{
	struct pmc_hw *phw;
	size_t copied;
	int error;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[dmc620,%d] illegal CPU %d", __LINE__, cpu));
	KASSERT(ri >= 0, ("[dmc620,%d] row-index %d out of range", __LINE__,
	    ri));

	phw = dmc620desc(class, cpu, ri)->pd_phw;

	if ((error = copystr(dmc620desc(class, cpu, ri)->pd_descr.pd_name,
	    pi->pm_name, PMC_NAME_MAX, &copied)) != 0)
		return (error);

	pi->pm_class = dmc620desc(class, cpu, ri)->pd_descr.pd_class;

	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
		pi->pm_enabled = TRUE;
		*ppmc = phw->phw_pmc;
	} else {
		pi->pm_enabled = FALSE;
		*ppmc = NULL;
	}

	return (0);
}

/*
 * processor dependent initialization.
 */

CLASSDEP_FN2(dmc620_pcpu_init, struct pmc_mdep *, md, int, cpu)
{
	int first_ri, n, npmc;
	struct pmc_hw *phw;
	struct pmc_cpu *pc;
	int mdep_class;

	mdep_class = class2mdep(class);
	KASSERT(mdep_class != -1, ("[dmc620,%d] wrong class %d", __LINE__,
	    class));
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[dmc620,%d] insane cpu number %d", __LINE__, cpu));

	PMCDBG1(MDP, INI, 1, "dmc620-init cpu=%d", cpu);

	/*
	 * Set the content of the hardware descriptors to a known
	 * state and initialize pointers in the MI per-cpu descriptor.
	 */

	pc = pmc_pcpu[cpu];
	first_ri = md->pmd_classdep[mdep_class].pcd_ri;
	npmc = md->pmd_classdep[mdep_class].pcd_num;

	for (n = 0; n < npmc; n++) {
		phw = dmc620desc(class, cpu, n)->pd_phw;
		phw->phw_state = PMC_PHW_CPU_TO_STATE(cpu) |
		    PMC_PHW_INDEX_TO_STATE(n);
		/* Set enabled only if unit present. */
		if (dmc620_pmcs[class_ri2unit(class, n)].arg != NULL)
			phw->phw_state |= PMC_PHW_FLAG_IS_ENABLED;
		phw->phw_pmc = NULL;
		pc->pc_hwpmcs[n + first_ri] = phw;
	}
	return (0);
}

/*
 * processor dependent cleanup prior to the KLD
 * being unloaded
 */

CLASSDEP_FN2(dmc620_pcpu_fini, struct pmc_mdep *, md, int, cpu)
{

	return (0);
}

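/*
 * Handle an overflow interrupt from counter 'i' of DMC-620 unit 'unit'.
 * Counting-mode PMCs just accumulate the 32-bit wraparound in
 * pps_overflowcnt; sampling-mode PMCs post a sample and reload the
 * sampling count.
 */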
int
dmc620_intr(struct trapframe *tf, int class, int unit, int i)
{
	struct pmc_cpu *pc __diagused;
	struct pmc_hw *phw;
	struct pmc *pm;
	int error, cpu, ri;

	ri = i + unit * ((class == PMC_CLASS_DMC620_PMU_CD2) ?
	    DMC620_CLKDIV2_COUNTERS_N : DMC620_CLK_COUNTERS_N);
	cpu = curcpu;
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[dmc620,%d] CPU %d out of range", __LINE__, cpu));
	pc = pmc_pcpu[cpu];
	KASSERT(pc != NULL, ("pc != NULL"));

	phw = dmc620desc(class, cpu, ri)->pd_phw;
	KASSERT(phw != NULL, ("phw != NULL"));
	pm = phw->phw_pmc;
	if (pm == NULL)
		return (0);

	if (!PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
		/* Always CPU0. */
		pm->pm_pcpu_state[0].pps_overflowcnt += 1;
		return (0);
	}

	if (pm->pm_state != PMC_STATE_RUNNING)
		return (0);

	error = pmc_process_interrupt(PMC_HR, pm, tf);
	if (error)
		dmc620_stop_pmc(class, cpu, ri);

	/* Reload sampling count */
	dmc620_write_pmc(class, cpu, ri, pm->pm_sc.pm_reloadcount);

	return (0);
}

/*
 * Initialize ourselves.
 */

int
pmc_dmc620_initialize_cd2(struct pmc_mdep *md)
{
	struct pmc_classdep *pcd;
	int i, npmc, unit;

	KASSERT(md != NULL, ("[dmc620,%d] md is NULL", __LINE__));
	KASSERT(dmc620_npmcs <= DMC620_UNIT_MAX,
	    ("[dmc620,%d] dmc620_npmcs too big", __LINE__));

	PMCDBG0(MDP, INI, 1, "dmc620-initialize");

	npmc = DMC620_CLKDIV2_COUNTERS_N * dmc620_npmcs;
	pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_DMC620_CD2];

	pcd->pcd_caps = PMC_CAP_SYSTEM | PMC_CAP_READ |
	    PMC_CAP_WRITE | PMC_CAP_INVERT | PMC_CAP_QUALIFIER |
	    PMC_CAP_INTERRUPT | PMC_CAP_DOMWIDE;
	pcd->pcd_class = PMC_CLASS_DMC620_PMU_CD2;
	pcd->pcd_num = npmc;
	pcd->pcd_ri = md->pmd_npmc;
	pcd->pcd_width = 32;

	pcd->pcd_allocate_pmc = dmc620_allocate_pmc_cd2;
	pcd->pcd_config_pmc = dmc620_config_pmc_cd2;
	pcd->pcd_describe = dmc620_describe_cd2;
	pcd->pcd_get_config = dmc620_get_config_cd2;
	pcd->pcd_get_msr = NULL;
	pcd->pcd_pcpu_fini = dmc620_pcpu_fini_cd2;
	pcd->pcd_pcpu_init = dmc620_pcpu_init_cd2;
	pcd->pcd_read_pmc = dmc620_read_pmc_cd2;
	pcd->pcd_release_pmc = dmc620_release_pmc_cd2;
	pcd->pcd_start_pmc = dmc620_start_pmc_cd2;
	pcd->pcd_stop_pmc = dmc620_stop_pmc_cd2;
	pcd->pcd_write_pmc = dmc620_write_pmc_cd2;

	md->pmd_npmc += npmc;
	dmc620_pmcdesc[0] = malloc(sizeof(struct dmc620_descr *) * npmc *
	    DMC620_PMU_DEFAULT_UNITS_N, M_PMC, M_WAITOK|M_ZERO);
	for (i = 0; i < npmc; i++) {
		dmc620_pmcdesc[0][i] = malloc(sizeof(struct dmc620_descr),
		    M_PMC, M_WAITOK|M_ZERO);

		unit = i / DMC620_CLKDIV2_COUNTERS_N;
		KASSERT(unit >= 0, ("unit >= 0"));
		KASSERT(dmc620_pmcs[unit].arg != NULL, ("arg != NULL"));

		dmc620_pmcdesc[0][i]->pd_rw_arg = dmc620_pmcs[unit].arg;
		dmc620_pmcdesc[0][i]->pd_descr.pd_class =
		    PMC_CLASS_DMC620_PMU_CD2;
		dmc620_pmcdesc[0][i]->pd_descr.pd_caps = pcd->pcd_caps;
		dmc620_pmcdesc[0][i]->pd_phw = malloc(sizeof(struct pmc_hw),
		    M_PMC, M_WAITOK|M_ZERO);
		snprintf(dmc620_pmcdesc[0][i]->pd_descr.pd_name, 63,
		    "DMC620_CD2_%d", i);
	}

	return (0);
}

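/*
 * Initialize the "clk" counter class; this mirrors
 * pmc_dmc620_initialize_cd2() above.
 */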
int
pmc_dmc620_initialize_c(struct pmc_mdep *md)
{
	struct pmc_classdep *pcd;
	int i, npmc, unit;

	KASSERT(md != NULL, ("[dmc620,%d] md is NULL", __LINE__));
	KASSERT(dmc620_npmcs <= DMC620_UNIT_MAX,
	    ("[dmc620,%d] dmc620_npmcs too big", __LINE__));

	PMCDBG0(MDP, INI, 1, "dmc620-initialize");

	npmc = DMC620_CLK_COUNTERS_N * dmc620_npmcs;
	pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_DMC620_C];

	pcd->pcd_caps = PMC_CAP_SYSTEM | PMC_CAP_READ |
	    PMC_CAP_WRITE | PMC_CAP_INVERT | PMC_CAP_QUALIFIER |
	    PMC_CAP_INTERRUPT | PMC_CAP_DOMWIDE;
	pcd->pcd_class = PMC_CLASS_DMC620_PMU_C;
	pcd->pcd_num = npmc;
	pcd->pcd_ri = md->pmd_npmc;
	pcd->pcd_width = 32;

	pcd->pcd_allocate_pmc = dmc620_allocate_pmc_c;
	pcd->pcd_config_pmc = dmc620_config_pmc_c;
	pcd->pcd_describe = dmc620_describe_c;
	pcd->pcd_get_config = dmc620_get_config_c;
	pcd->pcd_get_msr = NULL;
	pcd->pcd_pcpu_fini = dmc620_pcpu_fini_c;
	pcd->pcd_pcpu_init = dmc620_pcpu_init_c;
	pcd->pcd_read_pmc = dmc620_read_pmc_c;
	pcd->pcd_release_pmc = dmc620_release_pmc_c;
	pcd->pcd_start_pmc = dmc620_start_pmc_c;
	pcd->pcd_stop_pmc = dmc620_stop_pmc_c;
	pcd->pcd_write_pmc = dmc620_write_pmc_c;

	md->pmd_npmc += npmc;
	dmc620_pmcdesc[1] = malloc(sizeof(struct dmc620_descr *) * npmc *
	    DMC620_PMU_DEFAULT_UNITS_N, M_PMC, M_WAITOK|M_ZERO);
	for (i = 0; i < npmc; i++) {
		dmc620_pmcdesc[1][i] = malloc(sizeof(struct dmc620_descr),
		    M_PMC, M_WAITOK|M_ZERO);

		unit = i / DMC620_CLK_COUNTERS_N;
		KASSERT(unit >= 0, ("unit >= 0"));
		KASSERT(dmc620_pmcs[unit].arg != NULL, ("arg != NULL"));

		dmc620_pmcdesc[1][i]->pd_rw_arg = dmc620_pmcs[unit].arg;
		dmc620_pmcdesc[1][i]->pd_descr.pd_class = PMC_CLASS_DMC620_PMU_C;
		dmc620_pmcdesc[1][i]->pd_descr.pd_caps = pcd->pcd_caps;
		dmc620_pmcdesc[1][i]->pd_phw = malloc(sizeof(struct pmc_hw),
		    M_PMC, M_WAITOK|M_ZERO);
		snprintf(dmc620_pmcdesc[1][i]->pd_descr.pd_name, 63,
		    "DMC620_C_%d", i);
	}

	return (0);
}

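/* Free the descriptor state allocated by pmc_dmc620_initialize_cd2(). */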
void
pmc_dmc620_finalize_cd2(struct pmc_mdep *md)
{
	struct pmc_classdep *pcd;
	int i, npmc;

	KASSERT(md->pmd_classdep[PMC_MDEP_CLASS_INDEX_DMC620_CD2].pcd_class ==
	    PMC_CLASS_DMC620_PMU_CD2, ("[dmc620,%d] pmc class mismatch",
	    __LINE__));

	pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_DMC620_CD2];

	npmc = pcd->pcd_num;
	for (i = 0; i < npmc; i++) {
		free(dmc620_pmcdesc[0][i]->pd_phw, M_PMC);
		free(dmc620_pmcdesc[0][i], M_PMC);
	}
	free(dmc620_pmcdesc[0], M_PMC);
	dmc620_pmcdesc[0] = NULL;
}

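/* Free the descriptor state allocated by pmc_dmc620_initialize_c(). */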
void
pmc_dmc620_finalize_c(struct pmc_mdep *md)
{
	struct pmc_classdep *pcd;
	int i, npmc;

	KASSERT(md->pmd_classdep[PMC_MDEP_CLASS_INDEX_DMC620_C].pcd_class ==
	    PMC_CLASS_DMC620_PMU_C, ("[dmc620,%d] pmc class mismatch",
	    __LINE__));

	pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_DMC620_C];

	npmc = pcd->pcd_num;
	for (i = 0; i < npmc; i++) {
		free(dmc620_pmcdesc[1][i]->pd_phw, M_PMC);
		free(dmc620_pmcdesc[1][i], M_PMC);
	}
	free(dmc620_pmcdesc[1], M_PMC);
	dmc620_pmcdesc[1] = NULL;
}