


FreeBSD/Linux Kernel Cross Reference
sys/dev/hwpmc/hwpmc_arm64.c


/*-
 * Copyright (c) 2015 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * This software was developed by the University of Cambridge Computer
 * Laboratory with support from ARM Ltd.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>

#include <machine/pmc_mdep.h>
#include <machine/cpu.h>

#include "opt_acpi.h"

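/*
 * Number of hardware event counters, read from PMCR_EL0.N in
 * pmc_arm64_initialize().
 */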
static int arm64_npmcs;

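/*
 * Mapping of a pmc(3) event identifier to a hardware event code.
 */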
struct arm64_event_code_map {
        enum pmc_event  pe_ev;
        uint8_t         pe_code;
};

/*
 * Per-processor information.
 */
struct arm64_cpu {
        struct pmc_hw   *pc_arm64pmcs;
};

static struct arm64_cpu **arm64_pcpu;

/*
 * Interrupt Enable Set Register
 */
static __inline void
arm64_interrupt_enable(uint32_t pmc)
{
        uint32_t reg;

        reg = (1 << pmc);
        WRITE_SPECIALREG(pmintenset_el1, reg);

        isb();
}

/*
 * Interrupt Enable Clear Register
 */
static __inline void
arm64_interrupt_disable(uint32_t pmc)
{
        uint32_t reg;

        reg = (1 << pmc);
        WRITE_SPECIALREG(pmintenclr_el1, reg);

        isb();
}

/*
 * Counter Enable Set Register
 */
static __inline void
arm64_counter_enable(unsigned int pmc)
{
        uint32_t reg;

        reg = (1 << pmc);
        WRITE_SPECIALREG(pmcntenset_el0, reg);

        isb();
}

/*
 * Counter Enable Clear Register
 */
static __inline void
arm64_counter_disable(unsigned int pmc)
{
        uint32_t reg;

        reg = (1 << pmc);
        WRITE_SPECIALREG(pmcntenclr_el0, reg);

        isb();
}

/*
 * Performance Monitors Control Register
 */
static uint32_t
arm64_pmcr_read(void)
{
        uint32_t reg;

        reg = READ_SPECIALREG(pmcr_el0);

        return (reg);
}

static void
arm64_pmcr_write(uint32_t reg)
{

        WRITE_SPECIALREG(pmcr_el0, reg);

        isb();
}

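/*
 * The event counters are banked: software selects a counter by writing
 * its index to PMSELR_EL0, then accesses the selected counter through
 * PMXEVCNTR_EL0 (the count) and PMXEVTYPER_EL0 (the event type).
 */
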
/*
 * Performance Count Register N
 */
static uint32_t
arm64_pmcn_read(unsigned int pmc)
{

        KASSERT(pmc < arm64_npmcs, ("%s: illegal PMC number %d", __func__, pmc));

        WRITE_SPECIALREG(pmselr_el0, pmc);

        isb();

        return (READ_SPECIALREG(pmxevcntr_el0));
}

static void
arm64_pmcn_write(unsigned int pmc, uint32_t reg)
{

        KASSERT(pmc < arm64_npmcs, ("%s: illegal PMC number %d", __func__, pmc));

        WRITE_SPECIALREG(pmselr_el0, pmc);
        WRITE_SPECIALREG(pmxevcntr_el0, reg);

        isb();
}

static int
arm64_allocate_pmc(int cpu, int ri, struct pmc *pm,
  const struct pmc_op_pmcallocate *a)
{
        uint32_t config;
        enum pmc_event pe;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[arm64,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < arm64_npmcs,
            ("[arm64,%d] illegal row index %d", __LINE__, ri));

        if (a->pm_class != PMC_CLASS_ARMV8) {
                return (EINVAL);
        }
        pe = a->pm_ev;

        /* Adjust the config value if needed. */
        config = a->pm_md.pm_md_config;
        if ((a->pm_md.pm_md_flags & PM_MD_RAW_EVENT) == 0) {
                config = (uint32_t)pe - PMC_EV_ARMV8_FIRST;
                if (config > (PMC_EV_ARMV8_LAST - PMC_EV_ARMV8_FIRST))
                        return (EINVAL);
        }

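        /*
         * PMEVTYPER_U suppresses counting at EL0 (user) and PMEVTYPER_P
         * suppresses counting at EL1 (kernel), so a system-only counter
         * masks out user mode and a user-only counter masks out the
         * kernel.
         */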
        switch (a->pm_caps & (PMC_CAP_SYSTEM | PMC_CAP_USER)) {
        case PMC_CAP_SYSTEM:
                config |= PMEVTYPER_U;
                break;
        case PMC_CAP_USER:
                config |= PMEVTYPER_P;
                break;
        default:
                /*
                 * Trace both USER and SYSTEM if none are specified
                 * (default setting) or if both flags are specified
                 * (user explicitly requested both qualifiers).
                 */
                break;
        }

        pm->pm_md.pm_arm64.pm_arm64_evsel = config;
        PMCDBG2(MDP, ALL, 2, "arm64-allocate ri=%d -> config=0x%x", ri, config);

        return (0);
}

static int
arm64_read_pmc(int cpu, int ri, pmc_value_t *v)
{
        pmc_value_t tmp;
        struct pmc *pm;
        register_t s;
        int reg;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[arm64,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < arm64_npmcs,
            ("[arm64,%d] illegal row index %d", __LINE__, ri));

        pm  = arm64_pcpu[cpu]->pc_arm64pmcs[ri].phw_pmc;

        /*
         * Ensure we don't get interrupted while updating the overflow count.
         */
        s = intr_disable();
        tmp = arm64_pmcn_read(ri);
        reg = (1 << ri);
        if ((READ_SPECIALREG(pmovsclr_el0) & reg) != 0) {
                /* Clear Overflow Flag */
                WRITE_SPECIALREG(pmovsclr_el0, reg);
                pm->pm_pcpu_state[cpu].pps_overflowcnt++;

                /* Reread counter in case we raced. */
                tmp = arm64_pmcn_read(ri);
        }
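        /* Widen the 32-bit hardware count with the software overflow count. */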
        tmp += 0x100000000llu * pm->pm_pcpu_state[cpu].pps_overflowcnt;
        intr_restore(s);

        PMCDBG2(MDP, REA, 2, "arm64-read id=%d -> %jd", ri, tmp);
        if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
                /*
                 * Clamp value to 0 if the counter just overflowed,
                 * otherwise the returned reload count would wrap to a
                 * huge value.
                 */
                if ((tmp & (1ull << 63)) == 0)
                        tmp = 0;
                else
                        tmp = ARMV8_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
        }
        *v = tmp;

        return (0);
}

static int
arm64_write_pmc(int cpu, int ri, pmc_value_t v)
{
        struct pmc *pm;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[arm64,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < arm64_npmcs,
            ("[arm64,%d] illegal row-index %d", __LINE__, ri));

        pm  = arm64_pcpu[cpu]->pc_arm64pmcs[ri].phw_pmc;

        if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
                v = ARMV8_RELOAD_COUNT_TO_PERFCTR_VALUE(v);

        PMCDBG3(MDP, WRI, 1, "arm64-write cpu=%d ri=%d v=%jx", cpu, ri, v);

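        /*
         * The hardware counter holds only the low 32 bits; keep the
         * high 32 bits in the software overflow count.
         */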
        pm->pm_pcpu_state[cpu].pps_overflowcnt = v >> 32;
        arm64_pmcn_write(ri, v);

        return (0);
}

static int
arm64_config_pmc(int cpu, int ri, struct pmc *pm)
{
        struct pmc_hw *phw;

        PMCDBG3(MDP, CFG, 1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[arm64,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < arm64_npmcs,
            ("[arm64,%d] illegal row-index %d", __LINE__, ri));

        phw = &arm64_pcpu[cpu]->pc_arm64pmcs[ri];

        KASSERT(pm == NULL || phw->phw_pmc == NULL,
            ("[arm64,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
            __LINE__, pm, phw->phw_pmc));

        phw->phw_pmc = pm;

        return (0);
}

static int
arm64_start_pmc(int cpu, int ri)
{
        struct pmc_hw *phw;
        uint32_t config;
        struct pmc *pm;

        phw    = &arm64_pcpu[cpu]->pc_arm64pmcs[ri];
        pm     = phw->phw_pmc;
        config = pm->pm_md.pm_arm64.pm_arm64_evsel;

        /*
         * Configure the event selection.
         */
        WRITE_SPECIALREG(pmselr_el0, ri);
        WRITE_SPECIALREG(pmxevtyper_el0, config);

        isb();

        /*
         * Enable the PMC.
         */
        arm64_interrupt_enable(ri);
        arm64_counter_enable(ri);

        return (0);
}

static int
arm64_stop_pmc(int cpu, int ri)
{
        /*
         * Disable the PMC.
         */
        arm64_counter_disable(ri);
        arm64_interrupt_disable(ri);

        return (0);
}

static int
arm64_release_pmc(int cpu, int ri, struct pmc *pmc)
{
        struct pmc_hw *phw __diagused;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[arm64,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < arm64_npmcs,
            ("[arm64,%d] illegal row-index %d", __LINE__, ri));

        phw = &arm64_pcpu[cpu]->pc_arm64pmcs[ri];
        KASSERT(phw->phw_pmc == NULL,
            ("[arm64,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));

        return (0);
}

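/*
 * Overflow interrupt handler: scan all configured counters, clear any
 * set overflow flags, account for the overflow, and for a running
 * sampling-mode PMC post a sample and reload the counter.
 */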
static int
arm64_intr(struct trapframe *tf)
{
        int retval, ri;
        struct pmc *pm;
        int error;
        int reg, cpu;

        cpu = curcpu;
        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[arm64,%d] CPU %d out of range", __LINE__, cpu));

        PMCDBG3(MDP, INT, 1, "cpu=%d tf=%p um=%d", cpu, (void *)tf,
            TRAPF_USERMODE(tf));

        retval = 0;

        for (ri = 0; ri < arm64_npmcs; ri++) {
                pm = arm64_pcpu[cpu]->pc_arm64pmcs[ri].phw_pmc;
                if (pm == NULL)
                        continue;
                /* Check if the counter has overflowed. */
                reg = (1 << ri);
                if ((READ_SPECIALREG(pmovsclr_el0) & reg) == 0)
                        continue;
                /* Clear Overflow Flag */
                WRITE_SPECIALREG(pmovsclr_el0, reg);

                isb();

                retval = 1; /* Found an interrupting PMC. */

                pm->pm_pcpu_state[cpu].pps_overflowcnt += 1;

                if (!PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
                        continue;

                if (pm->pm_state != PMC_STATE_RUNNING)
                        continue;

                error = pmc_process_interrupt(PMC_HR, pm, tf);
                if (error)
                        arm64_stop_pmc(cpu, ri);

                /* Reload sampling count */
                arm64_write_pmc(cpu, ri, pm->pm_sc.pm_reloadcount);
        }

        return (retval);
}

static int
arm64_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
        char arm64_name[PMC_NAME_MAX];
        struct pmc_hw *phw;
        int error;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[arm64,%d] illegal CPU %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < arm64_npmcs,
            ("[arm64,%d] row-index %d out of range", __LINE__, ri));

        phw = &arm64_pcpu[cpu]->pc_arm64pmcs[ri];
        snprintf(arm64_name, sizeof(arm64_name), "ARMV8-%d", ri);
        if ((error = copystr(arm64_name, pi->pm_name, PMC_NAME_MAX,
            NULL)) != 0)
                return (error);
        pi->pm_class = PMC_CLASS_ARMV8;
        if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
                pi->pm_enabled = TRUE;
                *ppmc = phw->phw_pmc;
        } else {
                pi->pm_enabled = FALSE;
                *ppmc = NULL;
        }

        return (0);
}

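/*
 * Return the PMC currently configured on row 'ri' of 'cpu', if any.
 */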
static int
arm64_get_config(int cpu, int ri, struct pmc **ppm)
{

        *ppm = arm64_pcpu[cpu]->pc_arm64pmcs[ri].phw_pmc;

        return (0);
}

/*
 * XXX don't know what we should do here.
 */
static int
arm64_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
{

        return (0);
}

static int
arm64_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
{

        return (0);
}

static int
arm64_pcpu_init(struct pmc_mdep *md, int cpu)
{
        struct arm64_cpu *pac;
        struct pmc_hw  *phw;
        struct pmc_cpu *pc;
        uint64_t pmcr;
        int first_ri;
        int i;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[arm64,%d] wrong cpu number %d", __LINE__, cpu));
        PMCDBG1(MDP, INI, 1, "arm64-init cpu=%d", cpu);

        arm64_pcpu[cpu] = pac = malloc(sizeof(struct arm64_cpu), M_PMC,
            M_WAITOK | M_ZERO);

        pac->pc_arm64pmcs = malloc(sizeof(struct pmc_hw) * arm64_npmcs,
            M_PMC, M_WAITOK | M_ZERO);
        pc = pmc_pcpu[cpu];
        first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_ARMV8].pcd_ri;
        KASSERT(pc != NULL, ("[arm64,%d] NULL per-cpu pointer", __LINE__));

        for (i = 0, phw = pac->pc_arm64pmcs; i < arm64_npmcs; i++, phw++) {
                phw->phw_state    = PMC_PHW_FLAG_IS_ENABLED |
                    PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(i);
                phw->phw_pmc      = NULL;
                pc->pc_hwpmcs[i + first_ri] = phw;
        }

        /*
         * Disable all counters and overflow interrupts. Upon reset they are in
         * an undefined state.
         *
         * Don't issue an isb here, just wait for the one in arm64_pmcr_write()
         * to make the writes visible.
         */
        WRITE_SPECIALREG(pmcntenclr_el0, 0xffffffff);
        WRITE_SPECIALREG(pmintenclr_el1, 0xffffffff);

        /* Enable unit */
        pmcr = arm64_pmcr_read();
        pmcr |= PMCR_E;
        arm64_pmcr_write(pmcr);

        return (0);
}

static int
arm64_pcpu_fini(struct pmc_mdep *md, int cpu)
{
        uint32_t pmcr;

        pmcr = arm64_pmcr_read();
        pmcr &= ~PMCR_E;
        arm64_pmcr_write(pmcr);

        return (0);
}

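/*
 * Machine-dependent initialization: size the PMU from PMCR_EL0,
 * identify the CPU, and register the ARMV8 class (and any optional
 * uncore classes) with the machine-independent hwpmc code.
 */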
struct pmc_mdep *
pmc_arm64_initialize(void)
{
        struct pmc_mdep *pmc_mdep;
        struct pmc_classdep *pcd;
        int classes, idcode, impcode;
        int reg;
        uint64_t midr;

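        /*
         * PMCR_EL0.N gives the number of event counters; the IMP and
         * IDCODE fields identify the implementer and the particular
         * PMU implementation.
         */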
        reg = arm64_pmcr_read();
        arm64_npmcs = (reg & PMCR_N_MASK) >> PMCR_N_SHIFT;
        impcode = (reg & PMCR_IMP_MASK) >> PMCR_IMP_SHIFT;
        idcode = (reg & PMCR_IDCODE_MASK) >> PMCR_IDCODE_SHIFT;

        PMCDBG1(MDP, INI, 1, "arm64-init npmcs=%d", arm64_npmcs);

        /*
         * Write the CPU model to kern.hwpmc.cpuid.
         *
         * We zero the variant and revision fields.
         *
         * TODO: how to handle differences between cores due to big.LITTLE?
         * For now, just use MIDR from CPU 0.
         */
        midr = (uint64_t)(pcpu_find(0)->pc_midr);
        midr &= ~(CPU_VAR_MASK | CPU_REV_MASK);
        snprintf(pmc_cpuid, sizeof(pmc_cpuid), "0x%016lx", midr);

        /*
         * Allocate space for pointers to PMC HW descriptors and for
         * the MDEP structure used by MI code.
         */
        arm64_pcpu = malloc(sizeof(struct arm64_cpu *) * pmc_cpu_max(),
                M_PMC, M_WAITOK | M_ZERO);

        /* One AArch64 CPU class */
        classes = 1;

#ifdef DEV_ACPI
        /* Query presence of optional classes and set max class. */
        if (pmc_cmn600_nclasses() > 0)
                classes = MAX(classes, PMC_MDEP_CLASS_INDEX_CMN600);
        if (pmc_dmc620_nclasses() > 0)
                classes = MAX(classes, PMC_MDEP_CLASS_INDEX_DMC620_C);
#endif

        pmc_mdep = pmc_mdep_alloc(classes);

        switch (impcode) {
        case PMCR_IMP_ARM:
                switch (idcode) {
                case PMCR_IDCODE_CORTEX_A76:
                case PMCR_IDCODE_NEOVERSE_N1:
                        pmc_mdep->pmd_cputype = PMC_CPU_ARMV8_CORTEX_A76;
                        break;
                case PMCR_IDCODE_CORTEX_A57:
                case PMCR_IDCODE_CORTEX_A72:
                        pmc_mdep->pmd_cputype = PMC_CPU_ARMV8_CORTEX_A57;
                        break;
                default:
                case PMCR_IDCODE_CORTEX_A53:
                        pmc_mdep->pmd_cputype = PMC_CPU_ARMV8_CORTEX_A53;
                        break;
                }
                break;
        default:
                pmc_mdep->pmd_cputype = PMC_CPU_ARMV8_CORTEX_A53;
                break;
        }

        pcd = &pmc_mdep->pmd_classdep[PMC_MDEP_CLASS_INDEX_ARMV8];
        pcd->pcd_caps  = ARMV8_PMC_CAPS;
        pcd->pcd_class = PMC_CLASS_ARMV8;
        pcd->pcd_num   = arm64_npmcs;
        pcd->pcd_ri    = pmc_mdep->pmd_npmc;
        pcd->pcd_width = 32;

        pcd->pcd_allocate_pmc   = arm64_allocate_pmc;
        pcd->pcd_config_pmc     = arm64_config_pmc;
        pcd->pcd_pcpu_fini      = arm64_pcpu_fini;
        pcd->pcd_pcpu_init      = arm64_pcpu_init;
        pcd->pcd_describe       = arm64_describe;
        pcd->pcd_get_config     = arm64_get_config;
        pcd->pcd_read_pmc       = arm64_read_pmc;
        pcd->pcd_release_pmc    = arm64_release_pmc;
        pcd->pcd_start_pmc      = arm64_start_pmc;
        pcd->pcd_stop_pmc       = arm64_stop_pmc;
        pcd->pcd_write_pmc      = arm64_write_pmc;

        pmc_mdep->pmd_intr       = arm64_intr;
        pmc_mdep->pmd_switch_in  = arm64_switch_in;
        pmc_mdep->pmd_switch_out = arm64_switch_out;

        pmc_mdep->pmd_npmc   += arm64_npmcs;

#ifdef DEV_ACPI
        if (pmc_cmn600_nclasses() > 0)
                pmc_cmn600_initialize(pmc_mdep);
        if (pmc_dmc620_nclasses() > 0) {
                pmc_dmc620_initialize_cd2(pmc_mdep);
                pmc_dmc620_initialize_c(pmc_mdep);
        }
#endif

        return (pmc_mdep);
}

void
pmc_arm64_finalize(struct pmc_mdep *md)
{

}
