FreeBSD/Linux Kernel Cross Reference
sys/dev/hwpmc/hwpmc_cmn600.c

    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause
    3  *
    4  * Copyright (c) 2003-2008 Joseph Koshy
    5  * Copyright (c) 2007 The FreeBSD Foundation
    6  * Copyright (c) 2021-2022 ARM Ltd
    7  *
    8  * Portions of this software were developed by A. Joseph Koshy under
    9  * sponsorship from the FreeBSD Foundation and Google, Inc.
   10  *
   11  * Redistribution and use in source and binary forms, with or without
   12  * modification, are permitted provided that the following conditions
   13  * are met:
   14  * 1. Redistributions of source code must retain the above copyright
   15  *    notice, this list of conditions and the following disclaimer.
   16  * 2. Redistributions in binary form must reproduce the above copyright
   17  *    notice, this list of conditions and the following disclaimer in the
   18  *    documentation and/or other materials provided with the distribution.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   30  * SUCH DAMAGE.
   31  */
   32 
   33 /* Arm CoreLink CMN-600 Coherent Mesh Network PMU Driver */
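       /*
        * Overview: each crosspoint (XP) in the mesh has a Debug Trace Monitor
        * (DTM) with four 16-bit local event counters, and each Debug Trace
        * Controller (DTC) provides eight global counters (POR_DT_PMEVCNT) that
        * the local counters are paired with.  hwpmc exposes CMN600_COUNTERS_N
        * row indices per CMN-600 unit; register access goes through the
        * pmu_cmn600_*() helpers of the cmn600 unit driver (see
        * <machine/cmn600_reg.h>).
        */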
   34 
   35 #include <sys/cdefs.h>
   36 __FBSDID("$FreeBSD$");
   37 
   38 #include "opt_acpi.h"
   39 
   40 /*
   41  * This depends on ACPI, but is built unconditionally in the hwpmc module.
   42  */
   43 #ifdef DEV_ACPI
   44 #include <sys/param.h>
   45 #include <sys/lock.h>
   46 #include <sys/malloc.h>
   47 #include <sys/module.h>
   48 #include <sys/mutex.h>
   49 #include <sys/pmc.h>
   50 #include <sys/pmckern.h>
   51 #include <sys/systm.h>
   52 
   53 #include <machine/cmn600_reg.h>
   54 
   55 struct cmn600_descr {
   56         struct pmc_descr pd_descr;  /* "base class" */
   57         void            *pd_rw_arg; /* Argument to use with read/write */
   58         struct pmc      *pd_pmc;
   59         struct pmc_hw   *pd_phw;
   60         uint32_t         pd_nodeid;
   61         int32_t          pd_node_type;
   62         int              pd_local_counter;
   63 
   64 };
   65 
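       /*
        * One descriptor per row index, i.e. one per DTC global counter across
        * all CMN-600 units; class_ri2unit() maps a row index back to its unit.
        */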
   66 static struct cmn600_descr **cmn600_pmcdesc;
   67 
   68 static struct cmn600_pmc cmn600_pmcs[CMN600_UNIT_MAX];
   69 static int cmn600_units = 0;
   70 
   71 static inline struct cmn600_descr *
   72 cmn600desc(int ri)
   73 {
   74 
   75         return (cmn600_pmcdesc[ri]);
   76 }
   77 
   78 static inline int
   79 class_ri2unit(int ri)
   80 {
   81 
   82         return (ri / CMN600_COUNTERS_N);
   83 }
   84 
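       /*
        * Two 32-bit DTC global counters share one 64-bit POR_DT_PMEVCNT
        * register.  EVENCNTR()/ODDCNTR() pick the even or odd half and shift
        * it above the 16-bit XP-local counter, so that the DTC part forms the
        * upper bits of the value assembled in cmn600_pmu_readcntr() below.
        */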
   85 #define EVENCNTR(x)     (((x) >> POR_DT_PMEVCNT_EVENCNT_SHIFT) << \
   86     POR_DTM_PMEVCNT_CNTR_WIDTH)
   87 #define ODDCNTR(x)      (((x) >> POR_DT_PMEVCNT_ODDCNT_SHIFT) << \
   88     POR_DTM_PMEVCNT_CNTR_WIDTH)
   89 
   90 static uint64_t
   91 cmn600_pmu_readcntr(void *arg, u_int nodeid, u_int xpcntr, u_int dtccntr,
   92     u_int width)
   93 {
   94         uint64_t dtcval, xpval;
   95 
   96         KASSERT(xpcntr < 4, ("[cmn600,%d] XP counter number %d is too big."
   97             " Max: 3", __LINE__, xpcntr));
   98         KASSERT(dtccntr < 8, ("[cmn600,%d] Global counter number %d is too"
   99             " big. Max: 7", __LINE__, dtccntr));
  100 
  101         dtcval = pmu_cmn600_rd8(arg, nodeid, NODE_TYPE_DTC,
  102             POR_DT_PMEVCNT(dtccntr >> 1));
  103         if (width == 4) {
  104                 dtcval = (dtccntr & 1) ? ODDCNTR(dtcval) : EVENCNTR(dtcval);
  105                 dtcval &= 0xffffffff0000UL;
  106         } else
  107                 dtcval <<= POR_DTM_PMEVCNT_CNTR_WIDTH;
  108 
  109         xpval = pmu_cmn600_rd8(arg, nodeid, NODE_TYPE_XP, POR_DTM_PMEVCNT);
  110         xpval >>= xpcntr * POR_DTM_PMEVCNT_CNTR_WIDTH;
  111         xpval &= 0xffffUL;
  112         return (dtcval | xpval);
  113 }
  114 
  115 static void
  116 cmn600_pmu_writecntr(void *arg, u_int nodeid, u_int xpcntr, u_int dtccntr,
  117     u_int width, uint64_t val)
  118 {
  119         int shift;
  120 
  121         KASSERT(xpcntr < 4, ("[cmn600,%d] XP counter number %d is too big."
  122             " Max: 3", __LINE__, xpcntr));
  123         KASSERT(dtccntr < 8, ("[cmn600,%d] Global counter number %d is too"
  124             " big. Max: 7", __LINE__, dtccntr));
  125 
  126         if (width == 4) {
  127                 shift = (dtccntr & 1) ? POR_DT_PMEVCNT_ODDCNT_SHIFT :
  128                     POR_DT_PMEVCNT_EVENCNT_SHIFT;
  129                 pmu_cmn600_md8(arg, nodeid, NODE_TYPE_DTC,
  130                     POR_DT_PMEVCNT(dtccntr >> 1), 0xffffffffUL << shift,
  131                     ((val >> POR_DTM_PMEVCNT_CNTR_WIDTH) & 0xffffffff) << shift);
  132         } else
  133                 pmu_cmn600_wr8(arg, nodeid, NODE_TYPE_DTC,
  134                     POR_DT_PMEVCNT(dtccntr & ~0x1), val >>
  135                     POR_DTM_PMEVCNT_CNTR_WIDTH);
  136 
  137         shift = xpcntr * POR_DTM_PMEVCNT_CNTR_WIDTH;
  138         val &= 0xffffUL;
  139         pmu_cmn600_md8(arg, nodeid, NODE_TYPE_XP, POR_DTM_PMEVCNT,
  140             0xffffUL << shift, val << shift);
  141 }
  142 
  143 #undef  EVENCNTR
  144 #undef  ODDCNTR
  145 
  146 /*
  147  * read a pmc register
  148  */
  149 static int
  150 cmn600_read_pmc(int cpu, int ri, pmc_value_t *v)
  151 {
  152         int counter, local_counter, nodeid;
  153         struct cmn600_descr *desc;
  154         struct pmc *pm;
  155         void *arg;
  156 
  157         KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
  158             ("[cmn600,%d] illegal CPU value %d", __LINE__, cpu));
  159         KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
  160             ri));
  161 
   162         counter = ri % CMN600_COUNTERS_N;
   163         desc = cmn600desc(ri);
   164         pm = desc->pd_phw->phw_pmc;
   165 
   166         KASSERT(pm != NULL,
   167             ("[cmn600,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__,
   168                 cpu, ri));
   169 
   170         arg = desc->pd_rw_arg;
   171         nodeid = pm->pm_md.pm_cmn600.pm_cmn600_nodeid;
   172         local_counter = pm->pm_md.pm_cmn600.pm_cmn600_local_counter;
  173         *v = cmn600_pmu_readcntr(arg, nodeid, local_counter, counter, 4);
  174         PMCDBG3(MDP, REA, 2, "%s id=%d -> %jd", __func__, ri, *v);
  175 
  176         return (0);
  177 }
  178 
  179 /*
  180  * Write a pmc register.
  181  */
  182 static int
  183 cmn600_write_pmc(int cpu, int ri, pmc_value_t v)
  184 {
  185         int counter, local_counter, nodeid;
  186         struct cmn600_descr *desc;
  187         struct pmc *pm;
  188         void *arg;
  189 
  190         KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
  191             ("[cmn600,%d] illegal CPU value %d", __LINE__, cpu));
  192         KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
  193             ri));
  194 
   195         counter = ri % CMN600_COUNTERS_N;
   196         desc = cmn600desc(ri);
   197         pm = desc->pd_phw->phw_pmc;
   198 
   199         KASSERT(pm != NULL,
   200             ("[cmn600,%d] PMC not owned (cpu%d,pmc%d)", __LINE__,
   201                 cpu, ri));
   202 
   203         arg = desc->pd_rw_arg;
   204         nodeid = pm->pm_md.pm_cmn600.pm_cmn600_nodeid;
   205         local_counter = pm->pm_md.pm_cmn600.pm_cmn600_local_counter;
  206         PMCDBG4(MDP, WRI, 1, "%s cpu=%d ri=%d v=%jx", __func__, cpu, ri, v);
  207 
  208         cmn600_pmu_writecntr(arg, nodeid, local_counter, counter, 4, v);
  209         return (0);
  210 }
  211 
  212 /*
  213  * configure hardware pmc according to the configuration recorded in
  214  * pmc 'pm'.
  215  */
  216 static int
  217 cmn600_config_pmc(int cpu, int ri, struct pmc *pm)
  218 {
  219         struct pmc_hw *phw;
  220 
  221         PMCDBG4(MDP, CFG, 1, "%s cpu=%d ri=%d pm=%p", __func__, cpu, ri, pm);
  222 
  223         KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
  224             ("[cmn600,%d] illegal CPU value %d", __LINE__, cpu));
  225         KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
  226             ri));
  227 
  228         phw = cmn600desc(ri)->pd_phw;
  229 
  230         KASSERT(pm == NULL || phw->phw_pmc == NULL,
  231             ("[cmn600,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
  232                 __LINE__, pm, phw->phw_pmc));
  233 
  234         phw->phw_pmc = pm;
  235         return (0);
  236 }
  237 
  238 /*
  239  * Retrieve a configured PMC pointer from hardware state.
  240  */
  241 static int
  242 cmn600_get_config(int cpu, int ri, struct pmc **ppm)
  243 {
  244 
  245         *ppm = cmn600desc(ri)->pd_phw->phw_pmc;
  246 
  247         return (0);
  248 }
  249 
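       /*
        * Map an hwpmc event code onto a (node type, hardware event number)
        * pair.  DVM-node events are encoded differently before and after
        * revision 0x200 of the part (as reported by pmu_cmn600_rev()), hence
        * the per-revision switch below; the other node types use a fixed
        * offset from the first event of their group.
        */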
  250 #define CASE_DN_VER_EVT(n, id) case PMC_EV_CMN600_PMU_ ## n: { *event = id; \
  251         return (0); }
  252 static int
  253 cmn600_map_ev2event(int ev, int rev, int *node_type, uint8_t *event)
  254 {
  255         if (ev < PMC_EV_CMN600_PMU_dn_rxreq_dvmop ||
  256             ev > PMC_EV_CMN600_PMU_rni_rdb_ord)
  257                 return (EINVAL);
  258         if (ev <= PMC_EV_CMN600_PMU_dn_rxreq_trk_full) {
  259                 *node_type = NODE_TYPE_DVM;
  260                 if (rev < 0x200) {
  261                         switch (ev) {
  262                         CASE_DN_VER_EVT(dn_rxreq_dvmop, 1);
  263                         CASE_DN_VER_EVT(dn_rxreq_dvmsync, 2);
  264                         CASE_DN_VER_EVT(dn_rxreq_dvmop_vmid_filtered, 3);
  265                         CASE_DN_VER_EVT(dn_rxreq_retried, 4);
  266                         CASE_DN_VER_EVT(dn_rxreq_trk_occupancy, 5);
  267                         }
  268                 } else {
  269                         switch (ev) {
  270                         CASE_DN_VER_EVT(dn_rxreq_tlbi_dvmop, 0x01);
  271                         CASE_DN_VER_EVT(dn_rxreq_bpi_dvmop, 0x02);
  272                         CASE_DN_VER_EVT(dn_rxreq_pici_dvmop, 0x03);
  273                         CASE_DN_VER_EVT(dn_rxreq_vivi_dvmop, 0x04);
  274                         CASE_DN_VER_EVT(dn_rxreq_dvmsync, 0x05);
  275                         CASE_DN_VER_EVT(dn_rxreq_dvmop_vmid_filtered, 0x06);
  276                         CASE_DN_VER_EVT(dn_rxreq_dvmop_other_filtered, 0x07);
  277                         CASE_DN_VER_EVT(dn_rxreq_retried, 0x08);
  278                         CASE_DN_VER_EVT(dn_rxreq_snp_sent, 0x09);
  279                         CASE_DN_VER_EVT(dn_rxreq_snp_stalled, 0x0a);
  280                         CASE_DN_VER_EVT(dn_rxreq_trk_full, 0x0b);
  281                         CASE_DN_VER_EVT(dn_rxreq_trk_occupancy, 0x0c);
  282                         }
  283                 }
  284                 return (EINVAL);
  285         } else if (ev <= PMC_EV_CMN600_PMU_hnf_snp_fwded) {
  286                 *node_type = NODE_TYPE_HN_F;
  287                 *event = ev - PMC_EV_CMN600_PMU_hnf_cache_miss;
  288                 return (0);
  289         } else if (ev <= PMC_EV_CMN600_PMU_hni_pcie_serialization) {
  290                 *node_type = NODE_TYPE_HN_I;
  291                 *event = ev - PMC_EV_CMN600_PMU_hni_rrt_rd_occ_cnt_ovfl;
  292                 return (0);
  293         } else if (ev <= PMC_EV_CMN600_PMU_xp_partial_dat_flit) {
  294                 *node_type = NODE_TYPE_XP;
  295                 *event = ev - PMC_EV_CMN600_PMU_xp_txflit_valid;
  296                 return (0);
  297         } else if (ev <= PMC_EV_CMN600_PMU_sbsx_txrsp_stall) {
  298                 *node_type = NODE_TYPE_SBSX;
  299                 *event = ev - PMC_EV_CMN600_PMU_sbsx_rd_req;
  300                 return (0);
  301         } else if (ev <= PMC_EV_CMN600_PMU_rnd_rdb_ord) {
  302                 *node_type = NODE_TYPE_RN_D;
  303                 *event = ev - PMC_EV_CMN600_PMU_rnd_s0_rdata_beats;
  304                 return (0);
  305         } else if (ev <= PMC_EV_CMN600_PMU_rni_rdb_ord) {
  306                 *node_type = NODE_TYPE_RN_I;
  307                 *event = ev - PMC_EV_CMN600_PMU_rni_s0_rdata_beats;
  308                 return (0);
  309         } else if (ev <= PMC_EV_CMN600_PMU_cxha_snphaz_occ) {
  310                 *node_type = NODE_TYPE_CXHA;
  311                 *event = ev - PMC_EV_CMN600_PMU_cxha_rddatbyp;
  312                 return (0);
  313         } else if (ev <= PMC_EV_CMN600_PMU_cxra_ext_dat_stall) {
  314                 *node_type = NODE_TYPE_CXRA;
  315                 *event = ev - PMC_EV_CMN600_PMU_cxra_req_trk_occ;
  316                 return (0);
  317         } else if (ev <= PMC_EV_CMN600_PMU_cxla_avg_latency_form_tx_tlp) {
  318                 *node_type = NODE_TYPE_CXLA;
  319                 *event = ev - PMC_EV_CMN600_PMU_cxla_rx_tlp_link0;
  320                 return (0);
  321         }
  322         return (EINVAL);
  323 }
  324 
  325 /*
  326  * Check if a given allocation is feasible.
  327  */
  328 
  329 static int
  330 cmn600_allocate_pmc(int cpu, int ri, struct pmc *pm,
  331     const struct pmc_op_pmcallocate *a)
  332 {
  333         struct cmn600_descr *desc;
  334         const struct pmc_descr *pd;
  335         uint64_t caps __unused;
  336         int local_counter, node_type;
  337         enum pmc_event pe;
  338         void *arg;
  339         uint8_t e;
  340         int err;
  341 
  342         (void) cpu;
  343 
  344         KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
  345             ("[cmn600,%d] illegal CPU value %d", __LINE__, cpu));
  346         KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
  347             ri));
  348 
  349         desc = cmn600desc(ri);
  350         arg = desc->pd_rw_arg;
  351         pd = &desc->pd_descr;
  352         if (cmn600_pmcs[class_ri2unit(ri)].domain != pcpu_find(cpu)->pc_domain)
  353                 return (EINVAL);
  354 
  355         /* check class match */
  356         if (pd->pd_class != a->pm_class)
  357                 return (EINVAL);
  358 
  359         caps = pm->pm_caps;
  360 
   361         PMCDBG3(MDP, ALL, 1, "%s ri=%d caps=0x%jx", __func__, ri,
   362             (uintmax_t)caps);
  363         pe = a->pm_ev;
  364         err = cmn600_map_ev2event(pe, pmu_cmn600_rev(arg), &node_type, &e);
  365         if (err != 0)
  366                 return (err);
  367         err = pmu_cmn600_alloc_localpmc(arg,
  368             a->pm_md.pm_cmn600.pma_cmn600_nodeid, node_type, &local_counter);
  369         if (err != 0)
  370                 return (err);
  371 
  372         pm->pm_md.pm_cmn600.pm_cmn600_config =
  373             a->pm_md.pm_cmn600.pma_cmn600_config;
  374         pm->pm_md.pm_cmn600.pm_cmn600_occupancy =
  375             a->pm_md.pm_cmn600.pma_cmn600_occupancy;
  376         desc->pd_nodeid = pm->pm_md.pm_cmn600.pm_cmn600_nodeid =
  377             a->pm_md.pm_cmn600.pma_cmn600_nodeid;
  378         desc->pd_node_type = pm->pm_md.pm_cmn600.pm_cmn600_node_type =
  379             node_type;
  380         pm->pm_md.pm_cmn600.pm_cmn600_event = e;
  381         desc->pd_local_counter = pm->pm_md.pm_cmn600.pm_cmn600_local_counter =
  382             local_counter;
  383 
  384         return (0);
  385 }
  386 
  387 /* Release machine dependent state associated with a PMC. */
  388 
  389 static int
  390 cmn600_release_pmc(int cpu, int ri, struct pmc *pmc)
  391 {
  392         struct cmn600_descr *desc;
  393         struct pmc_hw *phw;
  394         struct pmc *pm __diagused;
  395         int err;
  396 
  397         (void) pmc;
  398 
  399         KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
  400             ("[cmn600,%d] illegal CPU value %d", __LINE__, cpu));
  401         KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
  402             ri));
  403 
  404         desc = cmn600desc(ri);
  405         phw = desc->pd_phw;
  406         pm  = phw->phw_pmc;
  407         err = pmu_cmn600_free_localpmc(desc->pd_rw_arg, desc->pd_nodeid,
  408             desc->pd_node_type, desc->pd_local_counter);
  409         if (err != 0)
  410                 return (err);
  411 
  412         KASSERT(pm == NULL, ("[cmn600,%d] PHW pmc %p non-NULL", __LINE__, pm));
  413 
  414         return (0);
  415 }
  416 
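       /*
        * Encode the DTM input-select value for a local counter: XP-internal
        * events use the 0x4 | counter encoding, while events exported by a
        * device node are selected by the device port (bit 2 of the node ID)
        * and sub-node (bits 1:0), as decoded in cmn600_start_pmc().
        */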
  417 static inline uint64_t
  418 cmn600_encode_source(int node_type, int counter, int port, int sub)
  419 {
  420 
  421         /* Calculate pmevcnt0_input_sel based on list in Table 3-794. */
  422         if (node_type == NODE_TYPE_XP)
  423                 return (0x4 | counter);
   424
  425         return (((port + 1) << 4) | (sub << 2) | counter);
  426 }
  427 
  428 /*
  429  * start a PMC.
  430  */
  431 
  432 static int
  433 cmn600_start_pmc(int cpu, int ri)
  434 {
  435         int counter, local_counter, node_type, shift;
  436         uint64_t config, occupancy, source, xp_pmucfg;
  437         struct cmn600_descr *desc;
  438         struct pmc_hw *phw;
  439         struct pmc *pm;
  440         uint8_t event, port, sub;
  441         uint16_t nodeid;
  442         void *arg;
  443 
  444         KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
  445             ("[cmn600,%d] illegal CPU value %d", __LINE__, cpu));
  446         KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
  447             ri));
  448 
  449         counter = ri % CMN600_COUNTERS_N;
  450         desc = cmn600desc(ri);
  451         phw = desc->pd_phw;
  452         pm  = phw->phw_pmc;
  453         arg = desc->pd_rw_arg;
  454 
  455         KASSERT(pm != NULL,
  456             ("[cmn600,%d] starting cpu%d,pmc%d with null pmc record", __LINE__,
  457                 cpu, ri));
  458 
  459         PMCDBG3(MDP, STA, 1, "%s cpu=%d ri=%d", __func__, cpu, ri);
  460 
  461         config = pm->pm_md.pm_cmn600.pm_cmn600_config;
  462         occupancy = pm->pm_md.pm_cmn600.pm_cmn600_occupancy;
  463         node_type = pm->pm_md.pm_cmn600.pm_cmn600_node_type;
  464         event = pm->pm_md.pm_cmn600.pm_cmn600_event;
  465         nodeid = pm->pm_md.pm_cmn600.pm_cmn600_nodeid;
  466         local_counter = pm->pm_md.pm_cmn600.pm_cmn600_local_counter;
  467         port = (nodeid >> 2) & 1;
  468         sub = nodeid & 3;
  469 
  470         switch (node_type) {
  471         case NODE_TYPE_DVM:
  472         case NODE_TYPE_HN_F:
  473         case NODE_TYPE_CXHA:
  474         case NODE_TYPE_CXRA:
  475                 pmu_cmn600_md8(arg, nodeid, node_type,
  476                     CMN600_COMMON_PMU_EVENT_SEL,
  477                     CMN600_COMMON_PMU_EVENT_SEL_OCC_MASK,
  478                     occupancy << CMN600_COMMON_PMU_EVENT_SEL_OCC_SHIFT);
  479                 break;
  480         case NODE_TYPE_XP:
  481                 /* Set PC and Interface.*/
  482                 event |= config;
  483         }
  484 
   485         /*
   486          * 5.5.1 Set up PMU counters:
   487          * 1. Ensure that the NIDEN input is asserted (hardware side). */
   488         /* 2. Select the target node's event for one of four outputs. */
  489         pmu_cmn600_md8(arg, nodeid, node_type, CMN600_COMMON_PMU_EVENT_SEL,
  490             0xff << (local_counter * 8),
  491             event << (local_counter * 8));
  492 
  493         xp_pmucfg = pmu_cmn600_rd8(arg, nodeid, NODE_TYPE_XP,
  494             POR_DTM_PMU_CONFIG);
  495         /*
  496          * 3. configure XP to connect one of four target node outputs to local
  497          * counter.
  498          */
  499         source = cmn600_encode_source(node_type, local_counter, port, sub);
  500         shift = (local_counter * POR_DTM_PMU_CONFIG_VCNT_INPUT_SEL_WIDTH) +
  501             POR_DTM_PMU_CONFIG_VCNT_INPUT_SEL_SHIFT;
  502         xp_pmucfg &= ~(0xffUL << shift);
  503         xp_pmucfg |= source << shift;
  504 
  505         /* 4. Pair with global counters A, B, C, ..., H. */
  506         shift = (local_counter * 4) + 16;
  507         xp_pmucfg &= ~(0xfUL << shift);
  508         xp_pmucfg |= counter << shift;
  509         /* Enable pairing.*/
  510         xp_pmucfg |= 1 << (local_counter + 4);
  511 
  512         /* 5. Combine local counters 0 with 1, 2 with 3 or all four. */
  513         xp_pmucfg &= ~0xeUL;
  514 
  515         /* 6. Enable XP's PMU function. */
  516         xp_pmucfg |= POR_DTM_PMU_CONFIG_PMU_EN;
  517         pmu_cmn600_wr8(arg, nodeid, NODE_TYPE_XP, POR_DTM_PMU_CONFIG, xp_pmucfg);
  518         if (node_type == NODE_TYPE_CXLA)
  519                 pmu_cmn600_set8(arg, nodeid, NODE_TYPE_CXLA,
  520                     POR_CXG_RA_CFG_CTL, EN_CXLA_PMUCMD_PROP);
  521 
  522         /* 7. Enable DTM. */
  523         pmu_cmn600_set8(arg, nodeid, NODE_TYPE_XP, POR_DTM_CONTROL,
  524             POR_DTM_CONTROL_DTM_ENABLE);
  525 
  526         /* 8. Reset grouping of global counters. Use 32 bits. */
  527         pmu_cmn600_clr8(arg, nodeid, NODE_TYPE_DTC, POR_DT_PMCR,
  528             POR_DT_PMCR_CNTCFG_MASK);
  529 
  530         /* 9. Enable DTC. */
  531         pmu_cmn600_set8(arg, nodeid, NODE_TYPE_DTC, POR_DT_DTC_CTL,
  532             POR_DT_DTC_CTL_DT_EN);
  533 
  534         /* 10. Enable Overflow Interrupt. */
  535         pmu_cmn600_set8(arg, nodeid, NODE_TYPE_DTC, POR_DT_PMCR,
  536             POR_DT_PMCR_OVFL_INTR_EN);
  537 
  538         /* 11. Run PMC. */
  539         pmu_cmn600_set8(arg, nodeid, NODE_TYPE_DTC, POR_DT_PMCR,
  540             POR_DT_PMCR_PMU_EN);
  541 
  542         return (0);
  543 }
  544 
  545 /*
  546  * Stop a PMC.
  547  */
  548 
  549 static int
  550 cmn600_stop_pmc(int cpu, int ri)
  551 {
  552         struct cmn600_descr *desc;
  553         struct pmc_hw *phw;
  554         struct pmc *pm;
  555         int local_counter;
  556         uint64_t val;
  557 
  558         KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
  559             ("[cmn600,%d] illegal CPU value %d", __LINE__, cpu));
  560         KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
  561             ri));
  562 
  563         desc = cmn600desc(ri);
  564         phw = desc->pd_phw;
  565         pm  = phw->phw_pmc;
  566 
  567         KASSERT(pm != NULL,
  568             ("[cmn600,%d] cpu%d,pmc%d no PMC to stop", __LINE__,
  569                 cpu, ri));
  570 
  571         PMCDBG2(MDP, STO, 1, "%s ri=%d", __func__, ri);
  572 
  573         /* Disable pairing. */
  574         local_counter = pm->pm_md.pm_cmn600.pm_cmn600_local_counter;
  575         pmu_cmn600_clr8(desc->pd_rw_arg, pm->pm_md.pm_cmn600.pm_cmn600_nodeid,
  576             NODE_TYPE_XP, POR_DTM_PMU_CONFIG, (1 << (local_counter + 4)));
  577 
   578         /* Shut down the XP's DTM function if no counters remain paired. */
   579         val = pmu_cmn600_rd8(desc->pd_rw_arg,
   580             pm->pm_md.pm_cmn600.pm_cmn600_nodeid, NODE_TYPE_XP,
   581             POR_DTM_PMU_CONFIG);
   582         if ((val & 0xf0) == 0)  /* Pairing-enable bits 4-7 all clear. */
   583                 pmu_cmn600_clr8(desc->pd_rw_arg,
   584                     pm->pm_md.pm_cmn600.pm_cmn600_nodeid, NODE_TYPE_XP,
   585                     POR_DTM_CONTROL, POR_DTM_CONTROL_DTM_ENABLE);
  586 
  587         return (0);
  588 }
  589 
  590 /*
  591  * describe a PMC
  592  */
  593 static int
  594 cmn600_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
  595 {
  596         struct pmc_hw *phw;
  597         size_t copied;
  598         int error;
  599 
  600         KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
  601             ("[cmn600,%d] illegal CPU %d", __LINE__, cpu));
  602         KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
  603             ri));
  604 
  605         phw = cmn600desc(ri)->pd_phw;
  606 
  607         if ((error = copystr(cmn600desc(ri)->pd_descr.pd_name,
  608             pi->pm_name, PMC_NAME_MAX, &copied)) != 0)
  609                 return (error);
  610 
  611         pi->pm_class = cmn600desc(ri)->pd_descr.pd_class;
  612 
  613         if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
  614                 pi->pm_enabled = TRUE;
  615                 *ppmc          = phw->phw_pmc;
  616         } else {
  617                 pi->pm_enabled = FALSE;
  618                 *ppmc          = NULL;
  619         }
  620 
  621         return (0);
  622 }
  623 
  624 /*
  625  * processor dependent initialization.
  626  */
  627 
  628 static int
  629 cmn600_pcpu_init(struct pmc_mdep *md, int cpu)
  630 {
  631         int first_ri, n, npmc;
  632         struct pmc_hw  *phw;
  633         struct pmc_cpu *pc;
  634         int mdep_class;
  635 
  636         mdep_class = PMC_MDEP_CLASS_INDEX_CMN600;
  637         KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
  638             ("[cmn600,%d] insane cpu number %d", __LINE__, cpu));
  639 
  640         PMCDBG1(MDP, INI, 1, "cmn600-init cpu=%d", cpu);
  641 
  642         /*
  643          * Set the content of the hardware descriptors to a known
  644          * state and initialize pointers in the MI per-cpu descriptor.
  645          */
  646 
  647         pc = pmc_pcpu[cpu];
  648         first_ri = md->pmd_classdep[mdep_class].pcd_ri;
  649         npmc = md->pmd_classdep[mdep_class].pcd_num;
  650 
   651         for (n = 0; n < npmc; n++) {
  652                 phw = cmn600desc(n)->pd_phw;
  653                 phw->phw_state = PMC_PHW_CPU_TO_STATE(cpu) |
  654                     PMC_PHW_INDEX_TO_STATE(n);
  655                 /* Set enabled only if unit present. */
  656                 if (cmn600_pmcs[class_ri2unit(n)].arg != NULL)
  657                         phw->phw_state |= PMC_PHW_FLAG_IS_ENABLED;
  658                 phw->phw_pmc = NULL;
  659                 pc->pc_hwpmcs[n + first_ri] = phw;
  660         }
  661         return (0);
  662 }
  663 
  664 /*
  665  * processor dependent cleanup prior to the KLD
  666  * being unloaded
  667  */
  668 
  669 static int
  670 cmn600_pcpu_fini(struct pmc_mdep *md, int cpu)
  671 {
  672 
  673         return (0);
  674 }
  675 
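       /*
        * Overflow-interrupt callback, registered with the cmn600 unit driver
        * via cmn600_pmu_intr_cb() in pmc_cmn600_initialize().  'unit' and 'i'
        * identify the CMN-600 instance and its global counter, which together
        * map back onto the hwpmc row index.
        */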
  676 static int
  677 cmn600_pmu_intr(struct trapframe *tf, int unit, int i)
  678 {
  679         struct pmc_cpu *pc __diagused;
  680         struct pmc_hw *phw;
  681         struct pmc *pm;
  682         int error, cpu, ri;
  683 
  684         ri = i + unit * CMN600_COUNTERS_N;
  685         cpu = curcpu;
  686         KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
  687             ("[cmn600,%d] CPU %d out of range", __LINE__, cpu));
  688         pc = pmc_pcpu[cpu];
  689         KASSERT(pc != NULL, ("pc != NULL"));
  690 
  691         phw = cmn600desc(ri)->pd_phw;
  692         KASSERT(phw != NULL, ("phw != NULL"));
  693         pm  = phw->phw_pmc;
  694         if (pm == NULL)
  695                 return (0);
  696 
  697         if (!PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
  698                 /* Always CPU0. */
  699                 pm->pm_pcpu_state[0].pps_overflowcnt += 1;
  700                 return (0);
  701         }
  702 
  703         if (pm->pm_state != PMC_STATE_RUNNING)
  704                 return (0);
  705 
  706         error = pmc_process_interrupt(PMC_HR, pm, tf);
  707         if (error)
  708                 cmn600_stop_pmc(cpu, ri);
  709 
  710         /* Reload sampling count */
  711         cmn600_write_pmc(cpu, ri, pm->pm_sc.pm_reloadcount);
  712 
  713         return (0);
  714 }
  715 
  716 /*
  717  * Initialize ourselves.
  718  */
  719 static int
  720 cmn600_init_pmc_units(void)
  721 {
  722         int i;
  723 
  724         if (cmn600_units > 0) { /* Already initialized. */
  725                 return (0);
  726         }
  727 
  728         cmn600_units = cmn600_pmc_nunits();
  729         if (cmn600_units == 0)
  730                 return (ENOENT);
  731 
  732         for (i = 0; i < cmn600_units; i++) {
  733                 if (cmn600_pmc_getunit(i, &cmn600_pmcs[i].arg,
  734                     &cmn600_pmcs[i].domain) != 0)
  735                         cmn600_pmcs[i].arg = NULL;
  736         }
  737         return (0);
  738 }
  739 
  740 int
  741 pmc_cmn600_nclasses(void)
  742 {
  743 
  744         if (cmn600_pmc_nunits() > 0)
  745                 return (1);
  746         return (0);
  747 }
  748 
  749 int
  750 pmc_cmn600_initialize(struct pmc_mdep *md)
  751 {
  752         struct pmc_classdep *pcd;
  753         int i, npmc, unit;
  754 
  755         cmn600_init_pmc_units();
  756         KASSERT(md != NULL, ("[cmn600,%d] md is NULL", __LINE__));
  757         KASSERT(cmn600_units < CMN600_UNIT_MAX,
  758             ("[cmn600,%d] cmn600_units too big", __LINE__));
  759 
   760         PMCDBG0(MDP, INI, 1, "cmn600-initialize");
  761 
  762         npmc = CMN600_COUNTERS_N * cmn600_units;
  763         pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_CMN600];
  764 
  765         pcd->pcd_caps           = PMC_CAP_SYSTEM | PMC_CAP_READ |
  766             PMC_CAP_WRITE | PMC_CAP_QUALIFIER | PMC_CAP_INTERRUPT |
  767             PMC_CAP_DOMWIDE;
  768         pcd->pcd_class  = PMC_CLASS_CMN600_PMU;
  769         pcd->pcd_num    = npmc;
  770         pcd->pcd_ri     = md->pmd_npmc;
  771         pcd->pcd_width  = 48;
  772 
  773         pcd->pcd_allocate_pmc   = cmn600_allocate_pmc;
  774         pcd->pcd_config_pmc     = cmn600_config_pmc;
  775         pcd->pcd_describe       = cmn600_describe;
  776         pcd->pcd_get_config     = cmn600_get_config;
  777         pcd->pcd_get_msr        = NULL;
  778         pcd->pcd_pcpu_fini      = cmn600_pcpu_fini;
  779         pcd->pcd_pcpu_init      = cmn600_pcpu_init;
  780         pcd->pcd_read_pmc       = cmn600_read_pmc;
  781         pcd->pcd_release_pmc    = cmn600_release_pmc;
  782         pcd->pcd_start_pmc      = cmn600_start_pmc;
  783         pcd->pcd_stop_pmc       = cmn600_stop_pmc;
  784         pcd->pcd_write_pmc      = cmn600_write_pmc;
  785 
  786         md->pmd_npmc           += npmc;
   787         cmn600_pmcdesc = malloc(sizeof(struct cmn600_descr *) * npmc,
   788             M_PMC, M_WAITOK|M_ZERO);
  789         for (i = 0; i < npmc; i++) {
  790                 cmn600_pmcdesc[i] = malloc(sizeof(struct cmn600_descr), M_PMC,
  791                     M_WAITOK|M_ZERO);
  792 
  793                 unit = i / CMN600_COUNTERS_N;
  794                 KASSERT(unit >= 0, ("unit >= 0"));
  795                 KASSERT(cmn600_pmcs[unit].arg != NULL, ("arg != NULL"));
  796 
  797                 cmn600_pmcdesc[i]->pd_rw_arg = cmn600_pmcs[unit].arg;
  798                 cmn600_pmcdesc[i]->pd_descr.pd_class =
  799                     PMC_CLASS_CMN600_PMU;
  800                 cmn600_pmcdesc[i]->pd_descr.pd_caps = pcd->pcd_caps;
  801                 cmn600_pmcdesc[i]->pd_phw = (struct pmc_hw *)malloc(
  802                     sizeof(struct pmc_hw), M_PMC, M_WAITOK|M_ZERO);
  803                 snprintf(cmn600_pmcdesc[i]->pd_descr.pd_name, 63,
  804                     "CMN600_%d", i);
  805                 cmn600_pmu_intr_cb(cmn600_pmcs[unit].arg, cmn600_pmu_intr);
  806         }
  807 
  808         return (0);
  809 }
  810 
  811 void
  812 pmc_cmn600_finalize(struct pmc_mdep *md)
  813 {
  814         struct pmc_classdep *pcd;
  815         int i, npmc;
  816 
  817         KASSERT(md->pmd_classdep[PMC_MDEP_CLASS_INDEX_CMN600].pcd_class ==
  818             PMC_CLASS_CMN600_PMU, ("[cmn600,%d] pmc class mismatch",
  819             __LINE__));
  820 
  821         pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_CMN600];
  822 
  823         npmc = pcd->pcd_num;
  824         for (i = 0; i < npmc; i++) {
  825                 free(cmn600_pmcdesc[i]->pd_phw, M_PMC);
  826                 free(cmn600_pmcdesc[i], M_PMC);
  827         }
  828         free(cmn600_pmcdesc, M_PMC);
  829         cmn600_pmcdesc = NULL;
  830 }
  831 
  832 MODULE_DEPEND(pmc, cmn600, 1, 1, 1);
  833 #endif /* DEV_ACPI */
