FreeBSD/Linux Kernel Cross Reference
sys/arm64/acpica/acpi_iort.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (C) 2018 Marvell International Ltd.
    5  *
    6  * Author: Jayachandran C Nair <jchandra@freebsd.org>
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  */
   29 
   30 #include "opt_acpi.h"
   31 
   32 #include <sys/cdefs.h>
   33 __FBSDID("$FreeBSD$");
   34 
   35 #include <sys/param.h>
   36 #include <sys/bus.h>
   37 #include <sys/kernel.h>
   38 #include <sys/malloc.h>
   39 
   40 #include <machine/intr.h>
   41 
   42 #include <contrib/dev/acpica/include/acpi.h>
   43 #include <contrib/dev/acpica/include/accommon.h>
   44 #include <contrib/dev/acpica/include/actables.h>
   45 
   46 #include <dev/acpica/acpivar.h>
   47 
   48 /*
   49  * Track next XREF available for ITS groups.
   50  */
   51 static u_int acpi_its_xref = ACPI_MSI_XREF;
   52 
   53 /*
   54  * Some types of IORT nodes have a set of mappings.  Each mapping
   55  * translates a range of device IDs [base..end] from the current node
   56  * to another node.  The corresponding device IDs on the destination
   57  * node start at outbase.
   58  */
   59 struct iort_map_entry {
   60         u_int                   base;
   61         u_int                   end;
   62         u_int                   outbase;
   63         u_int                   flags;
   64         u_int                   out_node_offset;
   65         struct iort_node        *out_node;
   66 };
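      /*
       * Illustrative example (hypothetical values, see iort_entry_lookup()):
       * a mapping with base = 0x1000, end = 0x10ff and outbase = 0x20000
       * translates input ID 0x1010 on this node to 0x20000 + (0x1010 -
       * 0x1000) = 0x20010 on out_node.  If ACPI_IORT_ID_SINGLE_MAPPING is
       * set in flags, every ID in the range maps to outbase itself.
       */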
   67 
   68 /*
   69  * The ITS group node does not have any outgoing mappings.  It has a
   70  * list of GIC ITS blocks which can handle the device ID.  We will
   71  * store the PIC XREF used by the block and the block's proximity
   72  * data here, so that they can be retrieved together.
   73  */
   74 struct iort_its_entry {
   75         u_int                   its_id;
   76         u_int                   xref;
   77         int                     pxm;
   78 };
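      /*
       * xref and pxm start out unset (0 and -1) and are filled in later:
       * madt_resolve_its_xref() assigns a PIC xref for each GIC ITS
       * translator found in the MADT, and srat_resolve_its_pxm() assigns
       * the proximity domain from the SRAT when that table is present.
       */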
   79 
   80 struct iort_named_component
   81 {
   82         UINT32                  NodeFlags;
   83         UINT64                  MemoryProperties;
   84         UINT8                   MemoryAddressLimit;
   85         char                    DeviceName[32]; /* Path of namespace object */
   86 };
   87 
   88 /*
   89  * IORT node. Each node has some device specific data depending on the
   90  * type of the node.  The node can also have a set of mappings or, in
   91  * the case of ITS group nodes, a set of ITS entries.
   92  * The nodes are kept in a TAILQ by type.
   93  */
   94 struct iort_node {
   95         TAILQ_ENTRY(iort_node)  next;           /* next entry with same type */
   96         enum AcpiIortNodeType   type;           /* ACPI type */
   97         u_int                   node_offset;    /* offset in IORT - node ID */
   98         u_int                   nentries;       /* items in array below */
   99         u_int                   usecount;       /* for bookkeeping */
  100         u_int                   revision;       /* node revision */
  101         union {
  102                 ACPI_IORT_ROOT_COMPLEX          pci_rc; /* PCI root complex */
  103                 ACPI_IORT_SMMU                  smmu;
  104                 ACPI_IORT_SMMU_V3               smmu_v3;
  105                 struct iort_named_component     named_comp;
  106         } data;
  107         union {
  108                 struct iort_map_entry   *mappings;      /* node mappings  */
  109                 struct iort_its_entry   *its;           /* ITS IDs array */
  110         } entries;
  111 };
  112 
  113 /* Lists for each of the types. */
  114 static TAILQ_HEAD(, iort_node) pci_nodes = TAILQ_HEAD_INITIALIZER(pci_nodes);
  115 static TAILQ_HEAD(, iort_node) smmu_nodes = TAILQ_HEAD_INITIALIZER(smmu_nodes);
  116 static TAILQ_HEAD(, iort_node) its_groups = TAILQ_HEAD_INITIALIZER(its_groups);
  117 static TAILQ_HEAD(, iort_node) named_nodes = TAILQ_HEAD_INITIALIZER(named_nodes);
  118 
  119 static int
  120 iort_entry_get_id_mapping_index(struct iort_node *node)
  121 {
  122 
  123         switch(node->type) {
  124         case ACPI_IORT_NODE_SMMU_V3:
  125                 /* The ID mapping field was added in version 1 */
  126                 if (node->revision < 1)
  127                         return (-1);
  128 
  129                 /*
  130                  * If all the control interrupts are GSIV based, the ID
  131                  * mapping field is ignored.
  132                  */
  133                 if (node->data.smmu_v3.EventGsiv != 0 &&
  134                     node->data.smmu_v3.PriGsiv != 0 &&
  135                     node->data.smmu_v3.GerrGsiv != 0 &&
  136                     node->data.smmu_v3.SyncGsiv != 0)
  137                         return (-1);
  138 
  139                 if (node->data.smmu_v3.IdMappingIndex >= node->nentries)
  140                         return (-1);
  141 
  142                 return (node->data.smmu_v3.IdMappingIndex);
  143         case ACPI_IORT_NODE_PMCG:
  144                 return (0);
  145         default:
  146                 break;
  147         }
  148 
  149         return (-1);
  150 }
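      /*
       * Per the IORT specification, the mapping entry selected by this index
       * carries the DeviceID used for the node's own MSIs rather than a
       * device ID translation, which is why iort_entry_lookup() below skips
       * that entry when walking the mappings array.
       */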
  151 
  152 /*
  153  * Look up an ID in the mappings array.  If successful, map the input ID
  154  * to the output ID and return the output node found.
  155  */
  156 static struct iort_node *
  157 iort_entry_lookup(struct iort_node *node, u_int id, u_int *outid)
  158 {
  159         struct iort_map_entry *entry;
  160         int i, id_map;
  161 
  162         id_map = iort_entry_get_id_mapping_index(node);
  163         entry = node->entries.mappings;
  164         for (i = 0; i < node->nentries; i++, entry++) {
  165                 if (i == id_map)
  166                         continue;
  167                 if (entry->base <= id && id <= entry->end)
  168                         break;
  169         }
  170         if (i == node->nentries)
  171                 return (NULL);
  172         if ((entry->flags & ACPI_IORT_ID_SINGLE_MAPPING) == 0)
  173                 *outid = entry->outbase + (id - entry->base);
  174         else
  175                 *outid = entry->outbase;
  176         return (entry->out_node);
  177 }
  178 
  179 /*
  180  * Perform an additional lookup in the case of an SMMU node and an ITS outtype.
  181  */
  182 static struct iort_node *
  183 iort_smmu_trymap(struct iort_node *node, u_int outtype, u_int *outid)
  184 {
  185         /* The original node may not have been found. */
  186         if (!node)
  187                 return (NULL);
  188 
  189         /* Node can be SMMU or ITS. If SMMU, we need another lookup. */
  190         if (outtype == ACPI_IORT_NODE_ITS_GROUP &&
  191             (node->type == ACPI_IORT_NODE_SMMU_V3 ||
  192              node->type == ACPI_IORT_NODE_SMMU)) {
  193                 node = iort_entry_lookup(node, *outid, outid);
  194                 if (node == NULL)
  195                         return (NULL);
  196         }
  197 
  198         KASSERT(node->type == outtype, ("mapping fail"));
  199         return (node);
  200 }
  201 
  202 /*
  203  * Map a PCI RID to a SMMU node or an ITS node, based on outtype.
  204  */
  205 static struct iort_node *
  206 iort_pci_rc_map(u_int seg, u_int rid, u_int outtype, u_int *outid)
  207 {
  208         struct iort_node *node, *out_node;
  209         u_int nxtid;
  210 
  211         out_node = NULL;
  212         TAILQ_FOREACH(node, &pci_nodes, next) {
  213                 if (node->data.pci_rc.PciSegmentNumber != seg)
  214                         continue;
  215                 out_node = iort_entry_lookup(node, rid, &nxtid);
  216                 if (out_node != NULL)
  217                         break;
  218         }
  219 
  220         out_node = iort_smmu_trymap(out_node, outtype, &nxtid);
  221         if (out_node)
  222                 *outid = nxtid;
  223 
  224         return (out_node);
  225 }
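      /*
       * Sketch of the full translation (the topology is hypothetical): if
       * the root complex for 'seg' maps the RID to an SMMU node and outtype
       * is ACPI_IORT_NODE_ITS_GROUP, iort_smmu_trymap() performs a second
       * iort_entry_lookup() on that SMMU node, so *outid ends up being the
       * DeviceID expected by the ITS group rather than the intermediate
       * StreamID.
       */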
  226 
  227 /*
  228  * Map a named component node to an SMMU node or an ITS node, based on outtype.
  229  */
  230 static struct iort_node *
  231 iort_named_comp_map(const char *devname, u_int rid, u_int outtype, u_int *outid)
  232 {
  233         struct iort_node *node, *out_node;
  234         u_int nxtid;
  235 
  236         out_node = NULL;
  237         TAILQ_FOREACH(node, &named_nodes, next) {
  238                 if (strstr(node->data.named_comp.DeviceName, devname) == NULL)
  239                         continue;
  240                 out_node = iort_entry_lookup(node, rid, &nxtid);
  241                 if (out_node != NULL)
  242                         break;
  243         }
  244 
  245         out_node = iort_smmu_trymap(out_node, outtype, &nxtid);
  246         if (out_node)
  247                 *outid = nxtid;
  248 
  249         return (out_node);
  250 }
  251 
  252 #ifdef notyet
  253 /*
  254  * Not implemented: map a PCIe device to the SMMU it is associated with.
  255  */
  256 int
  257 acpi_iort_map_smmu(u_int seg, u_int devid, void **smmu, u_int *sid)
  258 {
  259         /* XXX: convert oref to SMMU device */
  260         return (ENXIO);
  261 }
  262 #endif
  263 
  264 /*
  265  * Allocate the mappings array for a node and copy the ID mappings from
  266  * the IORT node entry.
  267  */
  268 static void
  269 iort_copy_data(struct iort_node *node, ACPI_IORT_NODE *node_entry)
  270 {
  271         ACPI_IORT_ID_MAPPING *map_entry;
  272         struct iort_map_entry *mapping;
  273         int i;
  274 
  275         map_entry = ACPI_ADD_PTR(ACPI_IORT_ID_MAPPING, node_entry,
  276             node_entry->MappingOffset);
  277         node->nentries = node_entry->MappingCount;
  278         node->usecount = 0;
  279         mapping = malloc(sizeof(*mapping) * node->nentries, M_DEVBUF,
  280             M_WAITOK | M_ZERO);
  281         node->entries.mappings = mapping;
  282         for (i = 0; i < node->nentries; i++, mapping++, map_entry++) {
  283                 mapping->base = map_entry->InputBase;
  284                 /*
  285                  * IdCount means "The number of IDs in the range minus one" (ARM DEN 0049D).
  286                  * We use <= for comparison against this field, so don't add one here.
  287                  */
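                      /*
                       * Example with illustrative values: InputBase = 0 and
                       * IdCount = 0xff describe the 0x100 IDs 0x0..0xff, so
                       * 'end' becomes 0xff here.
                       */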
  288                 mapping->end = map_entry->InputBase + map_entry->IdCount;
  289                 mapping->outbase = map_entry->OutputBase;
  290                 mapping->out_node_offset = map_entry->OutputReference;
  291                 mapping->flags = map_entry->Flags;
  292                 mapping->out_node = NULL;
  293         }
  294 }
  295 
  296 /*
  297  * Allocate and copy an ITS group.
  298  */
  299 static void
  300 iort_copy_its(struct iort_node *node, ACPI_IORT_NODE *node_entry)
  301 {
  302         struct iort_its_entry *its;
  303         ACPI_IORT_ITS_GROUP *itsg_entry;
  304         UINT32 *id;
  305         int i;
  306 
  307         itsg_entry = (ACPI_IORT_ITS_GROUP *)node_entry->NodeData;
  308         node->nentries = itsg_entry->ItsCount;
  309         node->usecount = 0;
  310         its = malloc(sizeof(*its) * node->nentries, M_DEVBUF, M_WAITOK | M_ZERO);
  311         node->entries.its = its;
  312         id = &itsg_entry->Identifiers[0];
  313         for (i = 0; i < node->nentries; i++, its++, id++) {
  314                 its->its_id = *id;
  315                 its->pxm = -1;
  316                 its->xref = 0;
  317         }
  318 }
  319 
  320 /*
  321  * Walk the IORT table and add nodes to corresponding list.
  322  */
  323 static void
  324 iort_add_nodes(ACPI_IORT_NODE *node_entry, u_int node_offset)
  325 {
  326         ACPI_IORT_ROOT_COMPLEX *pci_rc;
  327         ACPI_IORT_SMMU *smmu;
  328         ACPI_IORT_SMMU_V3 *smmu_v3;
  329         ACPI_IORT_NAMED_COMPONENT *named_comp;
  330         struct iort_node *node;
  331 
  332         node = malloc(sizeof(*node), M_DEVBUF, M_WAITOK | M_ZERO);
  333         node->type = node_entry->Type;
  334         node->node_offset = node_offset;
  335         node->revision = node_entry->Revision;
  336 
  337         /* copy nodes depending on type */
  338         switch(node_entry->Type) {
  339         case ACPI_IORT_NODE_PCI_ROOT_COMPLEX:
  340                 pci_rc = (ACPI_IORT_ROOT_COMPLEX *)node_entry->NodeData;
  341                 memcpy(&node->data.pci_rc, pci_rc, sizeof(*pci_rc));
  342                 iort_copy_data(node, node_entry);
  343                 TAILQ_INSERT_TAIL(&pci_nodes, node, next);
  344                 break;
  345         case ACPI_IORT_NODE_SMMU:
  346                 smmu = (ACPI_IORT_SMMU *)node_entry->NodeData;
  347                 memcpy(&node->data.smmu, smmu, sizeof(*smmu));
  348                 iort_copy_data(node, node_entry);
  349                 TAILQ_INSERT_TAIL(&smmu_nodes, node, next);
  350                 break;
  351         case ACPI_IORT_NODE_SMMU_V3:
  352                 smmu_v3 = (ACPI_IORT_SMMU_V3 *)node_entry->NodeData;
  353                 memcpy(&node->data.smmu_v3, smmu_v3, sizeof(*smmu_v3));
  354                 iort_copy_data(node, node_entry);
  355                 TAILQ_INSERT_TAIL(&smmu_nodes, node, next);
  356                 break;
  357         case ACPI_IORT_NODE_ITS_GROUP:
  358                 iort_copy_its(node, node_entry);
  359                 TAILQ_INSERT_TAIL(&its_groups, node, next);
  360                 break;
  361         case ACPI_IORT_NODE_NAMED_COMPONENT:
  362                 named_comp = (ACPI_IORT_NAMED_COMPONENT *)node_entry->NodeData;
  363                 memcpy(&node->data.named_comp, named_comp, sizeof(*named_comp));
  364 
  365                 /* Copy name of the node separately. */
  366                 strncpy(node->data.named_comp.DeviceName,
  367                     named_comp->DeviceName,
  368                     sizeof(node->data.named_comp.DeviceName));
  369                 node->data.named_comp.DeviceName[31] = 0;
  370 
  371                 iort_copy_data(node, node_entry);
  372                 TAILQ_INSERT_TAIL(&named_nodes, node, next);
  373                 break;
  374         default:
  375                 printf("ACPI: IORT: Dropping unhandled type %u\n",
  376                     node_entry->Type);
  377                 free(node, M_DEVBUF);
  378                 break;
  379         }
  380 }
  381 
  382 /*
  383  * For the given mapping entry, walk through all the possible destination
  384  * nodes and resolve the output reference.
  385  */
  386 static void
  387 iort_resolve_node(struct iort_map_entry *entry, int check_smmu)
  388 {
  389         struct iort_node *node, *np;
  390 
  391         node = NULL;
  392         if (check_smmu) {
  393                 TAILQ_FOREACH(np, &smmu_nodes, next) {
  394                         if (entry->out_node_offset == np->node_offset) {
  395                                 node = np;
  396                                 break;
  397                         }
  398                 }
  399         }
  400         if (node == NULL) {
  401                 TAILQ_FOREACH(np, &its_groups, next) {
  402                         if (entry->out_node_offset == np->node_offset) {
  403                                 node = np;
  404                                 break;
  405                         }
  406                 }
  407         }
  408         if (node != NULL) {
  409                 node->usecount++;
  410                 entry->out_node = node;
  411         } else {
  412                 printf("ACPI: IORT: Firmware Bug: no mapping for node %u\n",
  413                     entry->out_node_offset);
  414         }
  415 }
  416 
  417 /*
  418  * Resolve all output node references to node pointers.
  419  */
  420 static void
  421 iort_post_process_mappings(void)
  422 {
  423         struct iort_node *node;
  424         int i;
  425 
  426         TAILQ_FOREACH(node, &pci_nodes, next)
  427                 for (i = 0; i < node->nentries; i++)
  428                         iort_resolve_node(&node->entries.mappings[i], TRUE);
  429         TAILQ_FOREACH(node, &smmu_nodes, next)
  430                 for (i = 0; i < node->nentries; i++)
  431                         iort_resolve_node(&node->entries.mappings[i], FALSE);
  432         TAILQ_FOREACH(node, &named_nodes, next)
  433                 for (i = 0; i < node->nentries; i++)
  434                         iort_resolve_node(&node->entries.mappings[i], TRUE);
  435 }
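      /*
       * PCI root complex and named component mappings may reference either
       * an SMMU or an ITS group, so they are resolved with check_smmu set;
       * SMMU node mappings can only reference ITS groups, so the SMMU list
       * is skipped for them.
       */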
  436 
  437 /*
  438  * Walk MADT table, assign PIC xrefs to all ITS entries.
  439  */
  440 static void
  441 madt_resolve_its_xref(ACPI_SUBTABLE_HEADER *entry, void *arg)
  442 {
  443         ACPI_MADT_GENERIC_TRANSLATOR *gict;
  444         struct iort_node *its_node;
  445         struct iort_its_entry *its_entry;
  446         u_int xref;
  447         int i, matches;
  448 
  449         if (entry->Type != ACPI_MADT_TYPE_GENERIC_TRANSLATOR)
  450                 return;
  451 
  452         gict = (ACPI_MADT_GENERIC_TRANSLATOR *)entry;
  453         matches = 0;
  454         xref = acpi_its_xref++;
  455         TAILQ_FOREACH(its_node, &its_groups, next) {
  456                 its_entry = its_node->entries.its;
  457                 for (i = 0; i < its_node->nentries; i++, its_entry++) {
  458                         if (its_entry->its_id == gict->TranslationId) {
  459                                 its_entry->xref = xref;
  460                                 matches++;
  461                         }
  462                 }
  463         }
  464         if (matches == 0)
  465                 printf("ACPI: IORT: Unused ITS block, ID %u\n",
  466                     gict->TranslationId);
  467 }
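      /*
       * Note that each GIC ITS translator in the MADT consumes one xref from
       * acpi_its_xref whether or not it matches an IORT ITS group; the xref
       * assigned here is what acpi_iort_its_lookup() and the MSI mapping
       * helpers later hand back to callers.
       */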
  468 
  469 /*
  470  * Walk SRAT, assign proximity to all ITS entries.
  471  */
  472 static void
  473 srat_resolve_its_pxm(ACPI_SUBTABLE_HEADER *entry, void *arg)
  474 {
  475         ACPI_SRAT_GIC_ITS_AFFINITY *gicits;
  476         struct iort_node *its_node;
  477         struct iort_its_entry *its_entry;
  478         int *map_counts;
  479         int i, matches, dom;
  480 
  481         if (entry->Type != ACPI_SRAT_TYPE_GIC_ITS_AFFINITY)
  482                 return;
  483 
  484         matches = 0;
  485         map_counts = arg;
  486         gicits = (ACPI_SRAT_GIC_ITS_AFFINITY *)entry;
  487         dom = acpi_map_pxm_to_vm_domainid(gicits->ProximityDomain);
  488 
  489         /*
  490          * Catch firmware and config errors.  map_counts keeps a count
  491          * of how many ProximityDomain values map to each VM domain ID.
  492          */
  493 #if MAXMEMDOM > 1
  494         if (dom == -1)
  495                 printf("Firmware Error: Proximity Domain %d could not be"
  496                     " mapped for GIC ITS ID %d!\n",
  497                     gicits->ProximityDomain, gicits->ItsId);
  498 #endif
  499         /* use dom + 1 as index to handle the case where dom == -1 */
  500         i = ++map_counts[dom + 1];
  501         if (i > 1) {
  502 #ifdef NUMA
  503                 if (dom != -1)
  504                         printf("ERROR: Multiple Proximity Domains map to the"
  505                             " same NUMA domain %d!\n", dom);
  506 #else
  507                 printf("WARNING: multiple Proximity Domains in SRAT but NUMA"
  508                     " NOT enabled!\n");
  509 #endif
  510         }
  511         TAILQ_FOREACH(its_node, &its_groups, next) {
  512                 its_entry = its_node->entries.its;
  513                 for (i = 0; i < its_node->nentries; i++, its_entry++) {
  514                         if (its_entry->its_id == gicits->ItsId) {
  515                                 its_entry->pxm = dom;
  516                                 matches++;
  517                         }
  518                 }
  519         }
  520         if (matches == 0)
  521                 printf("ACPI: IORT: ITS block %u in SRAT not found in IORT!\n",
  522                     gicits->ItsId);
  523 }
  524 
  525 /*
  526  * Cross check the ITS Id with MADT and (if available) SRAT.
  527  */
  528 static int
  529 iort_post_process_its(void)
  530 {
  531         ACPI_TABLE_MADT *madt;
  532         ACPI_TABLE_SRAT *srat;
  533         vm_paddr_t madt_pa, srat_pa;
  534         int map_counts[MAXMEMDOM + 1] = { 0 };
  535 
  536         /* Check ITS block in MADT */
  537         madt_pa = acpi_find_table(ACPI_SIG_MADT);
  538         KASSERT(madt_pa != 0, ("no MADT!"));
  539         madt = acpi_map_table(madt_pa, ACPI_SIG_MADT);
  540         KASSERT(madt != NULL, ("can't map MADT!"));
  541         acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
  542             madt_resolve_its_xref, NULL);
  543         acpi_unmap_table(madt);
  544 
  545         /* Get proximity information if available. */
  546         srat_pa = acpi_find_table(ACPI_SIG_SRAT);
  547         if (srat_pa != 0) {
  548                 srat = acpi_map_table(srat_pa, ACPI_SIG_SRAT);
  549                 KASSERT(srat != NULL, ("can't map SRAT!"));
  550                 acpi_walk_subtables(srat + 1, (char *)srat + srat->Header.Length,
  551                     srat_resolve_its_pxm, map_counts);
  552                 acpi_unmap_table(srat);
  553         }
  554         return (0);
  555 }
  556 
  557 /*
  558  * Find, parse, and save IO Remapping Table ("IORT").
  559  */
  560 static int
  561 acpi_parse_iort(void *dummy __unused)
  562 {
  563         ACPI_TABLE_IORT *iort;
  564         ACPI_IORT_NODE *node_entry;
  565         vm_paddr_t iort_pa;
  566         u_int node_offset;
  567 
  568         iort_pa = acpi_find_table(ACPI_SIG_IORT);
  569         if (iort_pa == 0)
  570                 return (ENXIO);
  571 
  572         iort = acpi_map_table(iort_pa, ACPI_SIG_IORT);
  573         if (iort == NULL) {
  574                 printf("ACPI: Unable to map the IORT table!\n");
  575                 return (ENXIO);
  576         }
  577         for (node_offset = iort->NodeOffset;
  578             node_offset < iort->Header.Length;
  579             node_offset += node_entry->Length) {
  580                 node_entry = ACPI_ADD_PTR(ACPI_IORT_NODE, iort, node_offset);
  581                 iort_add_nodes(node_entry, node_offset);
  582         }
  583         acpi_unmap_table(iort);
  584         iort_post_process_mappings();
  585         iort_post_process_its();
  586         return (0);
  587 }
  588 SYSINIT(acpi_parse_iort, SI_SUB_DRIVERS, SI_ORDER_FIRST, acpi_parse_iort, NULL);
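      /*
       * Parsing is hooked in at SI_SUB_DRIVERS / SI_ORDER_FIRST, presumably
       * so that the node lists above are populated before the interrupt
       * controller and PCI drivers that consume the acpi_iort_* helpers
       * attach.
       */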
  589 
  590 /*
  591  * Provide ITS ID to PIC xref mapping.
  592  */
  593 int
  594 acpi_iort_its_lookup(u_int its_id, u_int *xref, int *pxm)
  595 {
  596         struct iort_node *its_node;
  597         struct iort_its_entry *its_entry;
  598         int i;
  599 
  600         TAILQ_FOREACH(its_node, &its_groups, next) {
  601                 its_entry = its_node->entries.its;
  602                 for (i = 0; i < its_node->nentries; i++, its_entry++) {
  603                         if (its_entry->its_id == its_id) {
  604                                 *xref = its_entry->xref;
  605                                 *pxm = its_entry->pxm;
  606                                 return (0);
  607                         }
  608                 }
  609         }
  610         return (ENOENT);
  611 }
  612 
  613 /*
  614  * Find the mapping for a PCIe device given its segment and requester ID.
  615  * Returns the XREF for MSI interrupt setup and the device ID to use for
  616  * the interrupt setup.
  617  */
  618 int
  619 acpi_iort_map_pci_msi(u_int seg, u_int rid, u_int *xref, u_int *devid)
  620 {
  621         struct iort_node *node;
  622 
  623         node = iort_pci_rc_map(seg, rid, ACPI_IORT_NODE_ITS_GROUP, devid);
  624         if (node == NULL)
  625                 return (ENOENT);
  626 
  627         /* This should be an ITS node */
  628         KASSERT(node->type == ACPI_IORT_NODE_ITS_GROUP, ("bad group"));
  629 
  630         /* Return the first node; we don't handle more than that now. */
  631         *xref = node->entries.its[0].xref;
  632         return (0);
  633 }
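      /*
       * A minimal usage sketch (the caller and the PCI_RID() requester-ID
       * encoding are assumptions for illustration, not taken from this
       * file):
       *
       *      u_int xref, devid;
       *
       *      if (acpi_iort_map_pci_msi(0, PCI_RID(bus, slot, func),
       *          &xref, &devid) == 0) {
       *              // program the ITS identified by xref using devid
       *      }
       */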
  634 
  635 int
  636 acpi_iort_map_pci_smmuv3(u_int seg, u_int rid, u_int *xref, u_int *sid)
  637 {
  638         ACPI_IORT_SMMU_V3 *smmu;
  639         struct iort_node *node;
  640 
  641         node = iort_pci_rc_map(seg, rid, ACPI_IORT_NODE_SMMU_V3, sid);
  642         if (node == NULL)
  643                 return (ENOENT);
  644 
  645         /* This should be an SMMU node. */
  646         KASSERT(node->type == ACPI_IORT_NODE_SMMU_V3, ("bad node"));
  647 
  648         smmu = (ACPI_IORT_SMMU_V3 *)&node->data.smmu_v3;
  649         *xref = smmu->BaseAddress;
  650 
  651         return (0);
  652 }
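      /*
       * Unlike the ITS case above, the xref returned here is the SMMUv3
       * base address taken from the IORT node, presumably so the caller can
       * identify the SMMU instance; *sid receives the translated StreamID.
       */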
  653 
  654 /*
  655  * Find the mapping for a named component node given its name and resource ID.
  656  * Returns the XREF and the device ID to use for MSI interrupt setup.
  657  */
  658 int
  659 acpi_iort_map_named_msi(const char *devname, u_int rid, u_int *xref,
  660     u_int *devid)
  661 {
  662         struct iort_node *node;
  663 
  664         node = iort_named_comp_map(devname, rid, ACPI_IORT_NODE_ITS_GROUP,
  665             devid);
  666         if (node == NULL)
  667                 return (ENOENT);
  668 
  669         /* This should be an ITS node */
  670         KASSERT(node->type == ACPI_IORT_NODE_ITS_GROUP, ("bad group"));
  671 
  672         /* Return the first node; we don't handle more than that now. */
  673         *xref = node->entries.its[0].xref;
  674         return (0);
  675 }
  676 
  677 int
  678 acpi_iort_map_named_smmuv3(const char *devname, u_int rid, u_int *xref,
  679     u_int *devid)
  680 {
  681         ACPI_IORT_SMMU_V3 *smmu;
  682         struct iort_node *node;
  683 
  684         node = iort_named_comp_map(devname, rid, ACPI_IORT_NODE_SMMU_V3, devid);
  685         if (node == NULL)
  686                 return (ENOENT);
  687 
  688         /* This should be an SMMU node. */
  689         KASSERT(node->type == ACPI_IORT_NODE_SMMU_V3, ("bad node"));
  690 
  691         smmu = (ACPI_IORT_SMMU_V3 *)&node->data.smmu_v3;
  692         *xref = smmu->BaseAddress;
  693 
  694         return (0);
  695 }
