FreeBSD/Linux Kernel Cross Reference
sys/arm64/iommu/smmu.c


/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019-2020 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory (Department of Computer Science and
 * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
 * DARPA SSITH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Hardware overview.
 *
 * An incoming transaction from a peripheral device has an address, size,
 * attributes and a StreamID.
 *
 * In the case of PCI devices, the StreamID is derived from the PCI
 * requester ID (RID).
 *
 * The StreamID is used to select a Stream Table Entry (STE) in the Stream
 * table, which holds the per-device configuration.
 *
 * The Stream table is either linear or a 2-level walk table (this driver
 * supports both formats).  Note that a linear table could occupy 1GB or
 * more of memory depending on the sid_bits value.
 *
 * The STE is used to locate a Context Descriptor (CD), a structure in
 * memory that describes the stages of translation, the translation table
 * type, a pointer to level 0 of the page tables, the ASID, etc.
 *
 * The hardware supports two stages of translation, Stage1 (S1) and
 * Stage2 (S2):
 *  o S1 translates traffic for the host machine
 *  o S2 is used by a hypervisor
 *
 * This driver enables the S1 stage with standard AArch64 page tables.
 *
 * Note that the SMMU does not share its TLB with the main CPU.
 * This driver uses the Command queue to invalidate the SMMU TLB and the
 * STE cache.
 *
 * An arm64 SoC could have more than one SMMU instance.
 * The ACPI IORT table describes which SMMU unit is assigned to a
 * particular peripheral device.
 *
 * Queues.
 *
 * A register interface and memory-based circular buffer queues are used
 * to interface with the SMMU.
 *
 * These are a Command queue for commands sent to the SMMU and an Event
 * queue for event/fault reports from the SMMU.  An optional PRI queue is
 * designed for the reception of PCIe page requests.
 *
 * Note that not all hardware supports PRI services; for instance, they
 * were not found on the Neoverse N1 SDP machine.
 * (This driver does not implement the PRI queue.)
 *
 * All SMMU queues are arranged as circular buffers in memory.  They are
 * used in a producer-consumer fashion, so that an output queue contains
 * data produced by the SMMU and consumed by software, and an input queue
 * contains data produced by software and consumed by the SMMU.
 *
 * Interrupts.
 *
 * Interrupts are not required by this driver for normal operation.
 * The standard wired interrupt is only triggered when an event comes from
 * the SMMU, which happens only on errors (e.g. a translation fault).
 */
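
/*
 * Illustrative sketch (not driver code): with the 2-level stream table
 * format, a StreamID is split into an L1 index and an L2 index, with
 * STRTAB_SPLIT naming the number of StreamID bits covered by one L2 table:
 *
 *   l1_index = sid >> STRTAB_SPLIT;
 *   l2_index = sid & ((1 << STRTAB_SPLIT) - 1);
 *   ste = &l2_table_for(l1_index)[l2_index];
 *
 * See smmu_get_ste_addr() below for the actual lookup; l2_table_for() is
 * a hypothetical helper standing in for the L1 descriptor dereference.
 */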

#include "opt_platform.h"
#include "opt_acpi.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/taskqueue.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/iommu/iommu.h>
#include <arm64/iommu/iommu_pmap.h>

#include <machine/bus.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include "iommu.h"
#include "iommu_if.h"

#include "smmureg.h"
#include "smmuvar.h"

#define STRTAB_L1_SZ_SHIFT      20
#define STRTAB_SPLIT            8

#define STRTAB_L1_DESC_L2PTR_M  (0x3fffffffffff << 6)
#define STRTAB_L1_DESC_DWORDS   1

#define STRTAB_STE_DWORDS       8

#define CMDQ_ENTRY_DWORDS       2
#define EVTQ_ENTRY_DWORDS       4
#define PRIQ_ENTRY_DWORDS       2

#define CD_DWORDS               8

#define Q_WRP(q, p)             ((p) & (1 << (q)->size_log2))
#define Q_IDX(q, p)             ((p) & ((1 << (q)->size_log2) - 1))
#define Q_OVF(p)                ((p) & (1 << 31)) /* Event queue overflowed */
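
/*
 * The PROD and CONS register values carry a queue index in the low
 * size_log2 bits and a wrap flag in the next bit (plus the event queue
 * overflow flag in bit 31).  For example, with size_log2 == 8, the raw
 * value 0x105 means index 5 with the wrap bit set.  A queue is empty when
 * both the index and wrap bits match, and full when the indices match but
 * the wrap bits differ.
 */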

#define SMMU_Q_ALIGN            (64 * 1024)

#define         MAXADDR_48BIT   0xFFFFFFFFFFFFUL
#define         MAXADDR_52BIT   0xFFFFFFFFFFFFFUL

static struct resource_spec smmu_spec[] = {
        { SYS_RES_MEMORY, 0, RF_ACTIVE },
        { SYS_RES_IRQ, 0, RF_ACTIVE },
        { SYS_RES_IRQ, 1, RF_ACTIVE | RF_OPTIONAL },
        { SYS_RES_IRQ, 2, RF_ACTIVE },
        { SYS_RES_IRQ, 3, RF_ACTIVE },
        RESOURCE_SPEC_END
};

MALLOC_DEFINE(M_SMMU, "SMMU", SMMU_DEVSTR);

#define dprintf(fmt, ...)

struct smmu_event {
        int ident;
        char *str;
        char *msg;
};

static struct smmu_event events[] = {
        { 0x01, "F_UUT",
                "Unsupported Upstream Transaction."},
        { 0x02, "C_BAD_STREAMID",
                "Transaction StreamID out of range."},
        { 0x03, "F_STE_FETCH",
                "Fetch of STE caused external abort."},
        { 0x04, "C_BAD_STE",
                "Used STE invalid."},
        { 0x05, "F_BAD_ATS_TREQ",
                "Address Translation Request disallowed for a StreamID "
                "and a PCIe ATS Translation Request received."},
        { 0x06, "F_STREAM_DISABLED",
                "The STE of a transaction marks non-substream transactions "
                "disabled."},
        { 0x07, "F_TRANSL_FORBIDDEN",
                "An incoming PCIe transaction is marked Translated but "
                "SMMU bypass is disallowed for this StreamID."},
        { 0x08, "C_BAD_SUBSTREAMID",
                "Incoming SubstreamID present, but configuration is invalid."},
        { 0x09, "F_CD_FETCH",
                "Fetch of CD caused external abort."},
        { 0x0a, "C_BAD_CD",
                "Fetched CD invalid."},
        { 0x0b, "F_WALK_EABT",
                "An external abort occurred fetching (or updating) "
                "a translation table descriptor."},
        { 0x10, "F_TRANSLATION",
                "Translation fault."},
        { 0x11, "F_ADDR_SIZE",
                "Address Size fault."},
        { 0x12, "F_ACCESS",
                "Access flag fault due to AF == 0 in a page or block TTD."},
        { 0x13, "F_PERMISSION",
                "Permission fault occurred on page access."},
        { 0x20, "F_TLB_CONFLICT",
                "A TLB conflict occurred because of the transaction."},
        { 0x21, "F_CFG_CONFLICT",
                "A configuration cache conflict occurred due to "
                "the transaction."},
        { 0x24, "E_PAGE_REQUEST",
                "Speculative page request hint."},
        { 0x25, "F_VMS_FETCH",
                "Fetch of VMS caused external abort."},
        { 0, NULL, NULL },
};

static int
smmu_q_has_space(struct smmu_queue *q)
{

        /*
         * See 6.3.27 SMMU_CMDQ_PROD
         *
         * There is space in the queue for additional commands if:
         *  SMMU_CMDQ_CONS.RD != SMMU_CMDQ_PROD.WR ||
         *  SMMU_CMDQ_CONS.RD_WRAP == SMMU_CMDQ_PROD.WR_WRAP
         */

        if (Q_IDX(q, q->lc.cons) != Q_IDX(q, q->lc.prod) ||
            Q_WRP(q, q->lc.cons) == Q_WRP(q, q->lc.prod))
                return (1);

        return (0);
}

static int
smmu_q_empty(struct smmu_queue *q)
{

        if (Q_IDX(q, q->lc.cons) == Q_IDX(q, q->lc.prod) &&
            Q_WRP(q, q->lc.cons) == Q_WRP(q, q->lc.prod))
                return (1);

        return (0);
}

static int __unused
smmu_q_consumed(struct smmu_queue *q, uint32_t prod)
{

        if ((Q_WRP(q, q->lc.cons) == Q_WRP(q, prod)) &&
            (Q_IDX(q, q->lc.cons) >= Q_IDX(q, prod)))
                return (1);

        if ((Q_WRP(q, q->lc.cons) != Q_WRP(q, prod)) &&
            (Q_IDX(q, q->lc.cons) <= Q_IDX(q, prod)))
                return (1);

        return (0);
}
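
/*
 * Increment a queue pointer: the low index bits are advanced and the wrap
 * bit toggles naturally when the index overflows them, while the overflow
 * acknowledge bit is carried over unchanged.  E.g., with size_log2 == 8,
 * index 0xff with wrap bit 0 increments to index 0 with wrap bit 1.
 */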
static uint32_t
smmu_q_inc_cons(struct smmu_queue *q)
{
        uint32_t cons;
        uint32_t val;

        cons = (Q_WRP(q, q->lc.cons) | Q_IDX(q, q->lc.cons)) + 1;
        val = (Q_OVF(q->lc.cons) | Q_WRP(q, cons) | Q_IDX(q, cons));

        return (val);
}

static uint32_t
smmu_q_inc_prod(struct smmu_queue *q)
{
        uint32_t prod;
        uint32_t val;

        prod = (Q_WRP(q, q->lc.prod) | Q_IDX(q, q->lc.prod)) + 1;
        val = (Q_OVF(q->lc.prod) | Q_WRP(q, prod) | Q_IDX(q, prod));

        return (val);
}
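
/*
 * Update a register whose new value must be acknowledged by the hardware:
 * write the value, then spin until the companion acknowledge register
 * reads it back (e.g. SMMU_CR0 is paired with SMMU_CR0ACK).
 */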
static int
smmu_write_ack(struct smmu_softc *sc, uint32_t reg,
    uint32_t reg_ack, uint32_t val)
{
        uint32_t v;
        int timeout;

        timeout = 100000;

        bus_write_4(sc->res[0], reg, val);

        do {
                v = bus_read_4(sc->res[0], reg_ack);
                if (v == val)
                        break;
        } while (timeout--);

        if (timeout < 0) {
                device_printf(sc->dev, "Failed to write reg.\n");
                return (-1);
        }

        return (0);
}

static inline int
ilog2(long x)
{

        KASSERT(x > 0 && powerof2(x), ("%s: invalid arg %ld", __func__, x));

        return (flsl(x) - 1);
}

static int
smmu_init_queue(struct smmu_softc *sc, struct smmu_queue *q,
    uint32_t prod_off, uint32_t cons_off, uint32_t dwords)
{
        int sz;

        sz = (1 << q->size_log2) * dwords * 8;

        /* Set up the circular buffer. */
        q->vaddr = contigmalloc(sz, M_SMMU,
            M_WAITOK | M_ZERO, 0, (1ul << 48) - 1, SMMU_Q_ALIGN, 0);
        if (q->vaddr == NULL) {
                device_printf(sc->dev, "failed to allocate %d bytes\n", sz);
                return (-1);
        }

        q->prod_off = prod_off;
        q->cons_off = cons_off;
        q->paddr = vtophys(q->vaddr);

        q->base = CMDQ_BASE_RA | EVENTQ_BASE_WA | PRIQ_BASE_WA;
        q->base |= q->paddr & Q_BASE_ADDR_M;
        q->base |= q->size_log2 << Q_LOG2SIZE_S;

        return (0);
}

static int
smmu_init_queues(struct smmu_softc *sc)
{
        int err;

        /* Command queue. */
        err = smmu_init_queue(sc, &sc->cmdq,
            SMMU_CMDQ_PROD, SMMU_CMDQ_CONS, CMDQ_ENTRY_DWORDS);
        if (err)
                return (ENXIO);

        /* Event queue. */
        err = smmu_init_queue(sc, &sc->evtq,
            SMMU_EVENTQ_PROD, SMMU_EVENTQ_CONS, EVTQ_ENTRY_DWORDS);
        if (err)
                return (ENXIO);

        if (!(sc->features & SMMU_FEATURE_PRI))
                return (0);

        /* PRI queue. */
        err = smmu_init_queue(sc, &sc->priq,
            SMMU_PRIQ_PROD, SMMU_PRIQ_CONS, PRIQ_ENTRY_DWORDS);
        if (err)
                return (ENXIO);

        return (0);
}

/*
 * Dump a 2-level or linear STE.
 */
static void
smmu_dump_ste(struct smmu_softc *sc, int sid)
{
        struct smmu_strtab *strtab;
        struct l1_desc *l1_desc;
        uint64_t *ste, *l1;
        int i;

        strtab = &sc->strtab;

        if (sc->features & SMMU_FEATURE_2_LVL_STREAM_TABLE) {
                i = sid >> STRTAB_SPLIT;
                l1 = (void *)((uint64_t)strtab->vaddr +
                    STRTAB_L1_DESC_DWORDS * 8 * i);
                device_printf(sc->dev, "L1 ste == %lx\n", l1[0]);

                l1_desc = &strtab->l1[i];
                ste = l1_desc->va;
                if (ste == NULL) /* L2 is not initialized */
                        return;
        } else {
                ste = (void *)((uint64_t)strtab->vaddr +
                    sid * (STRTAB_STE_DWORDS << 3));
        }

        /* Dump the L2 or linear STE. */
        for (i = 0; i < STRTAB_STE_DWORDS; i++)
                device_printf(sc->dev, "ste[%d] == %lx\n", i, ste[i]);
}

static void __unused
smmu_dump_cd(struct smmu_softc *sc, struct smmu_cd *cd)
{
        uint64_t *vaddr;
        int i;

        device_printf(sc->dev, "%s\n", __func__);

        vaddr = cd->vaddr;
        for (i = 0; i < CD_DWORDS; i++)
                device_printf(sc->dev, "cd[%d] == %lx\n", i, vaddr[i]);
}

static void
smmu_evtq_dequeue(struct smmu_softc *sc, uint32_t *evt)
{
        struct smmu_queue *evtq;
        void *entry_addr;

        evtq = &sc->evtq;

        evtq->lc.val = bus_read_8(sc->res[0], evtq->prod_off);
        entry_addr = (void *)((uint64_t)evtq->vaddr +
            Q_IDX(evtq, evtq->lc.cons) * EVTQ_ENTRY_DWORDS * 8);
        memcpy(evt, entry_addr, EVTQ_ENTRY_DWORDS * 8);
        evtq->lc.cons = smmu_q_inc_cons(evtq);
        bus_write_4(sc->res[0], evtq->cons_off, evtq->lc.cons);
}

static void
smmu_print_event(struct smmu_softc *sc, uint32_t *evt)
{
        struct smmu_event *ev;
        uintptr_t input_addr;
        uint8_t event_id;
        device_t dev;
        int sid;
        int i;

        dev = sc->dev;

        ev = NULL;
        event_id = evt[0] & 0xff;
        for (i = 0; events[i].ident != 0; i++) {
                if (events[i].ident == event_id) {
                        ev = &events[i];
                        break;
                }
        }

        sid = evt[1];
        input_addr = evt[5];
        input_addr <<= 32;
        input_addr |= evt[4];

        if (smmu_quirks_check(dev, sid, event_id, input_addr)) {
                /* The event is known. Don't print anything. */
                return;
        }

        if (ev) {
                device_printf(sc->dev,
                    "Event %s (%s) received.\n", ev->str, ev->msg);
        } else {
                device_printf(sc->dev, "Event 0x%x received\n", event_id);
        }

        device_printf(sc->dev, "SID %x, Input Address: %jx\n",
            sid, input_addr);

        for (i = 0; i < 8; i++)
                device_printf(sc->dev, "evt[%d] %x\n", i, evt[i]);

        smmu_dump_ste(sc, sid);
}
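
/*
 * Encode a command queue entry.  Each command is two doublewords: the
 * opcode goes into cmd[0] (at CMD_QUEUE_OPCODE_S), and the remaining
 * fields are packed per-opcode, e.g. a TLBI command carries the ASID in
 * cmd[0] and the target address in cmd[1].
 */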
static void
make_cmd(struct smmu_softc *sc, uint64_t *cmd,
    struct smmu_cmdq_entry *entry)
{

        memset(cmd, 0, CMDQ_ENTRY_DWORDS * 8);
        cmd[0] = entry->opcode << CMD_QUEUE_OPCODE_S;

        switch (entry->opcode) {
        case CMD_TLBI_NH_VA:
                cmd[0] |= (uint64_t)entry->tlbi.asid << TLBI_0_ASID_S;
                cmd[1] = entry->tlbi.addr & TLBI_1_ADDR_M;
                if (entry->tlbi.leaf) {
                        /*
                         * The leaf flag means that only cached entries
                         * for the last level of the translation table walk
                         * are required to be invalidated.
                         */
                        cmd[1] |= TLBI_1_LEAF;
                }
                break;
        case CMD_TLBI_NH_ASID:
                cmd[0] |= (uint64_t)entry->tlbi.asid << TLBI_0_ASID_S;
                break;
        case CMD_TLBI_NSNH_ALL:
        case CMD_TLBI_NH_ALL:
        case CMD_TLBI_EL2_ALL:
                break;
        case CMD_CFGI_CD:
                cmd[0] |= ((uint64_t)entry->cfgi.ssid << CFGI_0_SSID_S);
                /* FALLTHROUGH */
        case CMD_CFGI_STE:
                cmd[0] |= ((uint64_t)entry->cfgi.sid << CFGI_0_STE_SID_S);
                cmd[1] |= ((uint64_t)entry->cfgi.leaf << CFGI_1_LEAF_S);
                break;
        case CMD_CFGI_STE_RANGE:
                cmd[1] = (31 << CFGI_1_STE_RANGE_S);
                break;
        case CMD_SYNC:
                cmd[0] |= SYNC_0_MSH_IS | SYNC_0_MSIATTR_OIWB;
                if (entry->sync.msiaddr) {
                        cmd[0] |= SYNC_0_CS_SIG_IRQ;
                        cmd[1] |= (entry->sync.msiaddr & SYNC_1_MSIADDRESS_M);
                } else
                        cmd[0] |= SYNC_0_CS_SIG_SEV;
                break;
        case CMD_PREFETCH_CONFIG:
                cmd[0] |= ((uint64_t)entry->prefetch.sid << PREFETCH_0_SID_S);
                break;
        }
}
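
/*
 * Producer side of the command queue: poll the CONS register until the
 * SMMU has consumed enough entries for space to be available, copy the
 * encoded command into the slot selected by the local PROD index, and
 * then publish the new PROD value to the hardware.
 */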
static void
smmu_cmdq_enqueue_cmd(struct smmu_softc *sc, struct smmu_cmdq_entry *entry)
{
        uint64_t cmd[CMDQ_ENTRY_DWORDS];
        struct smmu_queue *cmdq;
        void *entry_addr;

        cmdq = &sc->cmdq;

        make_cmd(sc, cmd, entry);

        SMMU_LOCK(sc);

        /* Ensure that space is available. */
        do {
                cmdq->lc.cons = bus_read_4(sc->res[0], cmdq->cons_off);
        } while (smmu_q_has_space(cmdq) == 0);

        /* Write the command to the current prod entry. */
        entry_addr = (void *)((uint64_t)cmdq->vaddr +
            Q_IDX(cmdq, cmdq->lc.prod) * CMDQ_ENTRY_DWORDS * 8);
        memcpy(entry_addr, cmd, CMDQ_ENTRY_DWORDS * 8);

        /* Increment the prod index. */
        cmdq->lc.prod = smmu_q_inc_prod(cmdq);
        bus_write_4(sc->res[0], cmdq->prod_off, cmdq->lc.prod);

        SMMU_UNLOCK(sc);
}

static void __unused
smmu_poll_until_consumed(struct smmu_softc *sc, struct smmu_queue *q)
{

        while (1) {
                q->lc.val = bus_read_8(sc->res[0], q->prod_off);
                if (smmu_q_empty(q))
                        break;
                cpu_spinwait();
        }
}
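
/*
 * Enqueue a CMD_SYNC and wait for its completion.  Rather than taking an
 * interrupt, the sync command's MSI address is pointed back at the command
 * slot itself, so the completion MSI (data value 0) overwrites the entry;
 * polling that doubleword until it reads zero detects completion.
 */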
static int
smmu_sync(struct smmu_softc *sc)
{
        struct smmu_cmdq_entry cmd;
        struct smmu_queue *q;
        uint32_t *base;
        int timeout;
        int prod;

        q = &sc->cmdq;
        prod = q->lc.prod;

        /* Enqueue the sync command. */
        cmd.opcode = CMD_SYNC;
        cmd.sync.msiaddr = q->paddr + Q_IDX(q, prod) * CMDQ_ENTRY_DWORDS * 8;
        smmu_cmdq_enqueue_cmd(sc, &cmd);

        /* Wait for the sync completion. */
        base = (void *)((uint64_t)q->vaddr +
            Q_IDX(q, prod) * CMDQ_ENTRY_DWORDS * 8);

        /*
         * It takes around 200 loops (6 instructions each)
         * on Neoverse N1 to complete the sync.
         */
        timeout = 10000;

        do {
                if (*base == 0) {
                        /* MSI write completed. */
                        break;
                }
                cpu_spinwait();
        } while (timeout--);

        if (timeout < 0)
                device_printf(sc->dev, "Failed to sync\n");

        return (0);
}

static int
smmu_sync_cd(struct smmu_softc *sc, int sid, int ssid, bool leaf)
{
        struct smmu_cmdq_entry cmd;

        cmd.opcode = CMD_CFGI_CD;
        cmd.cfgi.sid = sid;
        cmd.cfgi.ssid = ssid;
        cmd.cfgi.leaf = leaf;
        smmu_cmdq_enqueue_cmd(sc, &cmd);

        return (0);
}

static void
smmu_invalidate_all_sid(struct smmu_softc *sc)
{
        struct smmu_cmdq_entry cmd;

        /* Invalidate the cached configuration. */
        cmd.opcode = CMD_CFGI_STE_RANGE;
        smmu_cmdq_enqueue_cmd(sc, &cmd);
        smmu_sync(sc);
}

static void
smmu_tlbi_all(struct smmu_softc *sc)
{
        struct smmu_cmdq_entry cmd;

        /* Invalidate the entire TLB. */
        cmd.opcode = CMD_TLBI_NSNH_ALL;
        smmu_cmdq_enqueue_cmd(sc, &cmd);
        smmu_sync(sc);
}

static void
smmu_tlbi_asid(struct smmu_softc *sc, uint16_t asid)
{
        struct smmu_cmdq_entry cmd;

        /* Invalidate the TLB for an ASID. */
        cmd.opcode = CMD_TLBI_NH_ASID;
        cmd.tlbi.asid = asid;
        smmu_cmdq_enqueue_cmd(sc, &cmd);
        smmu_sync(sc);
}

static void
smmu_tlbi_va(struct smmu_softc *sc, vm_offset_t va, uint16_t asid)
{
        struct smmu_cmdq_entry cmd;

        /* Invalidate a specific range. */
        cmd.opcode = CMD_TLBI_NH_VA;
        cmd.tlbi.asid = asid;
        cmd.tlbi.vmid = 0;
        cmd.tlbi.leaf = true; /* We change only L3. */
        cmd.tlbi.addr = va;
        smmu_cmdq_enqueue_cmd(sc, &cmd);
}

static void
smmu_invalidate_sid(struct smmu_softc *sc, uint32_t sid)
{
        struct smmu_cmdq_entry cmd;

        /* Invalidate the cached configuration. */
        cmd.opcode = CMD_CFGI_STE;
        cmd.cfgi.sid = sid;
        smmu_cmdq_enqueue_cmd(sc, &cmd);
        smmu_sync(sc);
}

static void
smmu_prefetch_sid(struct smmu_softc *sc, uint32_t sid)
{
        struct smmu_cmdq_entry cmd;

        cmd.opcode = CMD_PREFETCH_CONFIG;
        cmd.prefetch.sid = sid;
        smmu_cmdq_enqueue_cmd(sc, &cmd);
        smmu_sync(sc);
}

/*
 * Initialize an STE in bypass mode. Traffic is not translated for the sid.
 */
static void
smmu_init_ste_bypass(struct smmu_softc *sc, uint32_t sid, uint64_t *ste)
{
        uint64_t val;

        val = STE0_VALID | STE0_CONFIG_BYPASS;

        ste[1] = STE1_SHCFG_INCOMING | STE1_EATS_FULLATS;
        ste[2] = 0;
        ste[3] = 0;
        ste[4] = 0;
        ste[5] = 0;
        ste[6] = 0;
        ste[7] = 0;

        smmu_invalidate_sid(sc, sid);
        ste[0] = val;
        dsb(sy);
        smmu_invalidate_sid(sc, sid);

        smmu_prefetch_sid(sc, sid);
}

/*
 * Enable Stage1 (S1) translation for the sid.
 */
static int
smmu_init_ste_s1(struct smmu_softc *sc, struct smmu_cd *cd,
    uint32_t sid, uint64_t *ste)
{
        uint64_t val;

        val = STE0_VALID;

        /* S1 */
        ste[1] = STE1_EATS_FULLATS      |
                 STE1_S1CSH_IS          |
                 STE1_S1CIR_WBRA        |
                 STE1_S1COR_WBRA        |
                 STE1_STRW_NS_EL1;
        ste[2] = 0;
        ste[3] = 0;
        ste[4] = 0;
        ste[5] = 0;
        ste[6] = 0;
        ste[7] = 0;

        if (sc->features & SMMU_FEATURE_STALL &&
            ((sc->features & SMMU_FEATURE_STALL_FORCE) == 0))
                ste[1] |= STE1_S1STALLD;

        /* Configure the STE. */
        val |= (cd->paddr & STE0_S1CONTEXTPTR_M);
        val |= STE0_CONFIG_S1_TRANS;

        smmu_invalidate_sid(sc, sid);

        /* STE[0] has to be written last of all, in a single operation. */
        ste[0] = val;
        dsb(sy);

        smmu_invalidate_sid(sc, sid);
        smmu_sync_cd(sc, sid, 0, true);
        smmu_invalidate_sid(sc, sid);

        /* The sid will most likely be used soon. */
        smmu_prefetch_sid(sc, sid);

        return (0);
}
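
/*
 * Resolve a StreamID to the virtual address of its STE.  For example, with
 * STRTAB_SPLIT == 8, sid 0x1234 selects L1 descriptor 0x12 and STE 0x34
 * within that descriptor's L2 array; a linear table is simply indexed by
 * the sid itself.
 */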
static uint64_t *
smmu_get_ste_addr(struct smmu_softc *sc, int sid)
{
        struct smmu_strtab *strtab;
        struct l1_desc *l1_desc;
        uint64_t *addr;

        strtab = &sc->strtab;

        if (sc->features & SMMU_FEATURE_2_LVL_STREAM_TABLE) {
                l1_desc = &strtab->l1[sid >> STRTAB_SPLIT];
                addr = l1_desc->va;
                addr += (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
        } else {
                addr = (void *)((uint64_t)strtab->vaddr +
                    STRTAB_STE_DWORDS * 8 * sid);
        }

        return (addr);
}

static int
smmu_init_ste(struct smmu_softc *sc, struct smmu_cd *cd, int sid, bool bypass)
{
        uint64_t *addr;

        addr = smmu_get_ste_addr(sc, sid);

        if (bypass)
                smmu_init_ste_bypass(sc, sid, addr);
        else
                smmu_init_ste_s1(sc, cd, sid, addr);

        smmu_sync(sc);

        return (0);
}

static void
smmu_deinit_ste(struct smmu_softc *sc, int sid)
{
        uint64_t *ste;

        ste = smmu_get_ste_addr(sc, sid);
        ste[0] = 0;

        smmu_invalidate_sid(sc, sid);
        smmu_sync_cd(sc, sid, 0, true);
        smmu_invalidate_sid(sc, sid);

        smmu_sync(sc);
}
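
/*
 * Allocate and fill the Context Descriptor for a domain: an AArch64
 * stage-1 configuration with 4KB granules, TTBR0 pointing at the domain's
 * level-0 page table and TTBR1 walks disabled (CD0_EPD1).  T0SZ is encoded
 * as 64 minus the supported input address size (sc->ias).
 */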
static int
smmu_init_cd(struct smmu_softc *sc, struct smmu_domain *domain)
{
        vm_paddr_t paddr;
        uint64_t *ptr;
        uint64_t val;
        vm_size_t size;
        struct smmu_cd *cd;
        pmap_t p;

        size = 1 * (CD_DWORDS << 3);

        p = &domain->p;
        cd = domain->cd = malloc(sizeof(struct smmu_cd),
            M_SMMU, M_WAITOK | M_ZERO);

        cd->vaddr = contigmalloc(size, M_SMMU,
            M_WAITOK | M_ZERO,  /* flags */
            0,                  /* low */
            (1ul << 40) - 1,    /* high */
            size,               /* alignment */
            0);                 /* boundary */
        if (cd->vaddr == NULL) {
                device_printf(sc->dev, "Failed to allocate CD\n");
                return (ENXIO);
        }

        cd->size = size;
        cd->paddr = vtophys(cd->vaddr);

        ptr = cd->vaddr;

        val = CD0_VALID;
        val |= CD0_AA64;
        val |= CD0_R;
        val |= CD0_A;
        val |= CD0_ASET;
        val |= (uint64_t)domain->asid << CD0_ASID_S;
        val |= CD0_TG0_4KB;
        val |= CD0_EPD1; /* Disable TT1 */
        val |= ((64 - sc->ias) << CD0_T0SZ_S);
        val |= CD0_IPS_48BITS;

        paddr = p->pm_l0_paddr & CD1_TTB0_M;
        KASSERT(paddr == p->pm_l0_paddr, ("bad allocation 1"));

        ptr[1] = paddr;
        ptr[2] = 0;
        ptr[3] = MAIR_ATTR(MAIR_DEVICE_nGnRnE, VM_MEMATTR_DEVICE)       |
                 MAIR_ATTR(MAIR_NORMAL_NC, VM_MEMATTR_UNCACHEABLE)      |
                 MAIR_ATTR(MAIR_NORMAL_WB, VM_MEMATTR_WRITE_BACK)       |
                 MAIR_ATTR(MAIR_NORMAL_WT, VM_MEMATTR_WRITE_THROUGH);

        /* Install the CD. */
        ptr[0] = val;

        return (0);
}

static int
smmu_init_strtab_linear(struct smmu_softc *sc)
{
        struct smmu_strtab *strtab;
        vm_paddr_t base;
        uint32_t size;
        uint64_t reg;

        strtab = &sc->strtab;
        strtab->num_l1_entries = (1 << sc->sid_bits);

        size = strtab->num_l1_entries * (STRTAB_STE_DWORDS << 3);

        if (bootverbose)
                device_printf(sc->dev,
                    "%s: linear strtab size %d, num_l1_entries %d\n",
                    __func__, size, strtab->num_l1_entries);

        strtab->vaddr = contigmalloc(size, M_SMMU,
            M_WAITOK | M_ZERO,  /* flags */
            0,                  /* low */
            (1ul << 48) - 1,    /* high */
            size,               /* alignment */
            0);                 /* boundary */
        if (strtab->vaddr == NULL) {
                device_printf(sc->dev, "failed to allocate strtab\n");
                return (ENXIO);
        }

        reg = STRTAB_BASE_CFG_FMT_LINEAR;
        reg |= sc->sid_bits << STRTAB_BASE_CFG_LOG2SIZE_S;
        strtab->base_cfg = (uint32_t)reg;

        base = vtophys(strtab->vaddr);

        reg = base & STRTAB_BASE_ADDR_M;
        KASSERT(reg == base, ("bad allocation 2"));
        reg |= STRTAB_BASE_RA;
        strtab->base = reg;

        return (0);
}

static int
smmu_init_strtab_2lvl(struct smmu_softc *sc)
{
        struct smmu_strtab *strtab;
        vm_paddr_t base;
        uint64_t reg_base;
        uint32_t l1size;
        uint32_t size;
        uint32_t reg;
        int sz;

        strtab = &sc->strtab;

        size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
        size = min(size, sc->sid_bits - STRTAB_SPLIT);
        strtab->num_l1_entries = (1 << size);
        size += STRTAB_SPLIT;

        l1size = strtab->num_l1_entries * (STRTAB_L1_DESC_DWORDS << 3);

        if (bootverbose)
                device_printf(sc->dev,
                    "%s: size %d, l1 entries %d, l1size %d\n",
                    __func__, size, strtab->num_l1_entries, l1size);

        strtab->vaddr = contigmalloc(l1size, M_SMMU,
            M_WAITOK | M_ZERO,  /* flags */
            0,                  /* low */
            (1ul << 48) - 1,    /* high */
            l1size,             /* alignment */
            0);                 /* boundary */
        if (strtab->vaddr == NULL) {
                device_printf(sc->dev, "Failed to allocate 2lvl strtab.\n");
                return (ENOMEM);
        }

        sz = strtab->num_l1_entries * sizeof(struct l1_desc);

        strtab->l1 = malloc(sz, M_SMMU, M_WAITOK | M_ZERO);
        if (strtab->l1 == NULL) {
                contigfree(strtab->vaddr, l1size, M_SMMU);
                return (ENOMEM);
        }

        reg = STRTAB_BASE_CFG_FMT_2LVL;
        reg |= size << STRTAB_BASE_CFG_LOG2SIZE_S;
        reg |= STRTAB_SPLIT << STRTAB_BASE_CFG_SPLIT_S;
        strtab->base_cfg = (uint32_t)reg;

        base = vtophys(strtab->vaddr);

        reg_base = base & STRTAB_BASE_ADDR_M;
        KASSERT(reg_base == base, ("bad allocation 3"));
        reg_base |= STRTAB_BASE_RA;
        strtab->base = reg_base;

        return (0);
}

static int
smmu_init_strtab(struct smmu_softc *sc)
{
        int error;

        if (sc->features & SMMU_FEATURE_2_LVL_STREAM_TABLE)
                error = smmu_init_strtab_2lvl(sc);
        else
                error = smmu_init_strtab_linear(sc);

        return (error);
}

static int
smmu_init_l1_entry(struct smmu_softc *sc, int sid)
{
        struct smmu_strtab *strtab;
        struct l1_desc *l1_desc;
        uint64_t *addr;
        uint64_t val;
        size_t size;
        int i;

        strtab = &sc->strtab;
        l1_desc = &strtab->l1[sid >> STRTAB_SPLIT];
        if (l1_desc->va) {
                /* Already allocated. */
                return (0);
        }

        size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);

        l1_desc->span = STRTAB_SPLIT + 1;
        l1_desc->size = size;
        l1_desc->va = contigmalloc(size, M_SMMU,
            M_WAITOK | M_ZERO,  /* flags */
            0,                  /* low */
            (1ul << 48) - 1,    /* high */
            size,               /* alignment */
            0);                 /* boundary */
        if (l1_desc->va == NULL) {
                device_printf(sc->dev, "failed to allocate l2 entry\n");
                return (ENXIO);
        }

        l1_desc->pa = vtophys(l1_desc->va);

        i = sid >> STRTAB_SPLIT;
        addr = (void *)((uint64_t)strtab->vaddr +
            STRTAB_L1_DESC_DWORDS * 8 * i);

        /* Install the L1 entry. */
        val = l1_desc->pa & STRTAB_L1_DESC_L2PTR_M;
        KASSERT(val == l1_desc->pa, ("bad allocation 4"));
        val |= l1_desc->span;
        *addr = val;

        return (0);
}

static void __unused
smmu_deinit_l1_entry(struct smmu_softc *sc, int sid)
{
        struct smmu_strtab *strtab;
        struct l1_desc *l1_desc;
        uint64_t *addr;
        int i;

        strtab = &sc->strtab;

        i = sid >> STRTAB_SPLIT;
        addr = (void *)((uint64_t)strtab->vaddr +
            STRTAB_L1_DESC_DWORDS * 8 * i);
        *addr = 0;

        l1_desc = &strtab->l1[sid >> STRTAB_SPLIT];
        contigfree(l1_desc->va, l1_desc->size, M_SMMU);
}

static int
smmu_disable(struct smmu_softc *sc)
{
        uint32_t reg;
        int error;

        /* Disable SMMU */
        reg = bus_read_4(sc->res[0], SMMU_CR0);
        reg &= ~CR0_SMMUEN;
        error = smmu_write_ack(sc, SMMU_CR0, SMMU_CR0ACK, reg);
        if (error)
                device_printf(sc->dev, "Could not disable SMMU.\n");

        return (0);
}

static int
smmu_event_intr(void *arg)
{
        uint32_t evt[EVTQ_ENTRY_DWORDS * 2];
        struct smmu_softc *sc;

        sc = arg;

        do {
                smmu_evtq_dequeue(sc, evt);
                smmu_print_event(sc, evt);
        } while (!smmu_q_empty(&sc->evtq));

        return (FILTER_HANDLED);
}

static int __unused
smmu_sync_intr(void *arg)
{
        struct smmu_softc *sc;

        sc = arg;

        device_printf(sc->dev, "%s\n", __func__);

        return (FILTER_HANDLED);
}

static int
smmu_gerr_intr(void *arg)
{
        struct smmu_softc *sc;

        sc = arg;

        device_printf(sc->dev, "SMMU Global Error\n");

        return (FILTER_HANDLED);
}

static int
smmu_enable_interrupts(struct smmu_softc *sc)
{
        uint32_t reg;
        int error;

        /* Disable MSI. */
        bus_write_8(sc->res[0], SMMU_GERROR_IRQ_CFG0, 0);
        bus_write_4(sc->res[0], SMMU_GERROR_IRQ_CFG1, 0);
        bus_write_4(sc->res[0], SMMU_GERROR_IRQ_CFG2, 0);

        bus_write_8(sc->res[0], SMMU_EVENTQ_IRQ_CFG0, 0);
        bus_write_4(sc->res[0], SMMU_EVENTQ_IRQ_CFG1, 0);
        bus_write_4(sc->res[0], SMMU_EVENTQ_IRQ_CFG2, 0);

        if (sc->features & SMMU_FEATURE_PRI) {
                bus_write_8(sc->res[0], SMMU_PRIQ_IRQ_CFG0, 0);
                bus_write_4(sc->res[0], SMMU_PRIQ_IRQ_CFG1, 0);
                bus_write_4(sc->res[0], SMMU_PRIQ_IRQ_CFG2, 0);
        }

        /* Disable any interrupts. */
        error = smmu_write_ack(sc, SMMU_IRQ_CTRL, SMMU_IRQ_CTRLACK, 0);
        if (error) {
                device_printf(sc->dev, "Could not disable interrupts.\n");
                return (ENXIO);
        }

        /* Enable interrupts. */
        reg = IRQ_CTRL_EVENTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;
        if (sc->features & SMMU_FEATURE_PRI)
                reg |= IRQ_CTRL_PRIQ_IRQEN;

        error = smmu_write_ack(sc, SMMU_IRQ_CTRL, SMMU_IRQ_CTRLACK, reg);
        if (error) {
                device_printf(sc->dev, "Could not enable interrupts.\n");
                return (ENXIO);
        }

        return (0);
}

#ifdef DEV_ACPI
static void
smmu_configure_intr(struct smmu_softc *sc, struct resource *res)
{
        struct intr_map_data_acpi *ad;
        struct intr_map_data *data;

        data = rman_get_virtual(res);
        KASSERT(data != NULL, ("data is NULL"));

        if (data->type == INTR_MAP_DATA_ACPI) {
                ad = (struct intr_map_data_acpi *)data;
                ad->trig = INTR_TRIGGER_EDGE;
                ad->pol = INTR_POLARITY_HIGH;
        }
}
#endif

static int
smmu_setup_interrupts(struct smmu_softc *sc)
{
        device_t dev;
        int error;

        dev = sc->dev;

#ifdef DEV_ACPI
        /*
         * Manually configure the SMMU interrupts as edge-triggered,
         * as the ACPI tables carry no information about this.
         */
        smmu_configure_intr(sc, sc->res[1]);
        /* The PRI queue is not in use. */
        smmu_configure_intr(sc, sc->res[3]);
        smmu_configure_intr(sc, sc->res[4]);
#endif

        error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC,
            smmu_event_intr, NULL, sc, &sc->intr_cookie[0]);
        if (error) {
                device_printf(dev, "Couldn't setup Event interrupt handler\n");
                return (ENXIO);
        }

        error = bus_setup_intr(dev, sc->res[4], INTR_TYPE_MISC,
            smmu_gerr_intr, NULL, sc, &sc->intr_cookie[2]);
        if (error) {
                device_printf(dev, "Couldn't setup Gerr interrupt handler\n");
                return (ENXIO);
        }

        return (0);
}

static int
smmu_reset(struct smmu_softc *sc)
{
        struct smmu_cmdq_entry cmd;
        struct smmu_strtab *strtab;
        int error;
        int reg;

        reg = bus_read_4(sc->res[0], SMMU_CR0);

        if (reg & CR0_SMMUEN)
                device_printf(sc->dev,
                    "%s: Warning: SMMU is enabled\n", __func__);

        error = smmu_disable(sc);
        if (error)
                device_printf(sc->dev,
                    "%s: Could not disable SMMU.\n", __func__);

        if (smmu_enable_interrupts(sc) != 0) {
                device_printf(sc->dev, "Could not enable interrupts.\n");
                return (ENXIO);
        }

        reg = CR1_TABLE_SH_IS   |
              CR1_TABLE_OC_WBC  |
              CR1_TABLE_IC_WBC  |
              CR1_QUEUE_SH_IS   |
              CR1_QUEUE_OC_WBC  |
              CR1_QUEUE_IC_WBC;
        bus_write_4(sc->res[0], SMMU_CR1, reg);

        reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
        bus_write_4(sc->res[0], SMMU_CR2, reg);

        /* Stream table. */
        strtab = &sc->strtab;
        bus_write_8(sc->res[0], SMMU_STRTAB_BASE, strtab->base);
        bus_write_4(sc->res[0], SMMU_STRTAB_BASE_CFG, strtab->base_cfg);

        /* Command queue. */
        bus_write_8(sc->res[0], SMMU_CMDQ_BASE, sc->cmdq.base);
        bus_write_4(sc->res[0], SMMU_CMDQ_PROD, sc->cmdq.lc.prod);
        bus_write_4(sc->res[0], SMMU_CMDQ_CONS, sc->cmdq.lc.cons);

        reg = CR0_CMDQEN;
        error = smmu_write_ack(sc, SMMU_CR0, SMMU_CR0ACK, reg);
        if (error) {
                device_printf(sc->dev, "Could not enable command queue\n");
                return (ENXIO);
        }

        /* Invalidate cached configuration. */
        smmu_invalidate_all_sid(sc);

        if (sc->features & SMMU_FEATURE_HYP) {
                cmd.opcode = CMD_TLBI_EL2_ALL;
                smmu_cmdq_enqueue_cmd(sc, &cmd);
        }

        /* Invalidate TLB. */
        smmu_tlbi_all(sc);

        /* Event queue */
        bus_write_8(sc->res[0], SMMU_EVENTQ_BASE, sc->evtq.base);
        bus_write_4(sc->res[0], SMMU_EVENTQ_PROD, sc->evtq.lc.prod);
        bus_write_4(sc->res[0], SMMU_EVENTQ_CONS, sc->evtq.lc.cons);

        reg |= CR0_EVENTQEN;
        error = smmu_write_ack(sc, SMMU_CR0, SMMU_CR0ACK, reg);
        if (error) {
                device_printf(sc->dev, "Could not enable event queue\n");
                return (ENXIO);
        }

        if (sc->features & SMMU_FEATURE_PRI) {
                /* PRI queue */
                bus_write_8(sc->res[0], SMMU_PRIQ_BASE, sc->priq.base);
                bus_write_4(sc->res[0], SMMU_PRIQ_PROD, sc->priq.lc.prod);
                bus_write_4(sc->res[0], SMMU_PRIQ_CONS, sc->priq.lc.cons);

                reg |= CR0_PRIQEN;
                error = smmu_write_ack(sc, SMMU_CR0, SMMU_CR0ACK, reg);
                if (error) {
                        device_printf(sc->dev, "Could not enable PRI queue\n");
                        return (ENXIO);
                }
        }

        if (sc->features & SMMU_FEATURE_ATS) {
                reg |= CR0_ATSCHK;
                error = smmu_write_ack(sc, SMMU_CR0, SMMU_CR0ACK, reg);
                if (error) {
                        device_printf(sc->dev, "Could not enable ATS check.\n");
                        return (ENXIO);
                }
        }

        reg |= CR0_SMMUEN;
        error = smmu_write_ack(sc, SMMU_CR0, SMMU_CR0ACK, reg);
        if (error) {
                device_printf(sc->dev, "Could not enable SMMU.\n");
                return (ENXIO);
        }

        return (0);
}

static int
smmu_check_features(struct smmu_softc *sc)
{
        uint32_t reg;
        uint32_t val;

        sc->features = 0;

        reg = bus_read_4(sc->res[0], SMMU_IDR0);

        if (reg & IDR0_ST_LVL_2) {
                if (bootverbose)
                        device_printf(sc->dev,
                            "2-level stream table supported.\n");
                sc->features |= SMMU_FEATURE_2_LVL_STREAM_TABLE;
        }

        if (reg & IDR0_CD2L) {
                if (bootverbose)
                        device_printf(sc->dev,
                            "2-level CD table supported.\n");
                sc->features |= SMMU_FEATURE_2_LVL_CD;
        }

        switch (reg & IDR0_TTENDIAN_M) {
        case IDR0_TTENDIAN_MIXED:
                if (bootverbose)
                        device_printf(sc->dev, "Mixed endianness supported.\n");
                sc->features |= SMMU_FEATURE_TT_LE;
                sc->features |= SMMU_FEATURE_TT_BE;
                break;
        case IDR0_TTENDIAN_LITTLE:
                if (bootverbose)
                        device_printf(sc->dev,
                            "Little endian supported only.\n");
                sc->features |= SMMU_FEATURE_TT_LE;
                break;
        case IDR0_TTENDIAN_BIG:
                if (bootverbose)
                        device_printf(sc->dev, "Big endian supported only.\n");
                sc->features |= SMMU_FEATURE_TT_BE;
                break;
        default:
                device_printf(sc->dev, "Unsupported endianness.\n");
                return (ENXIO);
        }

        if (reg & IDR0_SEV)
                sc->features |= SMMU_FEATURE_SEV;

        if (reg & IDR0_MSI) {
                if (bootverbose)
                        device_printf(sc->dev, "MSI feature present.\n");
                sc->features |= SMMU_FEATURE_MSI;
        }

        if (reg & IDR0_HYP) {
                if (bootverbose)
                        device_printf(sc->dev, "HYP feature present.\n");
                sc->features |= SMMU_FEATURE_HYP;
        }

        if (reg & IDR0_ATS)
                sc->features |= SMMU_FEATURE_ATS;

        if (reg & IDR0_PRI)
                sc->features |= SMMU_FEATURE_PRI;

        switch (reg & IDR0_STALL_MODEL_M) {
        case IDR0_STALL_MODEL_FORCE:
                /* Stall is forced. */
                sc->features |= SMMU_FEATURE_STALL_FORCE;
                /* FALLTHROUGH */
        case IDR0_STALL_MODEL_STALL:
                sc->features |= SMMU_FEATURE_STALL;
                break;
        }

        /* Grab the translation stages supported. */
        if (reg & IDR0_S1P) {
                if (bootverbose)
                        device_printf(sc->dev,
                            "Stage 1 translation supported.\n");
                sc->features |= SMMU_FEATURE_S1P;
        }
        if (reg & IDR0_S2P) {
                if (bootverbose)
                        device_printf(sc->dev,
                            "Stage 2 translation supported.\n");
                sc->features |= SMMU_FEATURE_S2P;
        }

        switch (reg & IDR0_TTF_M) {
        case IDR0_TTF_ALL:
        case IDR0_TTF_AA64:
                sc->ias = 40;
                break;
        default:
                device_printf(sc->dev, "No AArch64 table format support.\n");
                return (ENXIO);
        }

        if (reg & IDR0_ASID16)
                sc->asid_bits = 16;
        else
                sc->asid_bits = 8;

        if (bootverbose)
                device_printf(sc->dev, "ASID bits %d\n", sc->asid_bits);

        if (reg & IDR0_VMID16)
                sc->vmid_bits = 16;
        else
                sc->vmid_bits = 8;

        reg = bus_read_4(sc->res[0], SMMU_IDR1);

        if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
                device_printf(sc->dev,
                    "Embedded implementations not supported by this driver.\n");
                return (ENXIO);
        }

        val = (reg & IDR1_CMDQS_M) >> IDR1_CMDQS_S;
        sc->cmdq.size_log2 = val;
        if (bootverbose)
                device_printf(sc->dev, "CMD queue bits %d\n", val);

        val = (reg & IDR1_EVENTQS_M) >> IDR1_EVENTQS_S;
        sc->evtq.size_log2 = val;
        if (bootverbose)
                device_printf(sc->dev, "EVENT queue bits %d\n", val);

        if (sc->features & SMMU_FEATURE_PRI) {
                val = (reg & IDR1_PRIQS_M) >> IDR1_PRIQS_S;
                sc->priq.size_log2 = val;
                if (bootverbose)
                        device_printf(sc->dev, "PRI queue bits %d\n", val);
        }

        sc->ssid_bits = (reg & IDR1_SSIDSIZE_M) >> IDR1_SSIDSIZE_S;
        sc->sid_bits = (reg & IDR1_SIDSIZE_M) >> IDR1_SIDSIZE_S;

        if (sc->sid_bits <= STRTAB_SPLIT)
                sc->features &= ~SMMU_FEATURE_2_LVL_STREAM_TABLE;

        if (bootverbose) {
                device_printf(sc->dev, "SSID bits %d\n", sc->ssid_bits);
                device_printf(sc->dev, "SID bits %d\n", sc->sid_bits);
        }

        /* IDR3 */
        reg = bus_read_4(sc->res[0], SMMU_IDR3);
        if (reg & IDR3_RIL)
                sc->features |= SMMU_FEATURE_RANGE_INV;

        /* IDR5 */
        reg = bus_read_4(sc->res[0], SMMU_IDR5);

        switch (reg & IDR5_OAS_M) {
        case IDR5_OAS_32:
                sc->oas = 32;
                break;
        case IDR5_OAS_36:
                sc->oas = 36;
                break;
        case IDR5_OAS_40:
                sc->oas = 40;
                break;
        case IDR5_OAS_42:
                sc->oas = 42;
                break;
        case IDR5_OAS_44:
                sc->oas = 44;
                break;
        case IDR5_OAS_48:
                sc->oas = 48;
                break;
        case IDR5_OAS_52:
                sc->oas = 52;
                break;
        }

        sc->pgsizes = 0;
        if (reg & IDR5_GRAN64K)
                sc->pgsizes |= 64 * 1024;
        if (reg & IDR5_GRAN16K)
                sc->pgsizes |= 16 * 1024;
        if (reg & IDR5_GRAN4K)
                sc->pgsizes |= 4 * 1024;

        if ((reg & IDR5_VAX_M) == IDR5_VAX_52)
                sc->features |= SMMU_FEATURE_VAX;

        return (0);
}

static void
smmu_init_asids(struct smmu_softc *sc)
{

        sc->asid_set_size = (1 << sc->asid_bits);
        sc->asid_set = bit_alloc(sc->asid_set_size, M_SMMU, M_WAITOK);
        mtx_init(&sc->asid_set_mutex, "asid set", NULL, MTX_SPIN);
}
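
/*
 * ASIDs are managed as a bitmap: allocation finds the first clear bit and
 * sets it; bit_ffc() stores -1 through new_asid when all (1 << asid_bits)
 * ASIDs are in use, which is reported as ENOMEM.
 */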
 1541 static int
 1542 smmu_asid_alloc(struct smmu_softc *sc, int *new_asid)
 1543 {
 1544 
 1545         mtx_lock_spin(&sc->asid_set_mutex);
 1546         bit_ffc(sc->asid_set, sc->asid_set_size, new_asid);
 1547         if (*new_asid == -1) {
 1548                 mtx_unlock_spin(&sc->asid_set_mutex);
 1549                 return (ENOMEM);
 1550         }
 1551         bit_set(sc->asid_set, *new_asid);
 1552         mtx_unlock_spin(&sc->asid_set_mutex);
 1553 
 1554         return (0);
 1555 }
 1556 
 1557 static void
 1558 smmu_asid_free(struct smmu_softc *sc, int asid)
 1559 {
 1560 
 1561         mtx_lock_spin(&sc->asid_set_mutex);
 1562         bit_clear(sc->asid_set, asid);
 1563         mtx_unlock_spin(&sc->asid_set_mutex);
 1564 }
 1565 
 1566 /*
 1567  * Device interface.
 1568  */
 1569 int
 1570 smmu_attach(device_t dev)
 1571 {
 1572         struct smmu_softc *sc;
 1573         int error;
 1574 
 1575         sc = device_get_softc(dev);
 1576         sc->dev = dev;
 1577 
 1578         mtx_init(&sc->sc_mtx, device_get_nameunit(sc->dev), "smmu", MTX_DEF);
 1579 
 1580         error = smmu_setup_interrupts(sc);
 1581         if (error) {
 1582                 bus_release_resources(dev, smmu_spec, sc->res);
 1583                 return (ENXIO);
 1584         }
 1585 
 1586         error = smmu_check_features(sc);
 1587         if (error) {
 1588                 device_printf(dev, "Some features are required "
 1589                     "but not supported by hardware.\n");
 1590                 return (ENXIO);
 1591         }
 1592 
 1593         smmu_init_asids(sc);
 1594 
 1595         error = smmu_init_queues(sc);
 1596         if (error) {
 1597                 device_printf(dev, "Couldn't allocate queues.\n");
 1598                 return (ENXIO);
 1599         }
 1600 
 1601         error = smmu_init_strtab(sc);
 1602         if (error) {
 1603                 device_printf(dev, "Couldn't allocate strtab.\n");
 1604                 return (ENXIO);
 1605         }
 1606 
 1607         error = smmu_reset(sc);
 1608         if (error) {
 1609                 device_printf(dev, "Couldn't reset SMMU.\n");
 1610                 return (ENXIO);
 1611         }
 1612 
 1613         return (0);
 1614 }
 1615 
 1616 int
 1617 smmu_detach(device_t dev)
 1618 {
 1619         struct smmu_softc *sc;
 1620 
 1621         sc = device_get_softc(dev);
 1622 
 1623         bus_release_resources(dev, smmu_spec, sc->res);
 1624 
 1625         return (0);
 1626 }
 1627 
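      /*
       * No instance variables are exported to child devices yet; log the
       * request and fail.
       */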
 1628 static int
 1629 smmu_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
 1630 {
 1631         struct smmu_softc *sc;
 1632 
 1633         sc = device_get_softc(dev);
 1634 
 1635         device_printf(sc->dev, "%s\n", __func__);
 1636 
 1637         return (ENOENT);
 1638 }
 1639 
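      /*
       * Unmap 'size' bytes at 'va' one page at a time, invalidating the
       * SMMU TLB entry for each removed page; a single sync at the end
       * completes the batch.  Fails with ENOENT on a missing mapping.
       */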
 1640 static int
 1641 smmu_unmap(device_t dev, struct iommu_domain *iodom,
 1642     vm_offset_t va, bus_size_t size)
 1643 {
 1644         struct smmu_domain *domain;
 1645         struct smmu_softc *sc;
 1646         int err;
 1647         int i;
 1648 
 1649         sc = device_get_softc(dev);
 1650 
 1651         domain = (struct smmu_domain *)iodom;
 1652 
 1653         err = 0;
 1654 
 1655         dprintf("%s: va %lx, size %ld, domain %d\n", __func__, va, size, domain->asid);
 1656 
 1657         for (i = 0; i < size; i += PAGE_SIZE) {
 1658                 if (pmap_smmu_remove(&domain->p, va) == 0) {
 1659                         /* pmap entry removed, invalidate TLB. */
 1660                         smmu_tlbi_va(sc, va, domain->asid);
 1661                 } else {
 1662                         err = ENOENT;
 1663                         break;
 1664                 }
 1665                 va += PAGE_SIZE;
 1666         }
 1667 
 1668         smmu_sync(sc);
 1669 
 1670         return (err);
 1671 }
 1672 
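      /*
       * Map the physical pages in 'ma' at 'va', one page at a time,
       * invalidating any stale TLB entry for each VA, then issue a single
       * sync to complete the batch.
       */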
 1673 static int
 1674 smmu_map(device_t dev, struct iommu_domain *iodom,
 1675     vm_offset_t va, vm_page_t *ma, vm_size_t size,
 1676     vm_prot_t prot)
 1677 {
 1678         struct smmu_domain *domain;
 1679         struct smmu_softc *sc;
 1680         vm_paddr_t pa;
 1681         int error;
 1682         int i;
 1683 
 1684         sc = device_get_softc(dev);
 1685 
 1686         domain = (struct smmu_domain *)iodom;
 1687 
 1688         dprintf("%s: va %lx, size %ld, domain %d\n", __func__, va, size,
 1689             domain->asid);
 1690 
 1691         for (i = 0; size > 0; size -= PAGE_SIZE) {
 1692                 pa = VM_PAGE_TO_PHYS(ma[i++]);
 1693                 error = pmap_smmu_enter(&domain->p, va, pa, prot, 0);
 1694                 if (error)
 1695                         return (error);
 1696                 smmu_tlbi_va(sc, va, domain->asid);
 1697                 va += PAGE_SIZE;
 1698         }
 1699 
 1700         smmu_sync(sc);
 1701 
 1702         return (0);
 1703 }
 1704 
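      /*
       * Create a translation domain: allocate an ASID and a private pmap,
       * initialize a Context Descriptor and link the domain into the
       * unit's list.  The usable IOVA space is capped at 48 bits (see the
       * comment below).
       */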
 1705 static struct iommu_domain *
 1706 smmu_domain_alloc(device_t dev, struct iommu_unit *iommu)
 1707 {
 1708         struct iommu_domain *iodom;
 1709         struct smmu_domain *domain;
 1710         struct smmu_unit *unit;
 1711         struct smmu_softc *sc;
 1712         int error;
 1713         int new_asid;
 1714 
 1715         sc = device_get_softc(dev);
 1716 
 1717         unit = (struct smmu_unit *)iommu;
 1718 
 1719         domain = malloc(sizeof(*domain), M_SMMU, M_WAITOK | M_ZERO);
 1720 
 1721         error = smmu_asid_alloc(sc, &new_asid);
 1722         if (error) {
 1723                 free(domain, M_SMMU);
 1724                 device_printf(sc->dev,
 1725                     "Could not allocate ASID for a new domain.\n");
 1726                 return (NULL);
 1727         }
 1728 
 1729         domain->asid = (uint16_t)new_asid;
 1730 
 1731         iommu_pmap_pinit(&domain->p);
 1732         PMAP_LOCK_INIT(&domain->p);
 1733 
 1734         error = smmu_init_cd(sc, domain);
 1735         if (error) {
 1736                 free(domain, M_SMMU);
 1737                 device_printf(sc->dev, "Could not initialize CD\n");
 1738                 return (NULL);
 1739         }
 1740 
 1741         smmu_tlbi_asid(sc, domain->asid);
 1742 
 1743         LIST_INIT(&domain->ctx_list);
 1744 
 1745         IOMMU_LOCK(iommu);
 1746         LIST_INSERT_HEAD(&unit->domain_list, domain, next);
 1747         IOMMU_UNLOCK(iommu);
 1748 
 1749         iodom = &domain->iodom;
 1750 
 1751         /*
 1752          * Use 48-bit address space regardless of VAX bit
 1753          * as we need 64k IOMMU_PAGE_SIZE for 52-bit space.
 1754          */
 1755         iodom->end = MAXADDR_48BIT;
 1756 
 1757         return (iodom);
 1758 }
 1759 
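      /*
       * Tear down a domain: drop its page tables, invalidate and release
       * its ASID, and free the Context Descriptor memory.
       */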
 1760 static void
 1761 smmu_domain_free(device_t dev, struct iommu_domain *iodom)
 1762 {
 1763         struct smmu_domain *domain;
 1764         struct smmu_softc *sc;
 1765         struct smmu_cd *cd;
 1766 
 1767         sc = device_get_softc(dev);
 1768 
 1769         domain = (struct smmu_domain *)iodom;
 1770 
 1771         LIST_REMOVE(domain, next);
 1772 
 1773         cd = domain->cd;
 1774 
 1775         iommu_pmap_remove_pages(&domain->p);
 1776         iommu_pmap_release(&domain->p);
 1777 
 1778         smmu_tlbi_asid(sc, domain->asid);
 1779         smmu_asid_free(sc, domain->asid);
 1780 
 1781         contigfree(cd->vaddr, cd->size, M_SMMU);
 1782         free(cd, M_SMMU);
 1783 
 1784         free(domain, M_SMMU);
 1785 }
 1786 
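      /*
       * For a bus-wide context, replicate this context's STE across the
       * neighbouring rids so sibling slots/functions on the bus share the
       * same translation.
       */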
 1787 static int
 1788 smmu_set_buswide(device_t dev, struct smmu_domain *domain,
 1789     struct smmu_ctx *ctx)
 1790 {
 1791         struct smmu_softc *sc;
 1792         int i;
 1793 
 1794         sc = device_get_softc(dev);
 1795 
 1796         for (i = 0; i < PCI_SLOTMAX; i++)
 1797                 smmu_init_ste(sc, domain->cd, (ctx->sid | i), ctx->bypass);
 1798 
 1799         return (0);
 1800 }
 1801 
 1802 #ifdef DEV_ACPI
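      /*
       * Translate a PCI child's rid into an (SMMU xref, StreamID) pair
       * using the ACPI IORT table.
       */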
 1803 static int
 1804 smmu_pci_get_sid_acpi(device_t child, u_int *xref0, u_int *sid0)
 1805 {
 1806         uint16_t rid;
 1807         u_int xref;
 1808         int seg;
 1809         int err;
 1810         int sid;
 1811 
 1812         seg = pci_get_domain(child);
 1813         rid = pci_get_rid(child);
 1814 
 1815         err = acpi_iort_map_pci_smmuv3(seg, rid, &xref, &sid);
 1816         if (err == 0) {
 1817                 if (sid0)
 1818                         *sid0 = sid;
 1819                 if (xref0)
 1820                         *xref0 = xref;
 1821         }
 1822 
 1823         return (err);
 1824 }
 1825 #endif
 1826 
 1827 #ifdef FDT
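      /*
       * Translate a PCI child's rid into an (SMMU xref, StreamID) pair via
       * the OFW iommu-map; the xref used here is the SMMU register base
       * address.
       */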
 1828 static int
 1829 smmu_pci_get_sid_fdt(device_t child, u_int *xref0, u_int *sid0)
 1830 {
 1831         struct pci_id_ofw_iommu pi;
 1832         uint64_t base, size;
 1833         phandle_t node;
 1834         u_int xref;
 1835         int err;
 1836 
 1837         err = pci_get_id(child, PCI_ID_OFW_IOMMU, (uintptr_t *)&pi);
 1838         if (err == 0) {
 1839                 /* Our xref is memory base address. */
 1840                 node = OF_node_from_xref(pi.xref);
 1841                 fdt_regsize(node, &base, &size);
 1842                 xref = base;
 1843 
 1844                 if (sid0)
 1845                         *sid0 = pi.id;
 1846                 if (xref0)
 1847                         *xref0 = xref;
 1848         }
 1849 
 1850         return (err);
 1851 }
 1852 #endif
 1853 
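      /*
       * Allocate a per-device context and link it into the domain.  When
       * 'disabled' is set the StreamID is configured for bypass (no
       * translation).
       */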
 1854 static struct iommu_ctx *
 1855 smmu_ctx_alloc(device_t dev, struct iommu_domain *iodom, device_t child,
 1856     bool disabled)
 1857 {
 1858         struct smmu_domain *domain;
 1859         struct smmu_ctx *ctx;
 1860 
 1861         domain = (struct smmu_domain *)iodom;
 1862 
 1863         ctx = malloc(sizeof(struct smmu_ctx), M_SMMU, M_WAITOK | M_ZERO);
 1864         ctx->dev = child;
 1865         ctx->domain = domain;
 1866         if (disabled)
 1867                 ctx->bypass = true;
 1868 
 1869         IOMMU_DOMAIN_LOCK(iodom);
 1870         LIST_INSERT_HEAD(&domain->ctx_list, ctx, next);
 1871         IOMMU_DOMAIN_UNLOCK(iodom);
 1872 
 1873         return (&ctx->ioctx);
 1874 }
 1875 
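      /*
       * Complete context setup: resolve the child's StreamID (ACPI or
       * FDT), allocate the level-1 stream table descriptor when a 2-level
       * table is in use, and install the STE.  Buswide PCI contexts have
       * their STE replicated across the whole bus.
       */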
 1876 static int
 1877 smmu_ctx_init(device_t dev, struct iommu_ctx *ioctx)
 1878 {
 1879         struct smmu_domain *domain;
 1880         struct iommu_domain *iodom;
 1881         struct smmu_softc *sc;
 1882         struct smmu_ctx *ctx;
 1883         devclass_t pci_class;
 1884         u_int sid;
 1885         int err;
 1886 
 1887         ctx = (struct smmu_ctx *)ioctx;
 1888 
 1889         sc = device_get_softc(dev);
 1890 
 1891         domain = ctx->domain;
 1892         iodom = (struct iommu_domain *)domain;
 1893 
 1894         pci_class = devclass_find("pci");
 1895         if (device_get_devclass(device_get_parent(ctx->dev)) == pci_class) {
 1896 #ifdef DEV_ACPI
 1897                 err = smmu_pci_get_sid_acpi(ctx->dev, NULL, &sid);
 1898 #else
 1899                 err = smmu_pci_get_sid_fdt(ctx->dev, NULL, &sid);
 1900 #endif
 1901                 if (err)
 1902                         return (err);
 1903 
 1904                 ioctx->rid = pci_get_rid(ctx->dev);
 1905                 ctx->sid = sid;
 1906                 ctx->vendor = pci_get_vendor(ctx->dev);
 1907                 ctx->device = pci_get_device(ctx->dev);
 1908         }
 1909 
 1910         if (sc->features & SMMU_FEATURE_2_LVL_STREAM_TABLE) {
 1911                 err = smmu_init_l1_entry(sc, ctx->sid);
 1912                 if (err)
 1913                         return (err);
 1914         }
 1915 
 1916         /*
 1917          * Example StreamIDs on the Arm Neoverse N1 SDP board:
 1918          * 0x800 xhci
 1919          * 0x700 re (RealTek NIC)
 1920          * 0x600 sata
 1921          */
 1922 
 1923         smmu_init_ste(sc, domain->cd, ctx->sid, ctx->bypass);
 1924 
 1925         if (device_get_devclass(device_get_parent(ctx->dev)) == pci_class)
 1926                 if (iommu_is_buswide_ctx(iodom->iommu, pci_get_bus(ctx->dev)))
 1927                         smmu_set_buswide(dev, domain, ctx);
 1928 
 1929         return (0);
 1930 }
 1931 
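      /*
       * Remove the context's STE and unlink it from its domain; the IOMMU
       * unit lock must be held (asserted below).
       */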
 1932 static void
 1933 smmu_ctx_free(device_t dev, struct iommu_ctx *ioctx)
 1934 {
 1935         struct smmu_softc *sc;
 1936         struct smmu_ctx *ctx;
 1937 
 1938         IOMMU_ASSERT_LOCKED(ioctx->domain->iommu);
 1939 
 1940         sc = device_get_softc(dev);
 1941         ctx = (struct smmu_ctx *)ioctx;
 1942 
 1943         smmu_deinit_ste(sc, ctx->sid);
 1944 
 1945         LIST_REMOVE(ctx, next);
 1946 
 1947         free(ctx, M_SMMU);
 1948 }
 1949 
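      /*
       * Find the context that owns a StreamID, e.g. when decoding a fault
       * reported through the event queue.
       */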
 1950 struct smmu_ctx *
 1951 smmu_ctx_lookup_by_sid(device_t dev, u_int sid)
 1952 {
 1953         struct smmu_softc *sc;
 1954         struct smmu_domain *domain;
 1955         struct smmu_unit *unit;
 1956         struct smmu_ctx *ctx;
 1957 
 1958         sc = device_get_softc(dev);
 1959 
 1960         unit = &sc->unit;
 1961 
 1962         LIST_FOREACH(domain, &unit->domain_list, next) {
 1963                 LIST_FOREACH(ctx, &domain->ctx_list, next) {
 1964                         if (ctx->sid == sid)
 1965                                 return (ctx);
 1966                 }
 1967         }
 1968 
 1969         return (NULL);
 1970 }
 1971 
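      /*
       * Find the context attached to 'child', taking each domain's lock
       * while walking its context list.
       */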
 1972 static struct iommu_ctx *
 1973 smmu_ctx_lookup(device_t dev, device_t child)
 1974 {
 1975         struct iommu_unit *iommu __diagused;
 1976         struct smmu_softc *sc;
 1977         struct smmu_domain *domain;
 1978         struct smmu_unit *unit;
 1979         struct smmu_ctx *ctx;
 1980 
 1981         sc = device_get_softc(dev);
 1982 
 1983         unit = &sc->unit;
 1984         iommu = &unit->iommu;
 1985 
 1986         IOMMU_ASSERT_LOCKED(iommu);
 1987 
 1988         LIST_FOREACH(domain, &unit->domain_list, next) {
 1989                 IOMMU_DOMAIN_LOCK(&domain->iodom);
 1990                 LIST_FOREACH(ctx, &domain->ctx_list, next) {
 1991                         if (ctx->dev == child) {
 1992                                 IOMMU_DOMAIN_UNLOCK(&domain->iodom);
 1993                                 return (&ctx->ioctx);
 1994                         }
 1995                 }
 1996                 IOMMU_DOMAIN_UNLOCK(&domain->iodom);
 1997         }
 1998 
 1999         return (NULL);
 2000 }
 2001 
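      /*
       * Return 0 if 'child' is translated by this SMMU instance, i.e. its
       * IORT/FDT xref matches ours.
       */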
 2002 static int
 2003 smmu_find(device_t dev, device_t child)
 2004 {
 2005         struct smmu_softc *sc;
 2006         u_int xref;
 2007         int err;
 2008 
 2009         sc = device_get_softc(dev);
 2010 
 2011 #ifdef DEV_ACPI
 2012         err = smmu_pci_get_sid_acpi(child, &xref, NULL);
 2013 #else
 2014         err = smmu_pci_get_sid_fdt(child, &xref, NULL);
 2015 #endif
 2016         if (err)
 2017                 return (ENOENT);
 2018 
 2019         /* Check if xref is ours. */
 2020         if (xref != sc->xref)
 2021                 return (EFAULT);
 2022 
 2023         return (0);
 2024 }
 2025 
 2026 #ifdef FDT
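      /*
       * Parse the FDT iommu specifier for a context: a single cell holding
       * the StreamID is expected.
       */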
 2027 static int
 2028 smmu_ofw_md_data(device_t dev, struct iommu_ctx *ioctx, pcell_t *cells,
 2029     int ncells)
 2030 {
 2031         struct smmu_ctx *ctx;
 2032 
 2033         ctx = (struct smmu_ctx *)ioctx;
 2034 
 2035         if (ncells != 1)
 2036                 return (-1);
 2037 
 2038         ctx->sid = cells[0];
 2039 
 2040         return (0);
 2041 }
 2042 #endif
 2043 
 2044 static device_method_t smmu_methods[] = {
 2045         /* Device interface */
 2046         DEVMETHOD(device_detach,        smmu_detach),
 2047 
 2048         /* SMMU interface */
 2049         DEVMETHOD(iommu_find,           smmu_find),
 2050         DEVMETHOD(iommu_map,            smmu_map),
 2051         DEVMETHOD(iommu_unmap,          smmu_unmap),
 2052         DEVMETHOD(iommu_domain_alloc,   smmu_domain_alloc),
 2053         DEVMETHOD(iommu_domain_free,    smmu_domain_free),
 2054         DEVMETHOD(iommu_ctx_alloc,      smmu_ctx_alloc),
 2055         DEVMETHOD(iommu_ctx_init,       smmu_ctx_init),
 2056         DEVMETHOD(iommu_ctx_free,       smmu_ctx_free),
 2057         DEVMETHOD(iommu_ctx_lookup,     smmu_ctx_lookup),
 2058 #ifdef FDT
 2059         DEVMETHOD(iommu_ofw_md_data,    smmu_ofw_md_data),
 2060 #endif
 2061 
 2062         /* Bus interface */
 2063         DEVMETHOD(bus_read_ivar,        smmu_read_ivar),
 2064 
 2065         /* End */
 2066         DEVMETHOD_END
 2067 };
 2068 
 2069 DEFINE_CLASS_0(smmu, smmu_driver, smmu_methods, sizeof(struct smmu_softc));
