The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/arm64/iommu/smmu.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause
    3  *
    4  * Copyright (c) 2019-2020 Ruslan Bukin <br@bsdpad.com>
    5  *
    6  * This software was developed by SRI International and the University of
    7  * Cambridge Computer Laboratory (Department of Computer Science and
    8  * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
    9  * DARPA SSITH research programme.
   10  *
   11  * Redistribution and use in source and binary forms, with or without
   12  * modification, are permitted provided that the following conditions
   13  * are met:
   14  * 1. Redistributions of source code must retain the above copyright
   15  *    notice, this list of conditions and the following disclaimer.
   16  * 2. Redistributions in binary form must reproduce the above copyright
   17  *    notice, this list of conditions and the following disclaimer in the
   18  *    documentation and/or other materials provided with the distribution.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   30  * SUCH DAMAGE.
   31  */
   32 
   33 /*
   34  * Hardware overview.
   35  *
   36  * An incoming transaction from a peripheral device has an address, size,
   37  * attributes and StreamID.
   38  *
   39  * In case of PCI-based devices, StreamID is a PCI rid.
   40  *
   41  * The StreamID is used to select a Stream Table Entry (STE) in a Stream table,
   42  * which contains per-device configuration.
   43  *
   44  * Stream table is a linear or 2-level walk table (this driver supports both).
   45  * Note that a linear table could occupy 1GB or more of memory depending on
   46  * sid_bits value.
   47  *
   48  * STE is used to locate a Context Descriptor, which is a struct in memory
   49  * that describes stages of translation, translation table type, pointer to
   50  * level 0 of page tables, ASID, etc.
   51  *
   52  * Hardware supports two stages of translation: Stage1 (S1) and Stage2 (S2):
   53  *  o S1 is used for the host machine traffic translation
   54  *  o S2 is for a hypervisor
   55  *
   56  * This driver enables S1 stage with standard AArch64 page tables.
   57  *
   58  * Note that SMMU does not share TLB with a main CPU.
   59  * Command queue is used by this driver to Invalidate SMMU TLB, STE cache.
   60  *
   61  * An arm64 SoC could have more than one SMMU instance.
   62  * ACPI IORT table describes which SMMU unit is assigned for a particular
   63  * peripheral device.
   64  *
   65  * Queues.
   66  *
   67  * Register interface and Memory-based circular buffer queues are used
   68  * to interface with the SMMU.
   69  *
   70  * These are a Command queue for commands to send to the SMMU and an Event
   71  * queue for event/fault reports from the SMMU. Optionally PRI queue is
   72  * designed for PCIe page requests reception.
   73  *
   74  * Note that not all hardware supports PRI services. For instance, they were
   75  * not found in Neoverse N1 SDP machine.
   76  * (This driver does not implement the PRI queue.)
   77  *
   78  * All SMMU queues are arranged as circular buffers in memory. They are used
   79  * in a producer-consumer fashion so that an output queue contains data
   80  * produced by the SMMU and consumed by software.
   81  * An input queue contains data produced by software, consumed by the SMMU.
   82  *
   83  * Interrupts.
   84  *
   85  * Interrupts are not required by this driver for normal operation.
   86  * The standard wired interrupt is only triggered when an event comes from
   87  * the SMMU, which is only in a case of errors (e.g. translation fault).
   88  */
   89 
   90 #include "opt_platform.h"
   91 #include "opt_acpi.h"
   92 
   93 #include <sys/cdefs.h>
   94 __FBSDID("$FreeBSD$");
   95 
   96 #include <sys/param.h>
   97 #include <sys/bitstring.h>
   98 #include <sys/bus.h>
   99 #include <sys/kernel.h>
  100 #include <sys/malloc.h>
  101 #include <sys/rman.h>
  102 #include <sys/lock.h>
  103 #include <sys/tree.h>
  104 #include <sys/taskqueue.h>
  105 #include <vm/vm.h>
  106 #include <vm/vm_page.h>
  107 #if DEV_ACPI
  108 #include <contrib/dev/acpica/include/acpi.h>
  109 #include <dev/acpica/acpivar.h>
  110 #endif
  111 #include <dev/pci/pcireg.h>
  112 #include <dev/pci/pcivar.h>
  113 #include <dev/iommu/iommu.h>
  114 
  115 #include "iommu.h"
  116 #include "iommu_if.h"
  117 
  118 #include "smmureg.h"
  119 #include "smmuvar.h"
  120 
/* Stream table geometry: 2-level table L1 size shift and sid split point. */
#define STRTAB_L1_SZ_SHIFT      20
#define STRTAB_SPLIT            8

/* L1 descriptor: mask of the L2 pointer field; descriptor size in dwords. */
#define STRTAB_L1_DESC_L2PTR_M  (0x3fffffffffff << 6)
#define STRTAB_L1_DESC_DWORDS   1

/* Stream Table Entry size, in 64-bit dwords. */
#define STRTAB_STE_DWORDS       8

/* Per-entry sizes (in 64-bit dwords) for the three hardware queues. */
#define CMDQ_ENTRY_DWORDS       2
#define EVTQ_ENTRY_DWORDS       4
#define PRIQ_ENTRY_DWORDS       2

/* Context Descriptor size, in 64-bit dwords. */
#define CD_DWORDS               8

/*
 * Queue pointer helpers. A PROD/CONS value packs an index in the low
 * size_log2 bits, a wrap flag just above them, and an overflow flag
 * in bit 31.
 */
#define Q_WRP(q, p)             ((p) & (1 << (q)->size_log2))
#define Q_IDX(q, p)             ((p) & ((1 << (q)->size_log2) - 1))
#define Q_OVF(p)                ((p) & (1 << 31)) /* Event queue overflowed */

/* Required alignment for queue memory. */
#define SMMU_Q_ALIGN            (64 * 1024)

/* One register window plus three IRQ resources. */
static struct resource_spec smmu_spec[] = {
        { SYS_RES_MEMORY, 0, RF_ACTIVE },
        { SYS_RES_IRQ, 0, RF_ACTIVE },
        { SYS_RES_IRQ, 1, RF_ACTIVE },
        { SYS_RES_IRQ, 2, RF_ACTIVE },
        RESOURCE_SPEC_END
};

MALLOC_DEFINE(M_SMMU, "SMMU", SMMU_DEVSTR);

/* Debug output is compiled out by default. */
#define dprintf(fmt, ...)
  152 
/*
 * Human-readable description of an SMMU event queue record,
 * looked up by identifier in smmu_print_event().
 */
struct smmu_event {
        int ident;      /* Event ID as found in evt[0] & 0xff. */
        char *str;      /* Short mnemonic. */
        char *msg;      /* Long description. */
};

/* Event table; terminated by the { 0, NULL, NULL } sentinel. */
static struct smmu_event events[] = {
        { 0x01, "F_UUT",
                "Unsupported Upstream Transaction."},
        { 0x02, "C_BAD_STREAMID",
                "Transaction StreamID out of range."},
        { 0x03, "F_STE_FETCH",
                "Fetch of STE caused external abort."},
        { 0x04, "C_BAD_STE",
                "Used STE invalid."},
        { 0x05, "F_BAD_ATS_TREQ",
                "Address Translation Request disallowed for a StreamID "
                "and a PCIe ATS Translation Request received."},
        { 0x06, "F_STREAM_DISABLED",
                "The STE of a transaction marks non-substream transactions "
                "disabled."},
        { 0x07, "F_TRANSL_FORBIDDEN",
                "An incoming PCIe transaction is marked Translated but "
                "SMMU bypass is disallowed for this StreamID."},
        { 0x08, "C_BAD_SUBSTREAMID",
                "Incoming SubstreamID present, but configuration is invalid."},
        { 0x09, "F_CD_FETCH",
                "Fetch of CD caused external abort."},
        { 0x0a, "C_BAD_CD",
                "Fetched CD invalid."},
        { 0x0b, "F_WALK_EABT",
                "An external abort occurred fetching (or updating) "
                "a translation table descriptor."},
        { 0x10, "F_TRANSLATION",
                "Translation fault."},
        { 0x11, "F_ADDR_SIZE",
                "Address Size fault."},
        { 0x12, "F_ACCESS",
                "Access flag fault due to AF == 0 in a page or block TTD."},
        { 0x13, "F_PERMISSION",
                "Permission fault occurred on page access."},
        { 0x20, "F_TLB_CONFLICT",
                "A TLB conflict occurred because of the transaction."},
        { 0x21, "F_CFG_CONFLICT",
                "A configuration cache conflict occurred due to "
                "the transaction."},
        { 0x24, "E_PAGE_REQUEST",
                "Speculative page request hint."},
        { 0x25, "F_VMS_FETCH",
                "Fetch of VMS caused external abort."},
        /* Sentinel. */
        { 0, NULL, NULL },
};
  205 
  206 static int
  207 smmu_q_has_space(struct smmu_queue *q)
  208 {
  209 
  210         /*
  211          * See 6.3.27 SMMU_CMDQ_PROD
  212          *
  213          * There is space in the queue for additional commands if:
  214          *  SMMU_CMDQ_CONS.RD != SMMU_CMDQ_PROD.WR ||
  215          *  SMMU_CMDQ_CONS.RD_WRAP == SMMU_CMDQ_PROD.WR_WRAP
  216          */
  217 
  218         if (Q_IDX(q, q->lc.cons) != Q_IDX(q, q->lc.prod) ||
  219             Q_WRP(q, q->lc.cons) == Q_WRP(q, q->lc.prod))
  220                 return (1);
  221 
  222         return (0);
  223 }
  224 
  225 static int
  226 smmu_q_empty(struct smmu_queue *q)
  227 {
  228 
  229         if (Q_IDX(q, q->lc.cons) == Q_IDX(q, q->lc.prod) &&
  230             Q_WRP(q, q->lc.cons) == Q_WRP(q, q->lc.prod))
  231                 return (1);
  232 
  233         return (0);
  234 }
  235 
  236 static int __unused
  237 smmu_q_consumed(struct smmu_queue *q, uint32_t prod)
  238 {
  239 
  240         if ((Q_WRP(q, q->lc.cons) == Q_WRP(q, prod)) &&
  241             (Q_IDX(q, q->lc.cons) >= Q_IDX(q, prod)))
  242                 return (1);
  243 
  244         if ((Q_WRP(q, q->lc.cons) != Q_WRP(q, prod)) &&
  245             (Q_IDX(q, q->lc.cons) <= Q_IDX(q, prod)))
  246                 return (1);
  247 
  248         return (0);
  249 }
  250 
  251 static uint32_t
  252 smmu_q_inc_cons(struct smmu_queue *q)
  253 {
  254         uint32_t cons;
  255         uint32_t val;
  256 
  257         cons = (Q_WRP(q, q->lc.cons) | Q_IDX(q, q->lc.cons)) + 1;
  258         val = (Q_OVF(q->lc.cons) | Q_WRP(q, cons) | Q_IDX(q, cons));
  259 
  260         return (val);
  261 }
  262 
  263 static uint32_t
  264 smmu_q_inc_prod(struct smmu_queue *q)
  265 {
  266         uint32_t prod;
  267         uint32_t val;
  268 
  269         prod = (Q_WRP(q, q->lc.prod) | Q_IDX(q, q->lc.prod)) + 1;
  270         val = (Q_OVF(q->lc.prod) | Q_WRP(q, prod) | Q_IDX(q, prod));
  271 
  272         return (val);
  273 }
  274 
  275 static int
  276 smmu_write_ack(struct smmu_softc *sc, uint32_t reg,
  277     uint32_t reg_ack, uint32_t val)
  278 {
  279         uint32_t v;
  280         int timeout;
  281 
  282         timeout = 100000;
  283 
  284         bus_write_4(sc->res[0], reg, val);
  285 
  286         do {
  287                 v = bus_read_4(sc->res[0], reg_ack);
  288                 if (v == val)
  289                         break;
  290         } while (timeout--);
  291 
  292         if (timeout <= 0) {
  293                 device_printf(sc->dev, "Failed to write reg.\n");
  294                 return (-1);
  295         }
  296 
  297         return (0);
  298 }
  299 
  300 static inline int
  301 ilog2(long x)
  302 {
  303 
  304         KASSERT(x > 0 && powerof2(x), ("%s: invalid arg %ld", __func__, x));
  305 
  306         return (flsl(x) - 1);
  307 }
  308 
  309 static int
  310 smmu_init_queue(struct smmu_softc *sc, struct smmu_queue *q,
  311     uint32_t prod_off, uint32_t cons_off, uint32_t dwords)
  312 {
  313         int sz;
  314 
  315         sz = (1 << q->size_log2) * dwords * 8;
  316 
  317         /* Set up the command circular buffer */
  318         q->vaddr = contigmalloc(sz, M_SMMU,
  319             M_WAITOK | M_ZERO, 0, (1ul << 48) - 1, SMMU_Q_ALIGN, 0);
  320         if (q->vaddr == NULL) {
  321                 device_printf(sc->dev, "failed to allocate %d bytes\n", sz);
  322                 return (-1);
  323         }
  324 
  325         q->prod_off = prod_off;
  326         q->cons_off = cons_off;
  327         q->paddr = vtophys(q->vaddr);
  328 
  329         q->base = CMDQ_BASE_RA | EVENTQ_BASE_WA | PRIQ_BASE_WA;
  330         q->base |= q->paddr & Q_BASE_ADDR_M;
  331         q->base |= q->size_log2 << Q_LOG2SIZE_S;
  332 
  333         return (0);
  334 }
  335 
  336 static int
  337 smmu_init_queues(struct smmu_softc *sc)
  338 {
  339         int err;
  340 
  341         /* Command queue. */
  342         err = smmu_init_queue(sc, &sc->cmdq,
  343             SMMU_CMDQ_PROD, SMMU_CMDQ_CONS, CMDQ_ENTRY_DWORDS);
  344         if (err)
  345                 return (ENXIO);
  346 
  347         /* Event queue. */
  348         err = smmu_init_queue(sc, &sc->evtq,
  349             SMMU_EVENTQ_PROD, SMMU_EVENTQ_CONS, EVTQ_ENTRY_DWORDS);
  350         if (err)
  351                 return (ENXIO);
  352 
  353         if (!(sc->features & SMMU_FEATURE_PRI))
  354                 return (0);
  355 
  356         /* PRI queue. */
  357         err = smmu_init_queue(sc, &sc->priq,
  358             SMMU_PRIQ_PROD, SMMU_PRIQ_CONS, PRIQ_ENTRY_DWORDS);
  359         if (err)
  360                 return (ENXIO);
  361 
  362         return (0);
  363 }
  364 
  365 /*
  366  * Dump 2LVL or linear STE.
  367  */
  368 static void
  369 smmu_dump_ste(struct smmu_softc *sc, int sid)
  370 {
  371         struct smmu_strtab *strtab;
  372         struct l1_desc *l1_desc;
  373         uint64_t *ste, *l1;
  374         int i;
  375 
  376         strtab = &sc->strtab;
  377 
  378         if (sc->features & SMMU_FEATURE_2_LVL_STREAM_TABLE) {
  379                 i = sid >> STRTAB_SPLIT;
  380                 l1 = (void *)((uint64_t)strtab->vaddr +
  381                     STRTAB_L1_DESC_DWORDS * 8 * i);
  382                 device_printf(sc->dev, "L1 ste == %lx\n", l1[0]);
  383 
  384                 l1_desc = &strtab->l1[i];
  385                 ste = l1_desc->va;
  386                 if (ste == NULL) /* L2 is not initialized */
  387                         return;
  388         } else {
  389                 ste = (void *)((uint64_t)strtab->vaddr +
  390                     sid * (STRTAB_STE_DWORDS << 3));
  391         }
  392 
  393         /* Dump L2 or linear STE. */
  394         for (i = 0; i < STRTAB_STE_DWORDS; i++)
  395                 device_printf(sc->dev, "ste[%d] == %lx\n", i, ste[i]);
  396 }
  397 
  398 static void __unused
  399 smmu_dump_cd(struct smmu_softc *sc, struct smmu_cd *cd)
  400 {
  401         uint64_t *vaddr;
  402         int i;
  403 
  404         device_printf(sc->dev, "%s\n", __func__);
  405 
  406         vaddr = cd->vaddr;
  407         for (i = 0; i < CD_DWORDS; i++)
  408                 device_printf(sc->dev, "cd[%d] == %lx\n", i, vaddr[i]);
  409 }
  410 
  411 static void
  412 smmu_evtq_dequeue(struct smmu_softc *sc, uint32_t *evt)
  413 {
  414         struct smmu_queue *evtq;
  415         void *entry_addr;
  416 
  417         evtq = &sc->evtq;
  418 
  419         evtq->lc.val = bus_read_8(sc->res[0], evtq->prod_off);
  420         entry_addr = (void *)((uint64_t)evtq->vaddr +
  421             evtq->lc.cons * EVTQ_ENTRY_DWORDS * 8);
  422         memcpy(evt, entry_addr, EVTQ_ENTRY_DWORDS * 8);
  423         evtq->lc.cons = smmu_q_inc_cons(evtq);
  424         bus_write_4(sc->res[0], evtq->cons_off, evtq->lc.cons);
  425 }
  426 
  427 static void
  428 smmu_print_event(struct smmu_softc *sc, uint32_t *evt)
  429 {
  430         struct smmu_event *ev;
  431         uintptr_t input_addr;
  432         uint8_t event_id;
  433         device_t dev;
  434         int sid;
  435         int i;
  436 
  437         dev = sc->dev;
  438 
  439         ev = NULL;
  440         event_id = evt[0] & 0xff;
  441         for (i = 0; events[i].ident != 0; i++) {
  442                 if (events[i].ident == event_id) {
  443                         ev = &events[i];
  444                         break;
  445                 }
  446         }
  447 
  448         sid = evt[1];
  449         input_addr = evt[5];
  450         input_addr <<= 32;
  451         input_addr |= evt[4];
  452 
  453         if (smmu_quirks_check(dev, sid, event_id, input_addr)) {
  454                 /* The event is known. Don't print anything. */
  455                 return;
  456         }
  457 
  458         if (ev) {
  459                 device_printf(sc->dev,
  460                     "Event %s (%s) received.\n", ev->str, ev->msg);
  461         } else
  462                 device_printf(sc->dev, "Event 0x%x received\n", event_id);
  463 
  464         device_printf(sc->dev, "SID %x, Input Address: %jx\n",
  465             sid, input_addr);
  466 
  467         for (i = 0; i < 8; i++)
  468                 device_printf(sc->dev, "evt[%d] %x\n", i, evt[i]);
  469 
  470         smmu_dump_ste(sc, sid);
  471 }
  472 
  473 static void
  474 make_cmd(struct smmu_softc *sc, uint64_t *cmd,
  475     struct smmu_cmdq_entry *entry)
  476 {
  477 
  478         memset(cmd, 0, CMDQ_ENTRY_DWORDS * 8);
  479         cmd[0] = entry->opcode << CMD_QUEUE_OPCODE_S;
  480 
  481         switch (entry->opcode) {
  482         case CMD_TLBI_NH_VA:
  483                 cmd[0] |= (uint64_t)entry->tlbi.asid << TLBI_0_ASID_S;
  484                 cmd[1] = entry->tlbi.addr & TLBI_1_ADDR_M;
  485                 if (entry->tlbi.leaf) {
  486                         /*
  487                          * Leaf flag means that only cached entries
  488                          * for the last level of translation table walk
  489                          * are required to be invalidated.
  490                          */
  491                         cmd[1] |= TLBI_1_LEAF;
  492                 }
  493                 break;
  494         case CMD_TLBI_NH_ASID:
  495                 cmd[0] |= (uint64_t)entry->tlbi.asid << TLBI_0_ASID_S;
  496                 break;
  497         case CMD_TLBI_NSNH_ALL:
  498         case CMD_TLBI_NH_ALL:
  499         case CMD_TLBI_EL2_ALL:
  500                 break;
  501         case CMD_CFGI_CD:
  502                 cmd[0] |= ((uint64_t)entry->cfgi.ssid << CFGI_0_SSID_S);
  503                 /* FALLTROUGH */
  504         case CMD_CFGI_STE:
  505                 cmd[0] |= ((uint64_t)entry->cfgi.sid << CFGI_0_STE_SID_S);
  506                 cmd[1] |= ((uint64_t)entry->cfgi.leaf << CFGI_1_LEAF_S);
  507                 break;
  508         case CMD_CFGI_STE_RANGE:
  509                 cmd[1] = (31 << CFGI_1_STE_RANGE_S);
  510                 break;
  511         case CMD_SYNC:
  512                 cmd[0] |= SYNC_0_MSH_IS | SYNC_0_MSIATTR_OIWB;
  513                 if (entry->sync.msiaddr) {
  514                         cmd[0] |= SYNC_0_CS_SIG_IRQ;
  515                         cmd[1] |= (entry->sync.msiaddr & SYNC_1_MSIADDRESS_M);
  516                 } else
  517                         cmd[0] |= SYNC_0_CS_SIG_SEV;
  518                 break;
  519         case CMD_PREFETCH_CONFIG:
  520                 cmd[0] |= ((uint64_t)entry->prefetch.sid << PREFETCH_0_SID_S);
  521                 break;
  522         };
  523 }
  524 
/*
 * Encode 'entry' and place it on the command queue, advancing the
 * hardware producer index.
 *
 * The softc lock serializes producers. The consumer index is re-read
 * from hardware in a busy-wait loop until a free slot appears, so
 * this may spin while the SMMU drains the queue.
 */
static void
smmu_cmdq_enqueue_cmd(struct smmu_softc *sc, struct smmu_cmdq_entry *entry)
{
        uint64_t cmd[CMDQ_ENTRY_DWORDS];
        struct smmu_queue *cmdq;
        void *entry_addr;

        cmdq = &sc->cmdq;

        /* Encode into the 2-dword wire format before taking the lock. */
        make_cmd(sc, cmd, entry);

        SMMU_LOCK(sc);

        /* Ensure that a space is available. */
        do {
                cmdq->lc.cons = bus_read_4(sc->res[0], cmdq->cons_off);
        } while (smmu_q_has_space(cmdq) == 0);

        /* Write the command to the current prod entry. */
        entry_addr = (void *)((uint64_t)cmdq->vaddr +
            Q_IDX(cmdq, cmdq->lc.prod) * CMDQ_ENTRY_DWORDS * 8);
        memcpy(entry_addr, cmd, CMDQ_ENTRY_DWORDS * 8);

        /* Increment prod index. */
        cmdq->lc.prod = smmu_q_inc_prod(cmdq);
        bus_write_4(sc->res[0], cmdq->prod_off, cmdq->lc.prod);

        SMMU_UNLOCK(sc);
}
  554 
  555 static void __unused
  556 smmu_poll_until_consumed(struct smmu_softc *sc, struct smmu_queue *q)
  557 {
  558 
  559         while (1) {
  560                 q->lc.val = bus_read_8(sc->res[0], q->prod_off);
  561                 if (smmu_q_empty(q))
  562                         break;
  563                 cpu_spinwait();
  564         }
  565 }
  566 
/*
 * Issue a CMD_SYNC and wait for it to complete.
 *
 * The sync is configured to perform an MSI write aimed back at the
 * command queue slot holding the sync command itself; the slot reads
 * as zero once the SMMU has consumed everything before it.
 * Always returns 0; a timeout is only reported on the console.
 */
static int
smmu_sync(struct smmu_softc *sc)
{
        struct smmu_cmdq_entry cmd;
        struct smmu_queue *q;
        uint32_t *base;
        int timeout;
        int prod;

        q = &sc->cmdq;
        prod = q->lc.prod;

        /* Enqueue sync command. */
        cmd.opcode = CMD_SYNC;
        cmd.sync.msiaddr = q->paddr + Q_IDX(q, prod) * CMDQ_ENTRY_DWORDS * 8;
        smmu_cmdq_enqueue_cmd(sc, &cmd);

        /* Wait for the sync completion. */
        base = (void *)((uint64_t)q->vaddr +
            Q_IDX(q, prod) * CMDQ_ENTRY_DWORDS * 8);

        /*
         * It takes around 200 loops (6 instructions each)
         * on Neoverse N1 to complete the sync.
         */
        timeout = 10000;

        do {
                if (*base == 0) {
                        /* MSI write completed. */
                        break;
                }
                cpu_spinwait();
        } while (timeout--);

        /* timeout goes negative only when the loop exhausted its budget. */
        if (timeout < 0)
                device_printf(sc->dev, "Failed to sync\n");

        return (0);
}
  607 
  608 static int
  609 smmu_sync_cd(struct smmu_softc *sc, int sid, int ssid, bool leaf)
  610 {
  611         struct smmu_cmdq_entry cmd;
  612 
  613         cmd.opcode = CMD_CFGI_CD;
  614         cmd.cfgi.sid = sid;
  615         cmd.cfgi.ssid = ssid;
  616         cmd.cfgi.leaf = leaf;
  617         smmu_cmdq_enqueue_cmd(sc, &cmd);
  618 
  619         return (0);
  620 }
  621 
  622 static void
  623 smmu_invalidate_all_sid(struct smmu_softc *sc)
  624 {
  625         struct smmu_cmdq_entry cmd;
  626 
  627         /* Invalidate cached config */
  628         cmd.opcode = CMD_CFGI_STE_RANGE;
  629         smmu_cmdq_enqueue_cmd(sc, &cmd);
  630         smmu_sync(sc);
  631 }
  632 
  633 static void
  634 smmu_tlbi_all(struct smmu_softc *sc)
  635 {
  636         struct smmu_cmdq_entry cmd;
  637 
  638         /* Invalidate entire TLB */
  639         cmd.opcode = CMD_TLBI_NSNH_ALL;
  640         smmu_cmdq_enqueue_cmd(sc, &cmd);
  641         smmu_sync(sc);
  642 }
  643 
  644 static void
  645 smmu_tlbi_asid(struct smmu_softc *sc, uint16_t asid)
  646 {
  647         struct smmu_cmdq_entry cmd;
  648 
  649         /* Invalidate TLB for an ASID. */
  650         cmd.opcode = CMD_TLBI_NH_ASID;
  651         cmd.tlbi.asid = asid;
  652         smmu_cmdq_enqueue_cmd(sc, &cmd);
  653         smmu_sync(sc);
  654 }
  655 
  656 static void
  657 smmu_tlbi_va(struct smmu_softc *sc, vm_offset_t va, uint16_t asid)
  658 {
  659         struct smmu_cmdq_entry cmd;
  660 
  661         /* Invalidate specific range */
  662         cmd.opcode = CMD_TLBI_NH_VA;
  663         cmd.tlbi.asid = asid;
  664         cmd.tlbi.vmid = 0;
  665         cmd.tlbi.leaf = true; /* We change only L3. */
  666         cmd.tlbi.addr = va;
  667         smmu_cmdq_enqueue_cmd(sc, &cmd);
  668 }
  669 
  670 static void
  671 smmu_invalidate_sid(struct smmu_softc *sc, uint32_t sid)
  672 {
  673         struct smmu_cmdq_entry cmd;
  674 
  675         /* Invalidate cached config */
  676         cmd.opcode = CMD_CFGI_STE;
  677         cmd.cfgi.sid = sid;
  678         smmu_cmdq_enqueue_cmd(sc, &cmd);
  679         smmu_sync(sc);
  680 }
  681 
  682 static void
  683 smmu_prefetch_sid(struct smmu_softc *sc, uint32_t sid)
  684 {
  685         struct smmu_cmdq_entry cmd;
  686 
  687         cmd.opcode = CMD_PREFETCH_CONFIG;
  688         cmd.prefetch.sid = sid;
  689         smmu_cmdq_enqueue_cmd(sc, &cmd);
  690         smmu_sync(sc);
  691 }
  692 
  693 /*
  694  * Init STE in bypass mode. Traffic is not translated for the sid.
  695  */
static void
smmu_init_ste_bypass(struct smmu_softc *sc, uint32_t sid, uint64_t *ste)
{
        uint64_t val;

        val = STE0_VALID | STE0_CONFIG_BYPASS;

        /* Fill dwords 1-7 first; dword 0 carries VALID and goes last. */
        ste[1] = STE1_SHCFG_INCOMING | STE1_EATS_FULLATS;
        ste[2] = 0;
        ste[3] = 0;
        ste[4] = 0;
        ste[5] = 0;
        ste[6] = 0;
        ste[7] = 0;

        /* Drop any stale cached copy before publishing the new STE. */
        smmu_invalidate_sid(sc, sid);
        ste[0] = val;
        dsb(sy);
        /* Invalidate again so the SMMU refetches the now-valid entry. */
        smmu_invalidate_sid(sc, sid);

        /* Warm the configuration cache for this sid. */
        smmu_prefetch_sid(sc, sid);
}
  718 
  719 /*
  720  * Enable Stage1 (S1) translation for the sid.
  721  */
/*
 * Ordering-sensitive: dwords 1-7 are written and the configuration
 * cache invalidated before dword 0 (carrying the VALID bit) is
 * published, so the SMMU never observes a half-built STE.
 * Always returns 0.
 */
static int
smmu_init_ste_s1(struct smmu_softc *sc, struct smmu_cd *cd,
    uint32_t sid, uint64_t *ste)
{
        uint64_t val;

        val = STE0_VALID;

        /* S1 */
        ste[1] = STE1_EATS_FULLATS      |
                 STE1_S1CSH_IS          |
                 STE1_S1CIR_WBRA        |
                 STE1_S1COR_WBRA        |
                 STE1_STRW_NS_EL1;
        ste[2] = 0;
        ste[3] = 0;
        ste[4] = 0;
        ste[5] = 0;
        ste[6] = 0;
        ste[7] = 0;

        /* Disable stalling unless the hardware forces stall mode. */
        if (sc->features & SMMU_FEATURE_STALL &&
            ((sc->features & SMMU_FEATURE_STALL_FORCE) == 0))
                ste[1] |= STE1_S1STALLD;

        /* Configure STE */
        val |= (cd->paddr & STE0_S1CONTEXTPTR_M);
        val |= STE0_CONFIG_S1_TRANS;

        smmu_invalidate_sid(sc, sid);

        /* The STE[0] has to be written in a single blast, last of all. */
        ste[0] = val;
        dsb(sy);

        smmu_invalidate_sid(sc, sid);
        smmu_sync_cd(sc, sid, 0, true);
        smmu_invalidate_sid(sc, sid);

        /* The sid will be used soon most likely. */
        smmu_prefetch_sid(sc, sid);

        return (0);
}
  766 
  767 static int
  768 smmu_init_ste(struct smmu_softc *sc, struct smmu_cd *cd, int sid, bool bypass)
  769 {
  770         struct smmu_strtab *strtab;
  771         struct l1_desc *l1_desc;
  772         uint64_t *addr;
  773 
  774         strtab = &sc->strtab;
  775 
  776         if (sc->features & SMMU_FEATURE_2_LVL_STREAM_TABLE) {
  777                 l1_desc = &strtab->l1[sid >> STRTAB_SPLIT];
  778                 addr = l1_desc->va;
  779                 addr += (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
  780         } else {
  781                 addr = (void *)((uint64_t)strtab->vaddr +
  782                     STRTAB_STE_DWORDS * 8 * sid);
  783         };
  784 
  785         if (bypass)
  786                 smmu_init_ste_bypass(sc, sid, addr);
  787         else
  788                 smmu_init_ste_s1(sc, cd, sid, addr);
  789 
  790         smmu_sync(sc);
  791 
  792         return (0);
  793 }
  794 
  795 static int
  796 smmu_init_cd(struct smmu_softc *sc, struct smmu_domain *domain)
  797 {
  798         vm_paddr_t paddr;
  799         uint64_t *ptr;
  800         uint64_t val;
  801         vm_size_t size;
  802         struct smmu_cd *cd;
  803         pmap_t p;
  804 
  805         size = 1 * (CD_DWORDS << 3);
  806 
  807         p = &domain->p;
  808         cd = domain->cd = malloc(sizeof(struct smmu_cd),
  809             M_SMMU, M_WAITOK | M_ZERO);
  810 
  811         cd->vaddr = contigmalloc(size, M_SMMU,
  812             M_WAITOK | M_ZERO,  /* flags */
  813             0,                  /* low */
  814             (1ul << 40) - 1,    /* high */
  815             size,               /* alignment */
  816             0);                 /* boundary */
  817         if (cd->vaddr == NULL) {
  818                 device_printf(sc->dev, "Failed to allocate CD\n");
  819                 return (ENXIO);
  820         }
  821 
  822         cd->size = size;
  823         cd->paddr = vtophys(cd->vaddr);
  824 
  825         ptr = cd->vaddr;
  826 
  827         val = CD0_VALID;
  828         val |= CD0_AA64;
  829         val |= CD0_R;
  830         val |= CD0_A;
  831         val |= CD0_ASET;
  832         val |= (uint64_t)domain->asid << CD0_ASID_S;
  833         val |= CD0_TG0_4KB;
  834         val |= CD0_EPD1; /* Disable TT1 */
  835         val |= ((64 - sc->ias) << CD0_T0SZ_S);
  836         val |= CD0_IPS_48BITS;
  837 
  838         paddr = p->pm_l0_paddr & CD1_TTB0_M;
  839         KASSERT(paddr == p->pm_l0_paddr, ("bad allocation 1"));
  840 
  841         ptr[1] = paddr;
  842         ptr[2] = 0;
  843         ptr[3] = MAIR_ATTR(MAIR_DEVICE_nGnRnE, VM_MEMATTR_DEVICE)       |
  844                  MAIR_ATTR(MAIR_NORMAL_NC, VM_MEMATTR_UNCACHEABLE)      |
  845                  MAIR_ATTR(MAIR_NORMAL_WB, VM_MEMATTR_WRITE_BACK)       |
  846                  MAIR_ATTR(MAIR_NORMAL_WT, VM_MEMATTR_WRITE_THROUGH);
  847 
  848         /* Install the CD. */
  849         ptr[0] = val;
  850 
  851         return (0);
  852 }
  853 
  854 static int
  855 smmu_init_strtab_linear(struct smmu_softc *sc)
  856 {
  857         struct smmu_strtab *strtab;
  858         vm_paddr_t base;
  859         uint32_t size;
  860         uint64_t reg;
  861 
  862         strtab = &sc->strtab;
  863         strtab->num_l1_entries = (1 << sc->sid_bits);
  864 
  865         size = strtab->num_l1_entries * (STRTAB_STE_DWORDS << 3);
  866 
  867         if (bootverbose)
  868                 device_printf(sc->dev,
  869                     "%s: linear strtab size %d, num_l1_entries %d\n",
  870                     __func__, size, strtab->num_l1_entries);
  871 
  872         strtab->vaddr = contigmalloc(size, M_SMMU,
  873             M_WAITOK | M_ZERO,  /* flags */
  874             0,                  /* low */
  875             (1ul << 48) - 1,    /* high */
  876             size,               /* alignment */
  877             0);                 /* boundary */
  878         if (strtab->vaddr == NULL) {
  879                 device_printf(sc->dev, "failed to allocate strtab\n");
  880                 return (ENXIO);
  881         }
  882 
  883         reg = STRTAB_BASE_CFG_FMT_LINEAR;
  884         reg |= sc->sid_bits << STRTAB_BASE_CFG_LOG2SIZE_S;
  885         strtab->base_cfg = (uint32_t)reg;
  886 
  887         base = vtophys(strtab->vaddr);
  888 
  889         reg = base & STRTAB_BASE_ADDR_M;
  890         KASSERT(reg == base, ("bad allocation 2"));
  891         reg |= STRTAB_BASE_RA;
  892         strtab->base = reg;
  893 
  894         return (0);
  895 }
  896 
/*
 * Allocate and describe a 2-level stream table: an L1 array of descriptors,
 * each of which will later point at an L2 page of STEs (installed on demand
 * by smmu_init_l1_entry()).  Fills in strtab->vaddr/l1/base/base_cfg; the
 * registers are programmed later, in smmu_reset().
 *
 * Returns 0 on success or ENOMEM on allocation failure.
 */
static int
smmu_init_strtab_2lvl(struct smmu_softc *sc)
{
	struct smmu_strtab *strtab;
	vm_paddr_t base;
	uint64_t reg_base;
	uint32_t l1size;
	uint32_t size;
	uint32_t reg;
	int sz;

	strtab = &sc->strtab;

	/*
	 * log2 of the number of L1 entries: bounded by the L1 table size
	 * budget (STRTAB_L1_SZ_SHIFT) and by how many SID bits remain after
	 * the low STRTAB_SPLIT bits are resolved inside the L2 tables.
	 */
	size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
	size = min(size, sc->sid_bits - STRTAB_SPLIT);
	strtab->num_l1_entries = (1 << size);
	/* Total log2(SIDs covered) programmed into STRTAB_BASE_CFG. */
	size += STRTAB_SPLIT;

	l1size = strtab->num_l1_entries * (STRTAB_L1_DESC_DWORDS << 3);

	if (bootverbose)
		device_printf(sc->dev,
		    "%s: size %d, l1 entries %d, l1size %d\n",
		    __func__, size, strtab->num_l1_entries, l1size);

	/* L1 table: naturally aligned, physically contiguous, below 2^48. */
	strtab->vaddr = contigmalloc(l1size, M_SMMU,
	    M_WAITOK | M_ZERO,	/* flags */
	    0,			/* low */
	    (1ul << 48) - 1,	/* high */
	    l1size,		/* alignment */
	    0);			/* boundary */
	if (strtab->vaddr == NULL) {
		device_printf(sc->dev, "Failed to allocate 2lvl strtab.\n");
		return (ENOMEM);
	}

	/* Software bookkeeping (one l1_desc per L1 entry). */
	sz = strtab->num_l1_entries * sizeof(struct l1_desc);

	strtab->l1 = malloc(sz, M_SMMU, M_WAITOK | M_ZERO);
	if (strtab->l1 == NULL) {
		contigfree(strtab->vaddr, l1size, M_SMMU);
		return (ENOMEM);
	}

	/* 2-level format, total log2 size, and the split point. */
	reg = STRTAB_BASE_CFG_FMT_2LVL;
	reg |= size << STRTAB_BASE_CFG_LOG2SIZE_S;
	reg |= STRTAB_SPLIT << STRTAB_BASE_CFG_SPLIT_S;
	strtab->base_cfg = (uint32_t)reg;

	base = vtophys(strtab->vaddr);

	/* Physical base for SMMU_STRTAB_BASE, with read-allocate hint. */
	reg_base = base & STRTAB_BASE_ADDR_M;
	KASSERT(reg_base == base, ("bad allocation 3"));
	reg_base |= STRTAB_BASE_RA;
	strtab->base = reg_base;

	return (0);
}
  955 
  956 static int
  957 smmu_init_strtab(struct smmu_softc *sc)
  958 {
  959         int error;
  960 
  961         if (sc->features & SMMU_FEATURE_2_LVL_STREAM_TABLE)
  962                 error = smmu_init_strtab_2lvl(sc);
  963         else
  964                 error = smmu_init_strtab_linear(sc);
  965 
  966         return (error);
  967 }
  968 
  969 static int
  970 smmu_init_l1_entry(struct smmu_softc *sc, int sid)
  971 {
  972         struct smmu_strtab *strtab;
  973         struct l1_desc *l1_desc;
  974         uint64_t *addr;
  975         uint64_t val;
  976         size_t size;
  977         int i;
  978 
  979         strtab = &sc->strtab;
  980         l1_desc = &strtab->l1[sid >> STRTAB_SPLIT];
  981 
  982         size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
  983 
  984         l1_desc->span = STRTAB_SPLIT + 1;
  985         l1_desc->size = size;
  986         l1_desc->va = contigmalloc(size, M_SMMU,
  987             M_WAITOK | M_ZERO,  /* flags */
  988             0,                  /* low */
  989             (1ul << 48) - 1,    /* high */
  990             size,               /* alignment */
  991             0);                 /* boundary */
  992         if (l1_desc->va == NULL) {
  993                 device_printf(sc->dev, "failed to allocate l2 entry\n");
  994                 return (ENXIO);
  995         }
  996 
  997         l1_desc->pa = vtophys(l1_desc->va);
  998 
  999         i = sid >> STRTAB_SPLIT;
 1000         addr = (void *)((uint64_t)strtab->vaddr +
 1001             STRTAB_L1_DESC_DWORDS * 8 * i);
 1002 
 1003         /* Install the L1 entry. */
 1004         val = l1_desc->pa & STRTAB_L1_DESC_L2PTR_M;
 1005         KASSERT(val == l1_desc->pa, ("bad allocation 4"));
 1006         val |= l1_desc->span;
 1007         *addr = val;
 1008 
 1009         return (0);
 1010 }
 1011 
 1012 static void
 1013 smmu_deinit_l1_entry(struct smmu_softc *sc, int sid)
 1014 {
 1015         struct smmu_strtab *strtab;
 1016         struct l1_desc *l1_desc;
 1017         uint64_t *addr;
 1018         int i;
 1019 
 1020         strtab = &sc->strtab;
 1021 
 1022         i = sid >> STRTAB_SPLIT;
 1023         addr = (void *)((uint64_t)strtab->vaddr +
 1024             STRTAB_L1_DESC_DWORDS * 8 * i);
 1025         *addr = 0;
 1026 
 1027         if (sc->features & SMMU_FEATURE_2_LVL_STREAM_TABLE) {
 1028                 l1_desc = &strtab->l1[sid >> STRTAB_SPLIT];
 1029                 contigfree(l1_desc->va, l1_desc->size, M_SMMU);
 1030         }
 1031 }
 1032 
 1033 static int
 1034 smmu_disable(struct smmu_softc *sc)
 1035 {
 1036         uint32_t reg;
 1037         int error;
 1038 
 1039         /* Disable SMMU */
 1040         reg = bus_read_4(sc->res[0], SMMU_CR0);
 1041         reg &= ~CR0_SMMUEN;
 1042         error = smmu_write_ack(sc, SMMU_CR0, SMMU_CR0ACK, reg);
 1043         if (error)
 1044                 device_printf(sc->dev, "Could not disable SMMU.\n");
 1045 
 1046         return (0);
 1047 }
 1048 
 1049 static int
 1050 smmu_event_intr(void *arg)
 1051 {
 1052         uint32_t evt[EVTQ_ENTRY_DWORDS * 2];
 1053         struct smmu_softc *sc;
 1054 
 1055         sc = arg;
 1056 
 1057         do {
 1058                 smmu_evtq_dequeue(sc, evt);
 1059                 smmu_print_event(sc, evt);
 1060         } while (!smmu_q_empty(&sc->evtq));
 1061 
 1062         return (FILTER_HANDLED);
 1063 }
 1064 
 1065 static int __unused
 1066 smmu_sync_intr(void *arg)
 1067 {
 1068         struct smmu_softc *sc;
 1069 
 1070         sc = arg;
 1071 
 1072         device_printf(sc->dev, "%s\n", __func__);
 1073 
 1074         return (FILTER_HANDLED);
 1075 }
 1076 
 1077 static int
 1078 smmu_gerr_intr(void *arg)
 1079 {
 1080         struct smmu_softc *sc;
 1081 
 1082         sc = arg;
 1083 
 1084         device_printf(sc->dev, "SMMU Global Error\n");
 1085 
 1086         return (FILTER_HANDLED);
 1087 }
 1088 
 1089 static int
 1090 smmu_enable_interrupts(struct smmu_softc *sc)
 1091 {
 1092         uint32_t reg;
 1093         int error;
 1094 
 1095         /* Disable MSI. */
 1096         bus_write_8(sc->res[0], SMMU_GERROR_IRQ_CFG0, 0);
 1097         bus_write_4(sc->res[0], SMMU_GERROR_IRQ_CFG1, 0);
 1098         bus_write_4(sc->res[0], SMMU_GERROR_IRQ_CFG2, 0);
 1099 
 1100         bus_write_8(sc->res[0], SMMU_EVENTQ_IRQ_CFG0, 0);
 1101         bus_write_4(sc->res[0], SMMU_EVENTQ_IRQ_CFG1, 0);
 1102         bus_write_4(sc->res[0], SMMU_EVENTQ_IRQ_CFG2, 0);
 1103 
 1104         if (sc->features & CR0_PRIQEN) {
 1105                 bus_write_8(sc->res[0], SMMU_PRIQ_IRQ_CFG0, 0);
 1106                 bus_write_4(sc->res[0], SMMU_PRIQ_IRQ_CFG1, 0);
 1107                 bus_write_4(sc->res[0], SMMU_PRIQ_IRQ_CFG2, 0);
 1108         }
 1109 
 1110         /* Disable any interrupts. */
 1111         error = smmu_write_ack(sc, SMMU_IRQ_CTRL, SMMU_IRQ_CTRLACK, 0);
 1112         if (error) {
 1113                 device_printf(sc->dev, "Could not disable interrupts.\n");
 1114                 return (ENXIO);
 1115         }
 1116 
 1117         /* Enable interrupts. */
 1118         reg = IRQ_CTRL_EVENTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;
 1119         if (sc->features & SMMU_FEATURE_PRI)
 1120                 reg |= IRQ_CTRL_PRIQ_IRQEN;
 1121 
 1122         error = smmu_write_ack(sc, SMMU_IRQ_CTRL, SMMU_IRQ_CTRLACK, reg);
 1123         if (error) {
 1124                 device_printf(sc->dev, "Could not enable interrupts.\n");
 1125                 return (ENXIO);
 1126         }
 1127 
 1128         return (0);
 1129 }
 1130 
 1131 #if DEV_ACPI
 1132 static void
 1133 smmu_configure_intr(struct smmu_softc *sc, struct resource *res)
 1134 {
 1135         struct intr_map_data_acpi *ad;
 1136         struct intr_map_data *data;
 1137 
 1138         data = rman_get_virtual(res);
 1139         KASSERT(data != NULL, ("data is NULL"));
 1140 
 1141         if (data->type == INTR_MAP_DATA_ACPI) {
 1142                 ad = (struct intr_map_data_acpi *)data;
 1143                 ad->trig = INTR_TRIGGER_EDGE;
 1144                 ad->pol = INTR_POLARITY_HIGH;
 1145         }
 1146 }
 1147 #endif
 1148 
 1149 static int
 1150 smmu_setup_interrupts(struct smmu_softc *sc)
 1151 {
 1152         device_t dev;
 1153         int error;
 1154 
 1155         dev = sc->dev;
 1156 
 1157 #if DEV_ACPI
 1158         /*
 1159          * Configure SMMU interrupts as EDGE triggered manually
 1160          * as ACPI tables carries no information for that.
 1161          */
 1162         smmu_configure_intr(sc, sc->res[1]);
 1163         smmu_configure_intr(sc, sc->res[2]);
 1164         smmu_configure_intr(sc, sc->res[3]);
 1165 #endif
 1166 
 1167         error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC,
 1168             smmu_event_intr, NULL, sc, &sc->intr_cookie[0]);
 1169         if (error) {
 1170                 device_printf(dev, "Couldn't setup Event interrupt handler\n");
 1171                 return (ENXIO);
 1172         }
 1173 
 1174         error = bus_setup_intr(dev, sc->res[3], INTR_TYPE_MISC,
 1175             smmu_gerr_intr, NULL, sc, &sc->intr_cookie[2]);
 1176         if (error) {
 1177                 device_printf(dev, "Couldn't setup Gerr interrupt handler\n");
 1178                 return (ENXIO);
 1179         }
 1180 
 1181         return (0);
 1182 }
 1183 
/*
 * Bring the SMMU from an unknown state into full operation: disable it,
 * program the cacheability/shareability attributes, the stream table and
 * all queues, invalidate cached configuration and TLBs, and finally set
 * SMMUEN.  The register programming order follows the SMMUv3 bring-up
 * sequence; 'reg' accumulates CR0 enable bits as each unit comes up.
 *
 * Returns 0 on success or ENXIO if any CR0 write is not acknowledged.
 */
static int
smmu_reset(struct smmu_softc *sc)
{
	struct smmu_cmdq_entry cmd;
	struct smmu_strtab *strtab;
	int error;
	int reg;

	reg = bus_read_4(sc->res[0], SMMU_CR0);

	if (reg & CR0_SMMUEN)
		device_printf(sc->dev,
		    "%s: Warning: SMMU is enabled\n", __func__);

	error = smmu_disable(sc);
	if (error)
		device_printf(sc->dev,
		    "%s: Could not disable SMMU.\n", __func__);

	if (smmu_enable_interrupts(sc) != 0) {
		device_printf(sc->dev, "Could not enable interrupts.\n");
		return (ENXIO);
	}

	/* Inner-shareable, write-back cacheable table and queue accesses. */
	reg = CR1_TABLE_SH_IS   |
	      CR1_TABLE_OC_WBC  |
	      CR1_TABLE_IC_WBC  |
	      CR1_QUEUE_SH_IS   |
	      CR1_QUEUE_OC_WBC  |
	      CR1_QUEUE_IC_WBC;
	bus_write_4(sc->res[0], SMMU_CR1, reg);

	/* Private TLBs, record invalid SIDs, E2H translation regime. */
	reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
	bus_write_4(sc->res[0], SMMU_CR2, reg);

	/* Stream table. */
	strtab = &sc->strtab;
	bus_write_8(sc->res[0], SMMU_STRTAB_BASE, strtab->base);
	bus_write_4(sc->res[0], SMMU_STRTAB_BASE_CFG, strtab->base_cfg);

	/* Command queue. */
	bus_write_8(sc->res[0], SMMU_CMDQ_BASE, sc->cmdq.base);
	bus_write_4(sc->res[0], SMMU_CMDQ_PROD, sc->cmdq.lc.prod);
	bus_write_4(sc->res[0], SMMU_CMDQ_CONS, sc->cmdq.lc.cons);

	/* The command queue must come up first: invalidations go through it. */
	reg = CR0_CMDQEN;
	error = smmu_write_ack(sc, SMMU_CR0, SMMU_CR0ACK, reg);
	if (error) {
		device_printf(sc->dev, "Could not enable command queue\n");
		return (ENXIO);
	}

	/* Invalidate cached configuration. */
	smmu_invalidate_all_sid(sc);

	if (sc->features & SMMU_FEATURE_HYP) {
		cmd.opcode = CMD_TLBI_EL2_ALL;
		smmu_cmdq_enqueue_cmd(sc, &cmd);
	};

	/* Invalidate TLB. */
	smmu_tlbi_all(sc);

	/* Event queue */
	bus_write_8(sc->res[0], SMMU_EVENTQ_BASE, sc->evtq.base);
	bus_write_4(sc->res[0], SMMU_EVENTQ_PROD, sc->evtq.lc.prod);
	bus_write_4(sc->res[0], SMMU_EVENTQ_CONS, sc->evtq.lc.cons);

	/* Keep CMDQEN set while turning on each further unit. */
	reg |= CR0_EVENTQEN;
	error = smmu_write_ack(sc, SMMU_CR0, SMMU_CR0ACK, reg);
	if (error) {
		device_printf(sc->dev, "Could not enable event queue\n");
		return (ENXIO);
	}

	if (sc->features & SMMU_FEATURE_PRI) {
		/* PRI queue */
		bus_write_8(sc->res[0], SMMU_PRIQ_BASE, sc->priq.base);
		bus_write_4(sc->res[0], SMMU_PRIQ_PROD, sc->priq.lc.prod);
		bus_write_4(sc->res[0], SMMU_PRIQ_CONS, sc->priq.lc.cons);

		reg |= CR0_PRIQEN;
		error = smmu_write_ack(sc, SMMU_CR0, SMMU_CR0ACK, reg);
		if (error) {
			device_printf(sc->dev, "Could not enable PRI queue\n");
			return (ENXIO);
		}
	}

	if (sc->features & SMMU_FEATURE_ATS) {
		reg |= CR0_ATSCHK;
		error = smmu_write_ack(sc, SMMU_CR0, SMMU_CR0ACK, reg);
		if (error) {
			device_printf(sc->dev, "Could not enable ATS check.\n");
			return (ENXIO);
		}
	}

	/* Finally enable translation. */
	reg |= CR0_SMMUEN;
	error = smmu_write_ack(sc, SMMU_CR0, SMMU_CR0ACK, reg);
	if (error) {
		device_printf(sc->dev, "Could not enable SMMU.\n");
		return (ENXIO);
	}

	return (0);
}
 1291 
/*
 * Probe the SMMU ID registers (IDR0/IDR1/IDR3/IDR5) and record the
 * supported features and sizing parameters in the softc: feature bits,
 * input/output address sizes, ASID/VMID widths, queue sizes, SID/SSID
 * widths and supported page granules.
 *
 * Returns 0 on success, or ENXIO when a hard requirement is missing
 * (AArch64 table format, a usable endianness, non-embedded implementation).
 */
static int
smmu_check_features(struct smmu_softc *sc)
{
	uint32_t reg;
	uint32_t val;

	sc->features = 0;

	reg = bus_read_4(sc->res[0], SMMU_IDR0);

	if (reg & IDR0_ST_LVL_2) {
		if (bootverbose)
			device_printf(sc->dev,
			    "2-level stream table supported.\n");
		sc->features |= SMMU_FEATURE_2_LVL_STREAM_TABLE;
	}

	if (reg & IDR0_CD2L) {
		if (bootverbose)
			device_printf(sc->dev,
			    "2-level CD table supported.\n");
		sc->features |= SMMU_FEATURE_2_LVL_CD;
	}

	/* Translation table endianness; at least one form must exist. */
	switch (reg & IDR0_TTENDIAN_M) {
	case IDR0_TTENDIAN_MIXED:
		if (bootverbose)
			device_printf(sc->dev, "Mixed endianess supported.\n");
		sc->features |= SMMU_FEATURE_TT_LE;
		sc->features |= SMMU_FEATURE_TT_BE;
		break;
	case IDR0_TTENDIAN_LITTLE:
		if (bootverbose)
			device_printf(sc->dev,
			    "Little endian supported only.\n");
		sc->features |= SMMU_FEATURE_TT_LE;
		break;
	case IDR0_TTENDIAN_BIG:
		if (bootverbose)
			device_printf(sc->dev, "Big endian supported only.\n");
		sc->features |= SMMU_FEATURE_TT_BE;
		break;
	default:
		device_printf(sc->dev, "Unsupported endianness.\n");
		return (ENXIO);
	}

	if (reg & IDR0_SEV)
		sc->features |= SMMU_FEATURE_SEV;

	if (reg & IDR0_MSI) {
		if (bootverbose)
			device_printf(sc->dev, "MSI feature present.\n");
		sc->features |= SMMU_FEATURE_MSI;
	}

	if (reg & IDR0_HYP) {
		if (bootverbose)
			device_printf(sc->dev, "HYP feature present.\n");
		sc->features |= SMMU_FEATURE_HYP;
	}

	if (reg & IDR0_ATS)
		sc->features |= SMMU_FEATURE_ATS;

	if (reg & IDR0_PRI)
		sc->features |= SMMU_FEATURE_PRI;

	switch (reg & IDR0_STALL_MODEL_M) {
	case IDR0_STALL_MODEL_FORCE:
		/* Stall is forced. */
		sc->features |= SMMU_FEATURE_STALL_FORCE;
		/* FALLTHROUGH */
	case IDR0_STALL_MODEL_STALL:
		sc->features |= SMMU_FEATURE_STALL;
		break;
	}

	/* Grab translation stages supported. */
	if (reg & IDR0_S1P) {
		if (bootverbose)
			device_printf(sc->dev,
			    "Stage 1 translation supported.\n");
		sc->features |= SMMU_FEATURE_S1P;
	}
	if (reg & IDR0_S2P) {
		if (bootverbose)
			device_printf(sc->dev,
			    "Stage 2 translation supported.\n");
		sc->features |= SMMU_FEATURE_S2P;
	}

	/* AArch64 table format is required; ias is the input address size. */
	switch (reg & IDR0_TTF_M) {
	case IDR0_TTF_ALL:
	case IDR0_TTF_AA64:
		sc->ias = 40;
		break;
	default:
		device_printf(sc->dev, "No AArch64 table format support.\n");
		return (ENXIO);
	}

	if (reg & IDR0_ASID16)
		sc->asid_bits = 16;
	else
		sc->asid_bits = 8;

	if (bootverbose)
		device_printf(sc->dev, "ASID bits %d\n", sc->asid_bits);

	if (reg & IDR0_VMID16)
		sc->vmid_bits = 16;
	else
		sc->vmid_bits = 8;

	reg = bus_read_4(sc->res[0], SMMU_IDR1);

	/* Fixed tables/queues or a relative base are not handled here. */
	if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
		device_printf(sc->dev,
		    "Embedded implementations not supported by this driver.\n");
		return (ENXIO);
	}

	/* Queue sizes are reported as log2 of the number of entries. */
	val = (reg & IDR1_CMDQS_M) >> IDR1_CMDQS_S;
	sc->cmdq.size_log2 = val;
	if (bootverbose)
		device_printf(sc->dev, "CMD queue bits %d\n", val);

	val = (reg & IDR1_EVENTQS_M) >> IDR1_EVENTQS_S;
	sc->evtq.size_log2 = val;
	if (bootverbose)
		device_printf(sc->dev, "EVENT queue bits %d\n", val);

	if (sc->features & SMMU_FEATURE_PRI) {
		val = (reg & IDR1_PRIQS_M) >> IDR1_PRIQS_S;
		sc->priq.size_log2 = val;
		if (bootverbose)
			device_printf(sc->dev, "PRI queue bits %d\n", val);
	}

	sc->ssid_bits = (reg & IDR1_SSIDSIZE_M) >> IDR1_SSIDSIZE_S;
	sc->sid_bits = (reg & IDR1_SIDSIZE_M) >> IDR1_SIDSIZE_S;

	/* A 2-level stream table needs SIDs wider than the split point. */
	if (sc->sid_bits <= STRTAB_SPLIT)
		sc->features &= ~SMMU_FEATURE_2_LVL_STREAM_TABLE;

	if (bootverbose) {
		device_printf(sc->dev, "SSID bits %d\n", sc->ssid_bits);
		device_printf(sc->dev, "SID bits %d\n", sc->sid_bits);
	}

	/* IDR3 */
	reg = bus_read_4(sc->res[0], SMMU_IDR3);
	if (reg & IDR3_RIL)
		sc->features |= SMMU_FEATURE_RANGE_INV;

	/* IDR5 */
	reg = bus_read_4(sc->res[0], SMMU_IDR5);

	/*
	 * Output address size.  NOTE(review): an unknown encoding leaves
	 * sc->oas at its previous value — confirm the softc is zeroed so
	 * this defaults to 0 rather than garbage.
	 */
	switch (reg & IDR5_OAS_M) {
	case IDR5_OAS_32:
		sc->oas = 32;
		break;
	case IDR5_OAS_36:
		sc->oas = 36;
		break;
	case IDR5_OAS_40:
		sc->oas = 40;
		break;
	case IDR5_OAS_42:
		sc->oas = 42;
		break;
	case IDR5_OAS_44:
		sc->oas = 44;
		break;
	case IDR5_OAS_48:
		sc->oas = 48;
		break;
	case IDR5_OAS_52:
		sc->oas = 52;
		break;
	}

	/* Supported translation granules, expressed in bytes. */
	sc->pgsizes = 0;
	if (reg & IDR5_GRAN64K)
		sc->pgsizes |= 64 * 1024;
	if (reg & IDR5_GRAN16K)
		sc->pgsizes |= 16 * 1024;
	if (reg & IDR5_GRAN4K)
		sc->pgsizes |= 4 * 1024;

	if ((reg & IDR5_VAX_M) == IDR5_VAX_52)
		sc->features |= SMMU_FEATURE_VAX;

	return (0);
}
 1488 
 1489 static void
 1490 smmu_init_asids(struct smmu_softc *sc)
 1491 {
 1492 
 1493         sc->asid_set_size = (1 << sc->asid_bits);
 1494         sc->asid_set = bit_alloc(sc->asid_set_size, M_SMMU, M_WAITOK);
 1495         mtx_init(&sc->asid_set_mutex, "asid set", NULL, MTX_SPIN);
 1496 }
 1497 
 1498 static int
 1499 smmu_asid_alloc(struct smmu_softc *sc, int *new_asid)
 1500 {
 1501 
 1502         mtx_lock_spin(&sc->asid_set_mutex);
 1503         bit_ffc(sc->asid_set, sc->asid_set_size, new_asid);
 1504         if (*new_asid == -1) {
 1505                 mtx_unlock_spin(&sc->asid_set_mutex);
 1506                 return (ENOMEM);
 1507         }
 1508         bit_set(sc->asid_set, *new_asid);
 1509         mtx_unlock_spin(&sc->asid_set_mutex);
 1510 
 1511         return (0);
 1512 }
 1513 
 1514 static void
 1515 smmu_asid_free(struct smmu_softc *sc, int asid)
 1516 {
 1517 
 1518         mtx_lock_spin(&sc->asid_set_mutex);
 1519         bit_clear(sc->asid_set, asid);
 1520         mtx_unlock_spin(&sc->asid_set_mutex);
 1521 }
 1522 
 1523 /*
 1524  * Device interface.
 1525  */
 1526 int
 1527 smmu_attach(device_t dev)
 1528 {
 1529         struct smmu_softc *sc;
 1530         int error;
 1531 
 1532         sc = device_get_softc(dev);
 1533         sc->dev = dev;
 1534 
 1535         mtx_init(&sc->sc_mtx, device_get_nameunit(sc->dev), "smmu", MTX_DEF);
 1536 
 1537         error = bus_alloc_resources(dev, smmu_spec, sc->res);
 1538         if (error) {
 1539                 device_printf(dev, "Couldn't allocate resources.\n");
 1540                 return (ENXIO);
 1541         }
 1542 
 1543         error = smmu_setup_interrupts(sc);
 1544         if (error) {
 1545                 bus_release_resources(dev, smmu_spec, sc->res);
 1546                 return (ENXIO);
 1547         }
 1548 
 1549         error = smmu_check_features(sc);
 1550         if (error) {
 1551                 device_printf(dev, "Some features are required "
 1552                     "but not supported by hardware.\n");
 1553                 return (ENXIO);
 1554         }
 1555 
 1556         smmu_init_asids(sc);
 1557 
 1558         error = smmu_init_queues(sc);
 1559         if (error) {
 1560                 device_printf(dev, "Couldn't allocate queues.\n");
 1561                 return (ENXIO);
 1562         }
 1563 
 1564         error = smmu_init_strtab(sc);
 1565         if (error) {
 1566                 device_printf(dev, "Couldn't allocate strtab.\n");
 1567                 return (ENXIO);
 1568         }
 1569 
 1570         error = smmu_reset(sc);
 1571         if (error) {
 1572                 device_printf(dev, "Couldn't reset SMMU.\n");
 1573                 return (ENXIO);
 1574         }
 1575 
 1576         return (0);
 1577 }
 1578 
 1579 int
 1580 smmu_detach(device_t dev)
 1581 {
 1582         struct smmu_softc *sc;
 1583 
 1584         sc = device_get_softc(dev);
 1585 
 1586         bus_release_resources(dev, smmu_spec, sc->res);
 1587 
 1588         return (0);
 1589 }
 1590 
 1591 static int
 1592 smmu_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
 1593 {
 1594         struct smmu_softc *sc;
 1595 
 1596         sc = device_get_softc(dev);
 1597 
 1598         device_printf(sc->dev, "%s\n", __func__);
 1599 
 1600         return (ENOENT);
 1601 }
 1602 
 1603 static int
 1604 smmu_unmap(device_t dev, struct iommu_domain *iodom,
 1605     vm_offset_t va, bus_size_t size)
 1606 {
 1607         struct smmu_domain *domain;
 1608         struct smmu_softc *sc;
 1609         int err;
 1610         int i;
 1611 
 1612         sc = device_get_softc(dev);
 1613 
 1614         domain = (struct smmu_domain *)iodom;
 1615 
 1616         err = 0;
 1617 
 1618         dprintf("%s: %lx, %ld, domain %d\n", __func__, va, size, domain->asid);
 1619 
 1620         for (i = 0; i < size; i += PAGE_SIZE) {
 1621                 if (pmap_sremove(&domain->p, va) == 0) {
 1622                         /* pmap entry removed, invalidate TLB. */
 1623                         smmu_tlbi_va(sc, va, domain->asid);
 1624                 } else {
 1625                         err = ENOENT;
 1626                         break;
 1627                 }
 1628                 va += PAGE_SIZE;
 1629         }
 1630 
 1631         smmu_sync(sc);
 1632 
 1633         return (err);
 1634 }
 1635 
 1636 static int
 1637 smmu_map(device_t dev, struct iommu_domain *iodom,
 1638     vm_offset_t va, vm_page_t *ma, vm_size_t size,
 1639     vm_prot_t prot)
 1640 {
 1641         struct smmu_domain *domain;
 1642         struct smmu_softc *sc;
 1643         vm_paddr_t pa;
 1644         int error;
 1645         int i;
 1646 
 1647         sc = device_get_softc(dev);
 1648 
 1649         domain = (struct smmu_domain *)iodom;
 1650 
 1651         dprintf("%s: %lx -> %lx, %ld, domain %d\n", __func__, va, pa, size,
 1652             domain->asid);
 1653 
 1654         for (i = 0; size > 0; size -= PAGE_SIZE) {
 1655                 pa = VM_PAGE_TO_PHYS(ma[i++]);
 1656                 error = pmap_senter(&domain->p, va, pa, prot, 0);
 1657                 if (error)
 1658                         return (error);
 1659                 smmu_tlbi_va(sc, va, domain->asid);
 1660                 va += PAGE_SIZE;
 1661         }
 1662 
 1663         smmu_sync(sc);
 1664 
 1665         return (0);
 1666 }
 1667 
 1668 static struct iommu_domain *
 1669 smmu_domain_alloc(device_t dev, struct iommu_unit *iommu)
 1670 {
 1671         struct smmu_domain *domain;
 1672         struct smmu_unit *unit;
 1673         struct smmu_softc *sc;
 1674         int error;
 1675         int new_asid;
 1676 
 1677         sc = device_get_softc(dev);
 1678 
 1679         unit = (struct smmu_unit *)iommu;
 1680 
 1681         domain = malloc(sizeof(*domain), M_SMMU, M_WAITOK | M_ZERO);
 1682 
 1683         error = smmu_asid_alloc(sc, &new_asid);
 1684         if (error) {
 1685                 free(domain, M_SMMU);
 1686                 device_printf(sc->dev,
 1687                     "Could not allocate ASID for a new domain.\n");
 1688                 return (NULL);
 1689         }
 1690 
 1691         domain->asid = (uint16_t)new_asid;
 1692 
 1693         pmap_pinit(&domain->p);
 1694         PMAP_LOCK_INIT(&domain->p);
 1695 
 1696         error = smmu_init_cd(sc, domain);
 1697         if (error) {
 1698                 free(domain, M_SMMU);
 1699                 device_printf(sc->dev, "Could not initialize CD\n");
 1700                 return (NULL);
 1701         }
 1702 
 1703         smmu_tlbi_asid(sc, domain->asid);
 1704 
 1705         LIST_INIT(&domain->ctx_list);
 1706 
 1707         IOMMU_LOCK(iommu);
 1708         LIST_INSERT_HEAD(&unit->domain_list, domain, next);
 1709         IOMMU_UNLOCK(iommu);
 1710 
 1711         return (&domain->iodom);
 1712 }
 1713 
/*
 * Destroy a domain created by smmu_domain_alloc(): remove it from the
 * unit's list, tear down its pmap, invalidate and release its ASID, and
 * free the context descriptor memory.
 */
static void
smmu_domain_free(device_t dev, struct iommu_domain *iodom)
{
	struct smmu_domain *domain;
	struct smmu_softc *sc;
	struct smmu_cd *cd;

	sc = device_get_softc(dev);

	domain = (struct smmu_domain *)iodom;

	/*
	 * NOTE(review): smmu_domain_alloc() inserts into the unit's list
	 * under IOMMU_LOCK, but no lock is held here — confirm the caller
	 * serializes domain teardown.
	 */
	LIST_REMOVE(domain, next);

	cd = domain->cd;

	/* Drop all mappings before releasing the page-table pages. */
	pmap_sremove_pages(&domain->p);
	pmap_release(&domain->p);

	/* Invalidate cached translations, then recycle the ASID. */
	smmu_tlbi_asid(sc, domain->asid);
	smmu_asid_free(sc, domain->asid);

	contigfree(cd->vaddr, cd->size, M_SMMU);
	free(cd, M_SMMU);

	free(domain, M_SMMU);
}
 1740 
/*
 * Install STEs for every SID value reachable by OR-ing the low bits into
 * the context's base SID, so an entire PCI bus shares one translation
 * context.  Always returns 0.
 */
static int
smmu_set_buswide(device_t dev, struct smmu_domain *domain,
    struct smmu_ctx *ctx)
{
	struct smmu_softc *sc;
	int i;

	sc = device_get_softc(dev);

	/*
	 * NOTE(review): the loop covers i = 0..PCI_SLOTMAX-1; if the intent
	 * is to include slot PCI_SLOTMAX itself this should be '<=' — and
	 * function bits of the RID are not covered either.  Confirm against
	 * the SID-to-RID mapping before changing.
	 */
	for (i = 0; i < PCI_SLOTMAX; i++)
		smmu_init_ste(sc, domain->cd, (ctx->sid | i), ctx->bypass);

	return (0);
}
 1755 
/*
 * Create a device context within 'iodom' for PCI device 'child': resolve
 * its StreamID via the ACPI IORT tables, make sure an L2 stream-table page
 * exists for it, and install its STE.  When 'disabled' is set the STE is
 * installed in bypass mode.  Returns the embedded iommu_ctx, or NULL if
 * the IORT lookup or L2 allocation fails.
 */
static struct iommu_ctx *
smmu_ctx_alloc(device_t dev, struct iommu_domain *iodom, device_t child,
    bool disabled)
{
	struct smmu_domain *domain;
	struct smmu_softc *sc;
	struct smmu_ctx *ctx;
	uint16_t rid;
	u_int xref, sid;
	int seg;
	int err;

	sc = device_get_softc(dev);
	domain = (struct smmu_domain *)iodom;

	/* Translate the PCI segment + RID into an SMMU StreamID. */
	seg = pci_get_domain(child);
	rid = pci_get_rid(child);
	err = acpi_iort_map_pci_smmuv3(seg, rid, &xref, &sid);
	if (err)
		return (NULL);

	/* With a 2-level table, ensure the SID's L2 STE page is present. */
	if (sc->features & SMMU_FEATURE_2_LVL_STREAM_TABLE) {
		err = smmu_init_l1_entry(sc, sid);
		if (err)
			return (NULL);
	}

	ctx = malloc(sizeof(struct smmu_ctx), M_SMMU, M_WAITOK | M_ZERO);
	ctx->vendor = pci_get_vendor(child);
	ctx->device = pci_get_device(child);
	ctx->dev = child;
	ctx->sid = sid;
	ctx->domain = domain;
	if (disabled)
		ctx->bypass = true;

	/*
	 * Neoverse N1 SDP:
	 * 0x800 xhci
	 * 0x700 re
	 * 0x600 sata
	 */

	smmu_init_ste(sc, domain->cd, ctx->sid, ctx->bypass);

	/* Devices behind a non-transparent bridge share one context. */
	if (iommu_is_buswide_ctx(iodom->iommu, pci_get_bus(ctx->dev)))
		smmu_set_buswide(dev, domain, ctx);

	IOMMU_DOMAIN_LOCK(iodom);
	LIST_INSERT_HEAD(&domain->ctx_list, ctx, next);
	IOMMU_DOMAIN_UNLOCK(iodom);

	return (&ctx->ioctx);
}
 1810 
 1811 static void
 1812 smmu_ctx_free(device_t dev, struct iommu_ctx *ioctx)
 1813 {
 1814         struct smmu_softc *sc;
 1815         struct smmu_ctx *ctx;
 1816 
 1817         IOMMU_ASSERT_LOCKED(ioctx->domain->iommu);
 1818 
 1819         sc = device_get_softc(dev);
 1820         ctx = (struct smmu_ctx *)ioctx;
 1821 
 1822         smmu_deinit_l1_entry(sc, ctx->sid);
 1823 
 1824         LIST_REMOVE(ctx, next);
 1825 
 1826         free(ctx, M_SMMU);
 1827 }
 1828 
 1829 struct smmu_ctx *
 1830 smmu_ctx_lookup_by_sid(device_t dev, u_int sid)
 1831 {
 1832         struct smmu_softc *sc;
 1833         struct smmu_domain *domain;
 1834         struct smmu_unit *unit;
 1835         struct smmu_ctx *ctx;
 1836 
 1837         sc = device_get_softc(dev);
 1838 
 1839         unit = &sc->unit;
 1840 
 1841         LIST_FOREACH(domain, &unit->domain_list, next) {
 1842                 LIST_FOREACH(ctx, &domain->ctx_list, next) {
 1843                         if (ctx->sid == sid)
 1844                                 return (ctx);
 1845                 }
 1846         }
 1847 
 1848         return (NULL);
 1849 }
 1850 
 1851 static struct iommu_ctx *
 1852 smmu_ctx_lookup(device_t dev, device_t child)
 1853 {
 1854         struct iommu_unit *iommu;
 1855         struct smmu_softc *sc;
 1856         struct smmu_domain *domain;
 1857         struct smmu_unit *unit;
 1858         struct smmu_ctx *ctx;
 1859 
 1860         sc = device_get_softc(dev);
 1861 
 1862         unit = &sc->unit;
 1863         iommu = &unit->iommu;
 1864 
 1865         IOMMU_ASSERT_LOCKED(iommu);
 1866 
 1867         LIST_FOREACH(domain, &unit->domain_list, next) {
 1868                 IOMMU_DOMAIN_LOCK(&domain->iodom);
 1869                 LIST_FOREACH(ctx, &domain->ctx_list, next) {
 1870                         if (ctx->dev == child) {
 1871                                 IOMMU_DOMAIN_UNLOCK(&domain->iodom);
 1872                                 return (&ctx->ioctx);
 1873                         }
 1874                 }
 1875                 IOMMU_DOMAIN_UNLOCK(&domain->iodom);
 1876         }
 1877 
 1878         return (NULL);
 1879 }
 1880 
 1881 static int
 1882 smmu_find(device_t dev, device_t child)
 1883 {
 1884         struct smmu_softc *sc;
 1885         u_int xref, sid;
 1886         uint16_t rid;
 1887         int error;
 1888         int seg;
 1889 
 1890         sc = device_get_softc(dev);
 1891 
 1892         rid = pci_get_rid(child);
 1893         seg = pci_get_domain(child);
 1894 
 1895         /*
 1896          * Find an xref of an IOMMU controller that serves traffic for dev.
 1897          */
 1898 #ifdef DEV_ACPI
 1899         error = acpi_iort_map_pci_smmuv3(seg, rid, &xref, &sid);
 1900         if (error) {
 1901                 /* Could not find reference to an SMMU device. */
 1902                 return (ENOENT);
 1903         }
 1904 #else
 1905         /* TODO: add FDT support. */
 1906         return (ENXIO);
 1907 #endif
 1908 
 1909         /* Check if xref is ours. */
 1910         if (xref != sc->xref)
 1911                 return (EFAULT);
 1912 
 1913         return (0);
 1914 }
 1915 
/*
 * Method dispatch table for the SMMU driver class.  There is no
 * device_probe/device_attach entry here — presumably a bus-specific
 * front-end (e.g. ACPI glue) supplies those; confirm against the
 * attachment code elsewhere in the tree.
 */
static device_method_t smmu_methods[] = {
        /* Device interface */
        DEVMETHOD(device_detach,        smmu_detach),

        /* SMMU interface */
        DEVMETHOD(iommu_find,           smmu_find),
        DEVMETHOD(iommu_map,            smmu_map),
        DEVMETHOD(iommu_unmap,          smmu_unmap),
        DEVMETHOD(iommu_domain_alloc,   smmu_domain_alloc),
        DEVMETHOD(iommu_domain_free,    smmu_domain_free),
        DEVMETHOD(iommu_ctx_alloc,      smmu_ctx_alloc),
        DEVMETHOD(iommu_ctx_free,       smmu_ctx_free),
        DEVMETHOD(iommu_ctx_lookup,     smmu_ctx_lookup),

        /* Bus interface */
        DEVMETHOD(bus_read_ivar,        smmu_read_ivar),

        /* End */
        DEVMETHOD_END
};

/* Declare the "smmu" driver class; each instance gets a struct smmu_softc. */
DEFINE_CLASS_0(smmu, smmu_driver, smmu_methods, sizeof(struct smmu_softc));

Cache object: 69020c1d2e6da84729ce9533855a8f08


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.