FreeBSD/Linux Kernel Cross Reference
sys/x86/iommu/intel_drv.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (c) 2013-2015 The FreeBSD Foundation
    5  *
    6  * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
    7  * under sponsorship from the FreeBSD Foundation.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  *
   18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   21  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   28  * SUCH DAMAGE.
   29  */
   30 
   31 #include <sys/cdefs.h>
   32 __FBSDID("$FreeBSD$");
   33 
   34 #include "opt_acpi.h"
   35 #if defined(__amd64__)
   36 #define DEV_APIC
   37 #else
   38 #include "opt_apic.h"
   39 #endif
   40 #include "opt_ddb.h"
   41 
   42 #include <sys/param.h>
   43 #include <sys/bus.h>
   44 #include <sys/kernel.h>
   45 #include <sys/lock.h>
   46 #include <sys/malloc.h>
   47 #include <sys/memdesc.h>
   48 #include <sys/module.h>
   49 #include <sys/mutex.h>
   50 #include <sys/rman.h>
   51 #include <sys/rwlock.h>
   52 #include <sys/smp.h>
   53 #include <sys/taskqueue.h>
   54 #include <sys/tree.h>
   55 #include <sys/vmem.h>
   56 #include <vm/vm.h>
   57 #include <vm/vm_extern.h>
   58 #include <vm/vm_kern.h>
   59 #include <vm/vm_object.h>
   60 #include <vm/vm_page.h>
   61 #include <vm/vm_pager.h>
   62 #include <vm/vm_map.h>
   63 #include <contrib/dev/acpica/include/acpi.h>
   64 #include <contrib/dev/acpica/include/accommon.h>
   65 #include <dev/acpica/acpivar.h>
   66 #include <dev/pci/pcireg.h>
   67 #include <dev/pci/pcivar.h>
   68 #include <machine/bus.h>
   69 #include <machine/pci_cfgreg.h>
   70 #include <x86/include/busdma_impl.h>
   71 #include <dev/iommu/busdma_iommu.h>
   72 #include <x86/iommu/intel_reg.h>
   73 #include <x86/iommu/intel_dmar.h>
   74 
   75 #ifdef DEV_APIC
   76 #include "pcib_if.h"
   77 #include <machine/intr_machdep.h>
   78 #include <x86/apicreg.h>
   79 #include <x86/apicvar.h>
   80 #endif
   81 
   82 #define DMAR_FAULT_IRQ_RID      0
   83 #define DMAR_QI_IRQ_RID         1
   84 #define DMAR_REG_RID            2
   85 
   86 static devclass_t dmar_devclass;
   87 static device_t *dmar_devs;
   88 static int dmar_devcnt;
   89 
   90 typedef int (*dmar_iter_t)(ACPI_DMAR_HEADER *, void *);
   91 
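      /*
       * Walk the remapping structures that follow the ACPI DMAR table
       * header, invoking the iterator for each entry until it returns 0,
       * the table is exhausted, or a corrupted (zero-length) entry is seen.
       */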
   92 static void
   93 dmar_iterate_tbl(dmar_iter_t iter, void *arg)
   94 {
   95         ACPI_TABLE_DMAR *dmartbl;
   96         ACPI_DMAR_HEADER *dmarh;
   97         char *ptr, *ptrend;
   98         ACPI_STATUS status;
   99 
  100         status = AcpiGetTable(ACPI_SIG_DMAR, 1, (ACPI_TABLE_HEADER **)&dmartbl);
  101         if (ACPI_FAILURE(status))
  102                 return;
  103         ptr = (char *)dmartbl + sizeof(*dmartbl);
  104         ptrend = (char *)dmartbl + dmartbl->Header.Length;
  105         for (;;) {
  106                 if (ptr >= ptrend)
  107                         break;
  108                 dmarh = (ACPI_DMAR_HEADER *)ptr;
  109                 if (dmarh->Length <= 0) {
  110                         printf("dmar_identify: corrupted DMAR table, l %d\n",
  111                             dmarh->Length);
  112                         break;
  113                 }
  114                 ptr += dmarh->Length;
  115                 if (!iter(dmarh, arg))
  116                         break;
  117         }
  118         AcpiPutTable((ACPI_TABLE_HEADER *)dmartbl);
  119 }
  120 
  121 struct find_iter_args {
  122         int i;
  123         ACPI_DMAR_HARDWARE_UNIT *res;
  124 };
  125 
  126 static int
  127 dmar_find_iter(ACPI_DMAR_HEADER *dmarh, void *arg)
  128 {
  129         struct find_iter_args *fia;
  130 
  131         if (dmarh->Type != ACPI_DMAR_TYPE_HARDWARE_UNIT)
  132                 return (1);
  133 
  134         fia = arg;
  135         if (fia->i == 0) {
  136                 fia->res = (ACPI_DMAR_HARDWARE_UNIT *)dmarh;
  137                 return (0);
  138         }
  139         fia->i--;
  140         return (1);
  141 }
  142 
  143 static ACPI_DMAR_HARDWARE_UNIT *
  144 dmar_find_by_index(int idx)
  145 {
  146         struct find_iter_args fia;
  147 
  148         fia.i = idx;
  149         fia.res = NULL;
  150         dmar_iterate_tbl(dmar_find_iter, &fia);
  151         return (fia.res);
  152 }
  153 
  154 static int
  155 dmar_count_iter(ACPI_DMAR_HEADER *dmarh, void *arg)
  156 {
  157 
  158         if (dmarh->Type == ACPI_DMAR_TYPE_HARDWARE_UNIT)
  159                 dmar_devcnt++;
  160         return (1);
  161 }
  162 
      /*
       * NOTE: haw and dmar_high are used below but are not declared anywhere
       * in this listing.  File-scope definitions are assumed here so the code
       * reads as a complete translation unit; the exact types and linkage in
       * the upstream driver may differ.
       */
      int haw;
      uint64_t dmar_high;

  163 static int dmar_enable = 0;
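      /*
       * Identify routine: gated by the hw.dmar.enable tunable (typically set
       * from loader.conf, e.g. hw.dmar.enable=1 -- shown as an assumed usage
       * example, not taken from this file).  It counts the hardware units
       * described by the ACPI DMAR table and creates a dmar<N> child device
       * with a register-window resource for each of them.
       */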
  164 static void
  165 dmar_identify(driver_t *driver, device_t parent)
  166 {
  167         ACPI_TABLE_DMAR *dmartbl;
  168         ACPI_DMAR_HARDWARE_UNIT *dmarh;
  169         ACPI_STATUS status;
  170         int i, error;
  171 
  172         if (acpi_disabled("dmar"))
  173                 return;
  174         TUNABLE_INT_FETCH("hw.dmar.enable", &dmar_enable);
  175         if (!dmar_enable)
  176                 return;
  177         status = AcpiGetTable(ACPI_SIG_DMAR, 1, (ACPI_TABLE_HEADER **)&dmartbl);
  178         if (ACPI_FAILURE(status))
  179                 return;
  180         haw = dmartbl->Width + 1;
  181         if ((1ULL << (haw + 1)) > BUS_SPACE_MAXADDR)
  182                 dmar_high = BUS_SPACE_MAXADDR;
  183         else
  184                 dmar_high = 1ULL << (haw + 1);
  185         if (bootverbose) {
  186                 printf("DMAR HAW=%d flags=<%b>\n", dmartbl->Width,
  187                     (unsigned)dmartbl->Flags,
  188                     "\020\001INTR_REMAP\002X2APIC_OPT_OUT");
  189         }
  190         AcpiPutTable((ACPI_TABLE_HEADER *)dmartbl);
  191 
  192         dmar_iterate_tbl(dmar_count_iter, NULL);
  193         if (dmar_devcnt == 0)
  194                 return;
  195         dmar_devs = malloc(sizeof(device_t) * dmar_devcnt, M_DEVBUF,
  196             M_WAITOK | M_ZERO);
  197         for (i = 0; i < dmar_devcnt; i++) {
  198                 dmarh = dmar_find_by_index(i);
  199                 if (dmarh == NULL) {
  200                         printf("dmar_identify: cannot find HWUNIT %d\n", i);
  201                         continue;
  202                 }
  203                 dmar_devs[i] = BUS_ADD_CHILD(parent, 1, "dmar", i);
  204                 if (dmar_devs[i] == NULL) {
  205                         printf("dmar_identify: cannot create instance %d\n", i);
  206                         continue;
  207                 }
  208                 error = bus_set_resource(dmar_devs[i], SYS_RES_MEMORY,
  209                     DMAR_REG_RID, dmarh->Address, PAGE_SIZE);
  210                 if (error != 0) {
  211                         printf(
  212         "dmar%d: unable to alloc register window at 0x%08jx: error %d\n",
  213                             i, (uintmax_t)dmarh->Address, error);
  214                         device_delete_child(parent, dmar_devs[i]);
  215                         dmar_devs[i] = NULL;
  216                 }
  217         }
  218 }
  219 
  220 static int
  221 dmar_probe(device_t dev)
  222 {
  223 
  224         if (acpi_get_handle(dev) != NULL)
  225                 return (ENXIO);
  226         device_set_desc(dev, "DMA remap");
  227         return (BUS_PROBE_NOWILDCARD);
  228 }
  229 
  230 static void
  231 dmar_release_intr(device_t dev, struct dmar_unit *unit, int idx)
  232 {
  233         struct dmar_msi_data *dmd;
  234 
  235         dmd = &unit->intrs[idx];
  236         if (dmd->irq == -1)
  237                 return;
  238         bus_teardown_intr(dev, dmd->irq_res, dmd->intr_handle);
  239         bus_release_resource(dev, SYS_RES_IRQ, dmd->irq_rid, dmd->irq_res);
  240         bus_delete_resource(dev, SYS_RES_IRQ, dmd->irq_rid);
  241         PCIB_RELEASE_MSIX(device_get_parent(device_get_parent(dev)),
  242             dev, dmd->irq);
  243         dmd->irq = -1;
  244 }
  245 
  246 static void
  247 dmar_release_resources(device_t dev, struct dmar_unit *unit)
  248 {
  249         int i;
  250 
  251         iommu_fini_busdma(&unit->iommu);
  252         dmar_fini_irt(unit);
  253         dmar_fini_qi(unit);
  254         dmar_fini_fault_log(unit);
  255         for (i = 0; i < DMAR_INTR_TOTAL; i++)
  256                 dmar_release_intr(dev, unit, i);
  257         if (unit->regs != NULL) {
  258                 bus_deactivate_resource(dev, SYS_RES_MEMORY, unit->reg_rid,
  259                     unit->regs);
  260                 bus_release_resource(dev, SYS_RES_MEMORY, unit->reg_rid,
  261                     unit->regs);
  262                 unit->regs = NULL;
  263         }
  264         if (unit->domids != NULL) {
  265                 delete_unrhdr(unit->domids);
  266                 unit->domids = NULL;
  267         }
  268         if (unit->ctx_obj != NULL) {
  269                 vm_object_deallocate(unit->ctx_obj);
  270                 unit->ctx_obj = NULL;
  271         }
  272 }
  273 
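      /*
       * Allocate and wire one of the unit's interrupts (fault or queued
       * invalidation): obtain an MSI-X message through the PCIB interface of
       * the grandparent device, install the IRQ resource and handler, then
       * program the message data and address into the unit's corresponding
       * DMAR registers (the upper address register matters only for xAPIC
       * mode).
       */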
  274 static int
  275 dmar_alloc_irq(device_t dev, struct dmar_unit *unit, int idx)
  276 {
  277         device_t pcib;
  278         struct dmar_msi_data *dmd;
  279         uint64_t msi_addr;
  280         uint32_t msi_data;
  281         int error;
  282 
  283         dmd = &unit->intrs[idx];
  284         pcib = device_get_parent(device_get_parent(dev)); /* Really not pcib */
  285         error = PCIB_ALLOC_MSIX(pcib, dev, &dmd->irq);
  286         if (error != 0) {
  287                 device_printf(dev, "cannot allocate %s interrupt, %d\n",
  288                     dmd->name, error);
  289                 goto err1;
  290         }
  291         error = bus_set_resource(dev, SYS_RES_IRQ, dmd->irq_rid,
  292             dmd->irq, 1);
  293         if (error != 0) {
  294                 device_printf(dev, "cannot set %s interrupt resource, %d\n",
  295                     dmd->name, error);
  296                 goto err2;
  297         }
  298         dmd->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
  299             &dmd->irq_rid, RF_ACTIVE);
  300         if (dmd->irq_res == NULL) {
  301                 device_printf(dev,
  302                     "cannot allocate resource for %s interrupt\n", dmd->name);
  303                 error = ENXIO;
  304                 goto err3;
  305         }
  306         error = bus_setup_intr(dev, dmd->irq_res, INTR_TYPE_MISC,
  307             dmd->handler, NULL, unit, &dmd->intr_handle);
  308         if (error != 0) {
  309                 device_printf(dev, "cannot setup %s interrupt, %d\n",
  310                     dmd->name, error);
  311                 goto err4;
  312         }
  313         bus_describe_intr(dev, dmd->irq_res, dmd->intr_handle, "%s", dmd->name);
  314         error = PCIB_MAP_MSI(pcib, dev, dmd->irq, &msi_addr, &msi_data);
  315         if (error != 0) {
  316                 device_printf(dev, "cannot map %s interrupt, %d\n",
  317                     dmd->name, error);
  318                 goto err5;
  319         }
  320         dmar_write4(unit, dmd->msi_data_reg, msi_data);
  321         dmar_write4(unit, dmd->msi_addr_reg, msi_addr);
  322         /* Only for xAPIC mode */
  323         dmar_write4(unit, dmd->msi_uaddr_reg, msi_addr >> 32);
  324         return (0);
  325 
  326 err5:
  327         bus_teardown_intr(dev, dmd->irq_res, dmd->intr_handle);
  328 err4:
  329         bus_release_resource(dev, SYS_RES_IRQ, dmd->irq_rid, dmd->irq_res);
  330 err3:
  331         bus_delete_resource(dev, SYS_RES_IRQ, dmd->irq_rid);
  332 err2:
  333         PCIB_RELEASE_MSIX(pcib, dev, dmd->irq);
  334         dmd->irq = -1;
  335 err1:
  336         return (error);
  337 }
  338 
  339 #ifdef DEV_APIC
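      /*
       * bus_remap_intr method: when one of the unit's own interrupts is
       * remapped, re-derive the MSI address/data from the parent and rewrite
       * the corresponding DMAR registers, with the interrupt disabled around
       * the update.
       */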
  340 static int
  341 dmar_remap_intr(device_t dev, device_t child, u_int irq)
  342 {
  343         struct dmar_unit *unit;
  344         struct dmar_msi_data *dmd;
  345         uint64_t msi_addr;
  346         uint32_t msi_data;
  347         int i, error;
  348 
  349         unit = device_get_softc(dev);
  350         for (i = 0; i < DMAR_INTR_TOTAL; i++) {
  351                 dmd = &unit->intrs[i];
  352                 if (irq == dmd->irq) {
  353                         error = PCIB_MAP_MSI(device_get_parent(
  354                             device_get_parent(dev)),
  355                             dev, irq, &msi_addr, &msi_data);
  356                         if (error != 0)
  357                                 return (error);
  358                         DMAR_LOCK(unit);
  359                         (dmd->disable_intr)(unit);
  360                         dmar_write4(unit, dmd->msi_data_reg, msi_data);
  361                         dmar_write4(unit, dmd->msi_addr_reg, msi_addr);
  362                         dmar_write4(unit, dmd->msi_uaddr_reg, msi_addr >> 32);
  363                         (dmd->enable_intr)(unit);
  364                         DMAR_UNLOCK(unit);
  365                         return (0);
  366                 }
  367         }
  368         return (ENOENT);
  369 }
  370 #endif
  371 
  372 static void
  373 dmar_print_caps(device_t dev, struct dmar_unit *unit,
  374     ACPI_DMAR_HARDWARE_UNIT *dmaru)
  375 {
  376         uint32_t caphi, ecaphi;
  377 
  378         device_printf(dev, "regs@0x%08jx, ver=%d.%d, seg=%d, flags=<%b>\n",
  379             (uintmax_t)dmaru->Address, DMAR_MAJOR_VER(unit->hw_ver),
  380             DMAR_MINOR_VER(unit->hw_ver), dmaru->Segment,
  381             dmaru->Flags, "\020\001INCLUDE_ALL_PCI");
  382         caphi = unit->hw_cap >> 32;
  383         device_printf(dev, "cap=%b,", (u_int)unit->hw_cap,
  384             "\020\004AFL\005WBF\006PLMR\007PHMR\010CM\027ZLR\030ISOCH");
  385         printf("%b, ", caphi, "\020\010PSI\027DWD\030DRD\031FL1GP\034PSI");
  386         printf("ndoms=%d, sagaw=%d, mgaw=%d, fro=%d, nfr=%d, superp=%d",
  387             DMAR_CAP_ND(unit->hw_cap), DMAR_CAP_SAGAW(unit->hw_cap),
  388             DMAR_CAP_MGAW(unit->hw_cap), DMAR_CAP_FRO(unit->hw_cap),
  389             DMAR_CAP_NFR(unit->hw_cap), DMAR_CAP_SPS(unit->hw_cap));
  390         if ((unit->hw_cap & DMAR_CAP_PSI) != 0)
  391                 printf(", mamv=%d", DMAR_CAP_MAMV(unit->hw_cap));
  392         printf("\n");
  393         ecaphi = unit->hw_ecap >> 32;
  394         device_printf(dev, "ecap=%b,", (u_int)unit->hw_ecap,
  395             "\020\001C\002QI\003DI\004IR\005EIM\007PT\010SC\031ECS\032MTS"
  396             "\033NEST\034DIS\035PASID\036PRS\037ERS\040SRS");
  397         printf("%b, ", ecaphi, "\020\002NWFS\003EAFS");
  398         printf("mhmw=%d, iro=%d\n", DMAR_ECAP_MHMV(unit->hw_ecap),
  399             DMAR_ECAP_IRO(unit->hw_ecap));
  400 }
  401 
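      /*
       * Attach: map the register window, snapshot the version, capability
       * and extended-capability registers, wire up the fault (and, when
       * supported, queued-invalidation) interrupts, create the domain-id
       * allocator and the root/context-table object, load the root entry
       * pointer and perform the required global invalidations, then bring up
       * the fault log, QI, interrupt remapping and busdma support.
       */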
  402 static int
  403 dmar_attach(device_t dev)
  404 {
  405         struct dmar_unit *unit;
  406         ACPI_DMAR_HARDWARE_UNIT *dmaru;
  407         uint64_t timeout;
  408         int i, error;
  409 
  410         unit = device_get_softc(dev);
  411         unit->dev = dev;
  412         unit->iommu.unit = device_get_unit(dev);
  413         unit->iommu.dev = dev;
  414         dmaru = dmar_find_by_index(unit->iommu.unit);
  415         if (dmaru == NULL)
  416                 return (EINVAL);
  417         unit->segment = dmaru->Segment;
  418         unit->base = dmaru->Address;
  419         unit->reg_rid = DMAR_REG_RID;
  420         unit->regs = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
  421             &unit->reg_rid, RF_ACTIVE);
  422         if (unit->regs == NULL) {
  423                 device_printf(dev, "cannot allocate register window\n");
  424                 return (ENOMEM);
  425         }
  426         unit->hw_ver = dmar_read4(unit, DMAR_VER_REG);
  427         unit->hw_cap = dmar_read8(unit, DMAR_CAP_REG);
  428         unit->hw_ecap = dmar_read8(unit, DMAR_ECAP_REG);
  429         if (bootverbose)
  430                 dmar_print_caps(dev, unit, dmaru);
  431         dmar_quirks_post_ident(unit);
  432 
  433         timeout = dmar_get_timeout();
  434         TUNABLE_UINT64_FETCH("hw.dmar.timeout", &timeout);
  435         dmar_update_timeout(timeout);
  436 
  437         for (i = 0; i < DMAR_INTR_TOTAL; i++)
  438                 unit->intrs[i].irq = -1;
  439 
  440         unit->intrs[DMAR_INTR_FAULT].name = "fault";
  441         unit->intrs[DMAR_INTR_FAULT].irq_rid = DMAR_FAULT_IRQ_RID;
  442         unit->intrs[DMAR_INTR_FAULT].handler = dmar_fault_intr;
  443         unit->intrs[DMAR_INTR_FAULT].msi_data_reg = DMAR_FEDATA_REG;
  444         unit->intrs[DMAR_INTR_FAULT].msi_addr_reg = DMAR_FEADDR_REG;
  445         unit->intrs[DMAR_INTR_FAULT].msi_uaddr_reg = DMAR_FEUADDR_REG;
  446         unit->intrs[DMAR_INTR_FAULT].enable_intr = dmar_enable_fault_intr;
  447         unit->intrs[DMAR_INTR_FAULT].disable_intr = dmar_disable_fault_intr;
  448         error = dmar_alloc_irq(dev, unit, DMAR_INTR_FAULT);
  449         if (error != 0) {
  450                 dmar_release_resources(dev, unit);
  451                 return (error);
  452         }
  453         if (DMAR_HAS_QI(unit)) {
  454                 unit->intrs[DMAR_INTR_QI].name = "qi";
  455                 unit->intrs[DMAR_INTR_QI].irq_rid = DMAR_QI_IRQ_RID;
  456                 unit->intrs[DMAR_INTR_QI].handler = dmar_qi_intr;
  457                 unit->intrs[DMAR_INTR_QI].msi_data_reg = DMAR_IEDATA_REG;
  458                 unit->intrs[DMAR_INTR_QI].msi_addr_reg = DMAR_IEADDR_REG;
  459                 unit->intrs[DMAR_INTR_QI].msi_uaddr_reg = DMAR_IEUADDR_REG;
  460                 unit->intrs[DMAR_INTR_QI].enable_intr = dmar_enable_qi_intr;
  461                 unit->intrs[DMAR_INTR_QI].disable_intr = dmar_disable_qi_intr;
  462                 error = dmar_alloc_irq(dev, unit, DMAR_INTR_QI);
  463                 if (error != 0) {
  464                         dmar_release_resources(dev, unit);
  465                         return (error);
  466                 }
  467         }
  468 
  469         mtx_init(&unit->iommu.lock, "dmarhw", NULL, MTX_DEF);
  470         unit->domids = new_unrhdr(0, dmar_nd2mask(DMAR_CAP_ND(unit->hw_cap)),
  471             &unit->iommu.lock);
  472         LIST_INIT(&unit->domains);
  473 
  474         /*
  475          * 9.2 "Context Entry":
  476          * When Caching Mode (CM) field is reported as Set, the
  477          * domain-id value of zero is architecturally reserved.
  478          * Software must not use domain-id value of zero
  479          * when CM is Set.
  480          */
  481         if ((unit->hw_cap & DMAR_CAP_CM) != 0)
  482                 alloc_unr_specific(unit->domids, 0);
  483 
  484         unit->ctx_obj = vm_pager_allocate(OBJT_PHYS, NULL, IDX_TO_OFF(1 +
  485             DMAR_CTX_CNT), 0, 0, NULL);
  486 
  487         /*
  488          * Allocate and load the root entry table pointer.  Enable the
  489          * address translation after the required invalidations are
  490          * done.
  491          */
  492         dmar_pgalloc(unit->ctx_obj, 0, IOMMU_PGF_WAITOK | IOMMU_PGF_ZERO);
  493         DMAR_LOCK(unit);
  494         error = dmar_load_root_entry_ptr(unit);
  495         if (error != 0) {
  496                 DMAR_UNLOCK(unit);
  497                 dmar_release_resources(dev, unit);
  498                 return (error);
  499         }
  500         error = dmar_inv_ctx_glob(unit);
  501         if (error != 0) {
  502                 DMAR_UNLOCK(unit);
  503                 dmar_release_resources(dev, unit);
  504                 return (error);
  505         }
  506         if ((unit->hw_ecap & DMAR_ECAP_DI) != 0) {
  507                 error = dmar_inv_iotlb_glob(unit);
  508                 if (error != 0) {
  509                         DMAR_UNLOCK(unit);
  510                         dmar_release_resources(dev, unit);
  511                         return (error);
  512                 }
  513         }
  514 
  515         DMAR_UNLOCK(unit);
  516         error = dmar_init_fault_log(unit);
  517         if (error != 0) {
  518                 dmar_release_resources(dev, unit);
  519                 return (error);
  520         }
  521         error = dmar_init_qi(unit);
  522         if (error != 0) {
  523                 dmar_release_resources(dev, unit);
  524                 return (error);
  525         }
  526         error = dmar_init_irt(unit);
  527         if (error != 0) {
  528                 dmar_release_resources(dev, unit);
  529                 return (error);
  530         }
  531         error = iommu_init_busdma(&unit->iommu);
  532         if (error != 0) {
  533                 dmar_release_resources(dev, unit);
  534                 return (error);
  535         }
  536 
  537 #ifdef NOTYET
  538         DMAR_LOCK(unit);
  539         error = dmar_enable_translation(unit);
  540         if (error != 0) {
  541                 DMAR_UNLOCK(unit);
  542                 dmar_release_resources(dev, unit);
  543                 return (error);
  544         }
  545         DMAR_UNLOCK(unit);
  546 #endif
  547 
  548         return (0);
  549 }
  550 
  551 static int
  552 dmar_detach(device_t dev)
  553 {
  554 
  555         return (EBUSY);
  556 }
  557 
  558 static int
  559 dmar_suspend(device_t dev)
  560 {
  561 
  562         return (0);
  563 }
  564 
  565 static int
  566 dmar_resume(device_t dev)
  567 {
  568 
  569         /* XXXKIB */
  570         return (0);
  571 }
  572 
  573 static device_method_t dmar_methods[] = {
  574         DEVMETHOD(device_identify, dmar_identify),
  575         DEVMETHOD(device_probe, dmar_probe),
  576         DEVMETHOD(device_attach, dmar_attach),
  577         DEVMETHOD(device_detach, dmar_detach),
  578         DEVMETHOD(device_suspend, dmar_suspend),
  579         DEVMETHOD(device_resume, dmar_resume),
  580 #ifdef DEV_APIC
  581         DEVMETHOD(bus_remap_intr, dmar_remap_intr),
  582 #endif
  583         DEVMETHOD_END
  584 };
  585 
  586 static driver_t dmar_driver = {
  587         "dmar",
  588         dmar_methods,
  589         sizeof(struct dmar_unit),
  590 };
  591 
  592 DRIVER_MODULE(dmar, acpi, dmar_driver, dmar_devclass, 0, 0);
  593 MODULE_DEPEND(dmar, acpi, 1, 1, 1);
  594 
  595 static void
  596 dmar_print_path(int busno, int depth, const ACPI_DMAR_PCI_PATH *path)
  597 {
  598         int i;
  599 
  600         printf("[%d, ", busno);
  601         for (i = 0; i < depth; i++) {
  602                 if (i != 0)
  603                         printf(", ");
  604                 printf("(%d, %d)", path[i].Device, path[i].Function);
  605         }
  606         printf("]");
  607 }
  608 
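      /*
       * dmar_dev_depth() and dmar_dev_path() reconstruct the PCI path of a
       * device in the form used by DMAR device-scope entries: a starting bus
       * number plus a (device, function) pair for every hop down from the
       * host bridge.
       */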
  609 int
  610 dmar_dev_depth(device_t child)
  611 {
  612         devclass_t pci_class;
  613         device_t bus, pcib;
  614         int depth;
  615 
  616         pci_class = devclass_find("pci");
  617         for (depth = 1; ; depth++) {
  618                 bus = device_get_parent(child);
  619                 pcib = device_get_parent(bus);
  620                 if (device_get_devclass(device_get_parent(pcib)) !=
  621                     pci_class)
  622                         return (depth);
  623                 child = pcib;
  624         }
  625 }
  626 
  627 void
  628 dmar_dev_path(device_t child, int *busno, void *path1, int depth)
  629 {
  630         devclass_t pci_class;
  631         device_t bus, pcib;
  632         ACPI_DMAR_PCI_PATH *path;
  633 
  634         pci_class = devclass_find("pci");
  635         path = path1;
  636         for (depth--; depth != -1; depth--) {
  637                 path[depth].Device = pci_get_slot(child);
  638                 path[depth].Function = pci_get_function(child);
  639                 bus = device_get_parent(child);
  640                 pcib = device_get_parent(bus);
  641                 if (device_get_devclass(device_get_parent(pcib)) !=
  642                     pci_class) {
  643                         /* reached a host bridge */
  644                         *busno = pcib_get_bus(bus);
  645                         return;
  646                 }
  647                 child = pcib;
  648         }
  649         panic("wrong depth");
  650 }
  651 
  652 static int
  653 dmar_match_pathes(int busno1, const ACPI_DMAR_PCI_PATH *path1, int depth1,
  654     int busno2, const ACPI_DMAR_PCI_PATH *path2, int depth2,
  655     enum AcpiDmarScopeType scope_type)
  656 {
  657         int i, depth;
  658 
  659         if (busno1 != busno2)
  660                 return (0);
  661         if (scope_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && depth1 != depth2)
  662                 return (0);
  663         depth = depth1;
  664         if (depth2 < depth)
  665                 depth = depth2;
  666         for (i = 0; i < depth; i++) {
  667                 if (path1[i].Device != path2[i].Device ||
  668                     path1[i].Function != path2[i].Function)
  669                         return (0);
  670         }
  671         return (1);
  672 }
  673 
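      /*
       * Compare a device-scope entry from the DMAR table against a device
       * path.  Returns -1 for a malformed entry, 0 for no match and 1 for a
       * match.  Endpoint scopes must match the full path; bridge scopes only
       * need to match as a prefix, so they cover every device behind the
       * bridge.
       */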
  674 static int
  675 dmar_match_devscope(ACPI_DMAR_DEVICE_SCOPE *devscope, int dev_busno,
  676     const ACPI_DMAR_PCI_PATH *dev_path, int dev_path_len)
  677 {
  678         ACPI_DMAR_PCI_PATH *path;
  679         int path_len;
  680 
  681         if (devscope->Length < sizeof(*devscope)) {
  682                 printf("dmar_match_devscope: corrupted DMAR table, dl %d\n",
  683                     devscope->Length);
  684                 return (-1);
  685         }
  686         if (devscope->EntryType != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
  687             devscope->EntryType != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
  688                 return (0);
  689         path_len = devscope->Length - sizeof(*devscope);
  690         if (path_len % 2 != 0) {
  691                 printf("dmar_match_devscope: corrupted DMAR table, dl %d\n",
  692                     devscope->Length);
  693                 return (-1);
  694         }
  695         path_len /= 2;
  696         path = (ACPI_DMAR_PCI_PATH *)(devscope + 1);
  697         if (path_len == 0) {
  698                 printf("dmar_match_devscope: corrupted DMAR table, dl %d\n",
  699                     devscope->Length);
  700                 return (-1);
  701         }
  702 
  703         return (dmar_match_pathes(devscope->Bus, path, path_len, dev_busno,
  704             dev_path, dev_path_len, devscope->EntryType));
  705 }
  706 
  707 static bool
  708 dmar_match_by_path(struct dmar_unit *unit, int dev_domain, int dev_busno,
  709     const ACPI_DMAR_PCI_PATH *dev_path, int dev_path_len, const char **banner)
  710 {
  711         ACPI_DMAR_HARDWARE_UNIT *dmarh;
  712         ACPI_DMAR_DEVICE_SCOPE *devscope;
  713         char *ptr, *ptrend;
  714         int match;
  715 
  716         dmarh = dmar_find_by_index(unit->iommu.unit);
  717         if (dmarh == NULL)
  718                 return (false);
  719         if (dmarh->Segment != dev_domain)
  720                 return (false);
  721         if ((dmarh->Flags & ACPI_DMAR_INCLUDE_ALL) != 0) {
  722                 if (banner != NULL)
  723                         *banner = "INCLUDE_ALL";
  724                 return (true);
  725         }
  726         ptr = (char *)dmarh + sizeof(*dmarh);
  727         ptrend = (char *)dmarh + dmarh->Header.Length;
  728         while (ptr < ptrend) {
  729                 devscope = (ACPI_DMAR_DEVICE_SCOPE *)ptr;
  730                 ptr += devscope->Length;
  731                 match = dmar_match_devscope(devscope, dev_busno, dev_path,
  732                     dev_path_len);
  733                 if (match == -1)
  734                         return (false);
  735                 if (match == 1) {
  736                         if (banner != NULL)
  737                                 *banner = "specific match";
  738                         return (true);
  739                 }
  740         }
  741         return (false);
  742 }
  743 
  744 static struct dmar_unit *
  745 dmar_find_by_scope(int dev_domain, int dev_busno,
  746     const ACPI_DMAR_PCI_PATH *dev_path, int dev_path_len)
  747 {
  748         struct dmar_unit *unit;
  749         int i;
  750 
  751         for (i = 0; i < dmar_devcnt; i++) {
  752                 if (dmar_devs[i] == NULL)
  753                         continue;
  754                 unit = device_get_softc(dmar_devs[i]);
  755                 if (dmar_match_by_path(unit, dev_domain, dev_busno, dev_path,
  756                     dev_path_len, NULL))
  757                         return (unit);
  758         }
  759         return (NULL);
  760 }
  761 
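      /*
       * Map a PCI(e) device to the DMAR unit responsible for it, either
       * through an INCLUDE_ALL hardware unit for the segment or through a
       * specific device-scope match.
       */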
  762 struct dmar_unit *
  763 dmar_find(device_t dev, bool verbose)
  764 {
  765         struct dmar_unit *unit;
  766         const char *banner;
  767         int i, dev_domain, dev_busno, dev_path_len;
  768 
  769         /*
  770          * This function can only handle PCI(e) devices.
  771          */
  772         if (device_get_devclass(device_get_parent(dev)) !=
  773             devclass_find("pci"))
  774                 return (NULL);
  775 
  776         dev_domain = pci_get_domain(dev);
  777         dev_path_len = dmar_dev_depth(dev);
  778         ACPI_DMAR_PCI_PATH dev_path[dev_path_len];
  779         dmar_dev_path(dev, &dev_busno, dev_path, dev_path_len);
  780         banner = "";
  781 
  782         for (i = 0; i < dmar_devcnt; i++) {
  783                 if (dmar_devs[i] == NULL)
  784                         continue;
  785                 unit = device_get_softc(dmar_devs[i]);
  786                 if (dmar_match_by_path(unit, dev_domain, dev_busno,
  787                     dev_path, dev_path_len, &banner))
  788                         break;
  789         }
  790         if (i == dmar_devcnt)
  791                 return (NULL);
  792 
  793         if (verbose) {
  794                 device_printf(dev, "pci%d:%d:%d:%d matched dmar%d by %s",
  795                     dev_domain, pci_get_bus(dev), pci_get_slot(dev),
  796                     pci_get_function(dev), unit->iommu.unit, banner);
  797                 printf(" scope path ");
  798                 dmar_print_path(dev_busno, dev_path_len, dev_path);
  799                 printf("\n");
  800         }
  801         return (unit);
  802 }
  803 
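      /*
       * Look up the DMAR unit covering a non-PCI requester (IOAPIC or HPET)
       * by its enumeration id, deriving the request id either from the
       * IOAPIC's PCI binding or from the single-hop scope path.
       */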
  804 static struct dmar_unit *
  805 dmar_find_nonpci(u_int id, u_int entry_type, uint16_t *rid)
  806 {
  807         device_t dmar_dev;
  808         struct dmar_unit *unit;
  809         ACPI_DMAR_HARDWARE_UNIT *dmarh;
  810         ACPI_DMAR_DEVICE_SCOPE *devscope;
  811         ACPI_DMAR_PCI_PATH *path;
  812         char *ptr, *ptrend;
  813 #ifdef DEV_APIC
  814         int error;
  815 #endif
  816         int i;
  817 
  818         for (i = 0; i < dmar_devcnt; i++) {
  819                 dmar_dev = dmar_devs[i];
  820                 if (dmar_dev == NULL)
  821                         continue;
  822                 unit = (struct dmar_unit *)device_get_softc(dmar_dev);
  823                 dmarh = dmar_find_by_index(i);
  824                 if (dmarh == NULL)
  825                         continue;
  826                 ptr = (char *)dmarh + sizeof(*dmarh);
  827                 ptrend = (char *)dmarh + dmarh->Header.Length;
  828                 for (;;) {
  829                         if (ptr >= ptrend)
  830                                 break;
  831                         devscope = (ACPI_DMAR_DEVICE_SCOPE *)ptr;
  832                         ptr += devscope->Length;
  833                         if (devscope->EntryType != entry_type)
  834                                 continue;
  835                         if (devscope->EnumerationId != id)
  836                                 continue;
  837 #ifdef DEV_APIC
  838                         if (entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
  839                                 error = ioapic_get_rid(id, rid);
  840                                 /*
  841                                  * If our IOAPIC has PCI bindings then
  842                                  * use the PCI device rid.
  843                                  */
  844                                 if (error == 0)
  845                                         return (unit);
  846                         }
  847 #endif
  848                         if (devscope->Length - sizeof(ACPI_DMAR_DEVICE_SCOPE)
  849                             == 2) {
  850                                 if (rid != NULL) {
  851                                         path = (ACPI_DMAR_PCI_PATH *)
  852                                             (devscope + 1);
  853                                         *rid = PCI_RID(devscope->Bus,
  854                                             path->Device, path->Function);
  855                                 }
  856                                 return (unit);
  857                         }
  858                         printf(
  859                            "dmar_find_nonpci: id %d type %d path length != 2\n",
  860                             id, entry_type);
  861                         break;
  862                 }
  863         }
  864         return (NULL);
  865 }
  866 
  867 struct dmar_unit *
  868 dmar_find_hpet(device_t dev, uint16_t *rid)
  869 {
  870 
  871         return (dmar_find_nonpci(hpet_get_uid(dev), ACPI_DMAR_SCOPE_TYPE_HPET,
  872             rid));
  873 }
  874 
  875 struct dmar_unit *
  876 dmar_find_ioapic(u_int apic_id, uint16_t *rid)
  877 {
  878 
  879         return (dmar_find_nonpci(apic_id, ACPI_DMAR_SCOPE_TYPE_IOAPIC, rid));
  880 }
  881 
  882 struct rmrr_iter_args {
  883         struct dmar_domain *domain;
  884         int dev_domain;
  885         int dev_busno;
  886         const ACPI_DMAR_PCI_PATH *dev_path;
  887         int dev_path_len;
  888         struct iommu_map_entries_tailq *rmrr_entries;
  889 };
  890 
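      /*
       * Collect the Reserved Memory Region Reporting (RMRR) ranges whose
       * device scope matches the given device: regions that the platform
       * requires to remain accessible to the device.
       */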
  891 static int
  892 dmar_rmrr_iter(ACPI_DMAR_HEADER *dmarh, void *arg)
  893 {
  894         struct rmrr_iter_args *ria;
  895         ACPI_DMAR_RESERVED_MEMORY *resmem;
  896         ACPI_DMAR_DEVICE_SCOPE *devscope;
  897         struct iommu_map_entry *entry;
  898         char *ptr, *ptrend;
  899         int match;
  900 
  901         if (dmarh->Type != ACPI_DMAR_TYPE_RESERVED_MEMORY)
  902                 return (1);
  903 
  904         ria = arg;
  905         resmem = (ACPI_DMAR_RESERVED_MEMORY *)dmarh;
  906         if (resmem->Segment != ria->dev_domain)
  907                 return (1);
  908 
  909         ptr = (char *)resmem + sizeof(*resmem);
  910         ptrend = (char *)resmem + resmem->Header.Length;
  911         for (;;) {
  912                 if (ptr >= ptrend)
  913                         break;
  914                 devscope = (ACPI_DMAR_DEVICE_SCOPE *)ptr;
  915                 ptr += devscope->Length;
  916                 match = dmar_match_devscope(devscope, ria->dev_busno,
  917                     ria->dev_path, ria->dev_path_len);
  918                 if (match == 1) {
  919                         entry = iommu_gas_alloc_entry(DOM2IODOM(ria->domain),
  920                             IOMMU_PGF_WAITOK);
  921                         entry->start = resmem->BaseAddress;
  922                         /* The RMRR entry end address is inclusive. */
  923                         entry->end = resmem->EndAddress;
  924                         TAILQ_INSERT_TAIL(ria->rmrr_entries, entry,
  925                             dmamap_link);
  926                 }
  927         }
  928 
  929         return (1);
  930 }
  931 
  932 void
  933 dmar_dev_parse_rmrr(struct dmar_domain *domain, int dev_domain, int dev_busno,
  934     const void *dev_path, int dev_path_len,
  935     struct iommu_map_entries_tailq *rmrr_entries)
  936 {
  937         struct rmrr_iter_args ria;
  938 
  939         ria.domain = domain;
  940         ria.dev_domain = dev_domain;
  941         ria.dev_busno = dev_busno;
  942         ria.dev_path = (const ACPI_DMAR_PCI_PATH *)dev_path;
  943         ria.dev_path_len = dev_path_len;
  944         ria.rmrr_entries = rmrr_entries;
  945         dmar_iterate_tbl(dmar_rmrr_iter, &ria);
  946 }
  947 
  948 struct inst_rmrr_iter_args {
  949         struct dmar_unit *dmar;
  950 };
  951 
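      /*
       * Resolve a DMAR scope path to a device_t by walking the path one
       * bridge at a time, reading each bridge's secondary bus number from
       * PCI config space, and compute the resulting request id.
       */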
  952 static device_t
  953 dmar_path_dev(int segment, int path_len, int busno,
  954     const ACPI_DMAR_PCI_PATH *path, uint16_t *rid)
  955 {
  956         device_t dev;
  957         int i;
  958 
  959         dev = NULL;
  960         for (i = 0; i < path_len; i++) {
  961                 dev = pci_find_dbsf(segment, busno, path->Device,
  962                     path->Function);
  963                 if (i != path_len - 1) {
  964                         busno = pci_cfgregread(busno, path->Device,
  965                             path->Function, PCIR_SECBUS_1, 1);
  966                         path++;
  967                 }
  968         }
  969         *rid = PCI_RID(busno, path->Device, path->Function);
  970         return (dev);
  971 }
  972 
  973 static int
  974 dmar_inst_rmrr_iter(ACPI_DMAR_HEADER *dmarh, void *arg)
  975 {
  976         const ACPI_DMAR_RESERVED_MEMORY *resmem;
  977         const ACPI_DMAR_DEVICE_SCOPE *devscope;
  978         struct inst_rmrr_iter_args *iria;
  979         const char *ptr, *ptrend;
  980         device_t dev;
  981         struct dmar_unit *unit;
  982         int dev_path_len;
  983         uint16_t rid;
  984 
  985         iria = arg;
  986 
  987         if (dmarh->Type != ACPI_DMAR_TYPE_RESERVED_MEMORY)
  988                 return (1);
  989 
  990         resmem = (ACPI_DMAR_RESERVED_MEMORY *)dmarh;
  991         if (resmem->Segment != iria->dmar->segment)
  992                 return (1);
  993 
  994         ptr = (const char *)resmem + sizeof(*resmem);
  995         ptrend = (const char *)resmem + resmem->Header.Length;
  996         for (;;) {
  997                 if (ptr >= ptrend)
  998                         break;
  999                 devscope = (const ACPI_DMAR_DEVICE_SCOPE *)ptr;
 1000                 ptr += devscope->Length;
 1001                 /* XXXKIB bridge */
 1002                 if (devscope->EntryType != ACPI_DMAR_SCOPE_TYPE_ENDPOINT)
 1003                         continue;
 1004                 rid = 0;
 1005                 dev_path_len = (devscope->Length -
 1006                     sizeof(ACPI_DMAR_DEVICE_SCOPE)) / 2;
 1007                 dev = dmar_path_dev(resmem->Segment, dev_path_len,
 1008                     devscope->Bus,
 1009                     (const ACPI_DMAR_PCI_PATH *)(devscope + 1), &rid);
 1010                 if (dev == NULL) {
 1011                         if (bootverbose) {
 1012                                 printf("dmar%d no dev found for RMRR "
 1013                                     "[%#jx, %#jx] rid %#x scope path ",
 1014                                      iria->dmar->iommu.unit,
 1015                                      (uintmax_t)resmem->BaseAddress,
 1016                                      (uintmax_t)resmem->EndAddress,
 1017                                      rid);
 1018                                 dmar_print_path(devscope->Bus, dev_path_len,
 1019                                     (const ACPI_DMAR_PCI_PATH *)(devscope + 1));
 1020                                 printf("\n");
 1021                         }
 1022                         unit = dmar_find_by_scope(resmem->Segment,
 1023                             devscope->Bus,
 1024                             (const ACPI_DMAR_PCI_PATH *)(devscope + 1),
 1025                             dev_path_len);
 1026                         if (iria->dmar != unit)
 1027                                 continue;
 1028                         dmar_get_ctx_for_devpath(iria->dmar, rid,
 1029                             resmem->Segment, devscope->Bus, 
 1030                             (const ACPI_DMAR_PCI_PATH *)(devscope + 1),
 1031                             dev_path_len, false, true);
 1032                 } else {
 1033                         unit = dmar_find(dev, false);
 1034                         if (iria->dmar != unit)
 1035                                 continue;
 1036                         iommu_instantiate_ctx(&(iria)->dmar->iommu,
 1037                             dev, true);
 1038                 }
 1039         }
 1040 
 1041         return (1);
 1042 
 1043 }
 1044 
 1045 /*
 1046  * Pre-create all contexts for the DMAR which have RMRR entries.
 1047  */
 1048 int
 1049 dmar_instantiate_rmrr_ctxs(struct iommu_unit *unit)
 1050 {
 1051         struct dmar_unit *dmar;
 1052         struct inst_rmrr_iter_args iria;
 1053         int error;
 1054 
 1055         dmar = IOMMU2DMAR(unit);
 1056 
 1057         if (!dmar_barrier_enter(dmar, DMAR_BARRIER_RMRR))
 1058                 return (0);
 1059 
 1060         error = 0;
 1061         iria.dmar = dmar;
 1062         dmar_iterate_tbl(dmar_inst_rmrr_iter, &iria);
 1063         DMAR_LOCK(dmar);
 1064         if (!LIST_EMPTY(&dmar->domains)) {
 1065                 KASSERT((dmar->hw_gcmd & DMAR_GCMD_TE) == 0,
 1066             ("dmar%d: RMRR not handled but translation is already enabled",
 1067                     dmar->iommu.unit));
 1068                 error = dmar_enable_translation(dmar);
 1069                 if (bootverbose) {
 1070                         if (error == 0) {
 1071                                 printf("dmar%d: enabled translation\n",
 1072                                     dmar->iommu.unit);
 1073                         } else {
 1074                                 printf("dmar%d: enabling translation failed, "
 1075                                     "error %d\n", dmar->iommu.unit, error);
 1076                         }
 1077                 }
 1078         }
 1079         dmar_barrier_exit(dmar, DMAR_BARRIER_RMRR);
 1080         return (error);
 1081 }
 1082 
 1083 #ifdef DDB
 1084 #include <ddb/ddb.h>
 1085 #include <ddb/db_lex.h>
 1086 
 1087 static void
 1088 dmar_print_domain_entry(const struct iommu_map_entry *entry)
 1089 {
 1090         struct iommu_map_entry *l, *r;
 1091 
 1092         db_printf(
 1093             "    start %jx end %jx first %jx last %jx free_down %jx flags %x ",
 1094             entry->start, entry->end, entry->first, entry->last,
 1095             entry->free_down, entry->flags);
 1096         db_printf("left ");
 1097         l = RB_LEFT(entry, rb_entry);
 1098         if (l == NULL)
 1099                 db_printf("NULL ");
 1100         else
 1101                 db_printf("%jx ", l->start);
 1102         db_printf("right ");
 1103         r = RB_RIGHT(entry, rb_entry);
 1104         if (r == NULL)
 1105                 db_printf("NULL");
 1106         else
 1107                 db_printf("%jx", r->start);
 1108         db_printf("\n");
 1109 }
 1110 
 1111 static void
 1112 dmar_print_ctx(struct dmar_ctx *ctx)
 1113 {
 1114 
 1115         db_printf(
 1116             "    @%p pci%d:%d:%d refs %d flags %x loads %lu unloads %lu\n",
 1117             ctx, pci_get_bus(ctx->context.tag->owner),
 1118             pci_get_slot(ctx->context.tag->owner),
 1119             pci_get_function(ctx->context.tag->owner), ctx->refs,
 1120             ctx->context.flags, ctx->context.loads, ctx->context.unloads);
 1121 }
 1122 
 1123 static void
 1124 dmar_print_domain(struct dmar_domain *domain, bool show_mappings)
 1125 {
 1126         struct iommu_domain *iodom;
 1127         struct iommu_map_entry *entry;
 1128         struct dmar_ctx *ctx;
 1129 
 1130         iodom = DOM2IODOM(domain);
 1131 
 1132         db_printf(
 1133             "  @%p dom %d mgaw %d agaw %d pglvl %d end %jx refs %d\n"
 1134             "   ctx_cnt %d flags %x pgobj %p map_ents %u\n",
 1135             domain, domain->domain, domain->mgaw, domain->agaw, domain->pglvl,
 1136             (uintmax_t)domain->iodom.end, domain->refs, domain->ctx_cnt,
 1137             domain->iodom.flags, domain->pgtbl_obj, domain->iodom.entries_cnt);
 1138         if (!LIST_EMPTY(&domain->contexts)) {
 1139                 db_printf("  Contexts:\n");
 1140                 LIST_FOREACH(ctx, &domain->contexts, link)
 1141                         dmar_print_ctx(ctx);
 1142         }
 1143         if (!show_mappings)
 1144                 return;
 1145         db_printf("    mapped:\n");
 1146         RB_FOREACH(entry, iommu_gas_entries_tree, &iodom->rb_root) {
 1147                 dmar_print_domain_entry(entry);
 1148                 if (db_pager_quit)
 1149                         break;
 1150         }
 1151         if (db_pager_quit)
 1152                 return;
 1153         db_printf("    unloading:\n");
 1154         TAILQ_FOREACH(entry, &domain->iodom.unload_entries, dmamap_link) {
 1155                 dmar_print_domain_entry(entry);
 1156                 if (db_pager_quit)
 1157                         break;
 1158         }
 1159 }
 1160 
 1161 DB_SHOW_COMMAND_FLAGS(dmar_domain, db_dmar_print_domain, CS_OWN)
 1162 {
 1163         struct dmar_unit *unit;
 1164         struct dmar_domain *domain;
 1165         struct dmar_ctx *ctx;
 1166         bool show_mappings, valid;
 1167         int pci_domain, bus, device, function, i, t;
 1168         db_expr_t radix;
 1169 
 1170         valid = false;
 1171         radix = db_radix;
 1172         db_radix = 10;
 1173         t = db_read_token();
 1174         if (t == tSLASH) {
 1175                 t = db_read_token();
 1176                 if (t != tIDENT) {
 1177                         db_printf("Bad modifier\n");
 1178                         db_radix = radix;
 1179                         db_skip_to_eol();
 1180                         return;
 1181                 }
 1182                 show_mappings = strchr(db_tok_string, 'm') != NULL;
 1183                 t = db_read_token();
 1184         } else {
 1185                 show_mappings = false;
 1186         }
 1187         if (t == tNUMBER) {
 1188                 pci_domain = db_tok_number;
 1189                 t = db_read_token();
 1190                 if (t == tNUMBER) {
 1191                         bus = db_tok_number;
 1192                         t = db_read_token();
 1193                         if (t == tNUMBER) {
 1194                                 device = db_tok_number;
 1195                                 t = db_read_token();
 1196                                 if (t == tNUMBER) {
 1197                                         function = db_tok_number;
 1198                                         valid = true;
 1199                                 }
 1200                         }
 1201                 }
 1202         }
 1203         db_radix = radix;
 1204         db_skip_to_eol();
 1205         if (!valid) {
 1206                 db_printf("usage: show dmar_domain [/m] "
 1207                     "<domain> <bus> <device> <func>\n");
 1208                 return;
 1209         }
 1210         for (i = 0; i < dmar_devcnt; i++) {
 1211                 unit = device_get_softc(dmar_devs[i]);
 1212                 LIST_FOREACH(domain, &unit->domains, link) {
 1213                         LIST_FOREACH(ctx, &domain->contexts, link) {
 1214                                 if (pci_domain == unit->segment && 
 1215                                     bus == pci_get_bus(ctx->context.tag->owner) &&
 1216                                     device ==
 1217                                     pci_get_slot(ctx->context.tag->owner) &&
 1218                                     function ==
 1219                                     pci_get_function(ctx->context.tag->owner)) {
 1220                                         dmar_print_domain(domain,
 1221                                             show_mappings);
 1222                                         goto out;
 1223                                 }
 1224                         }
 1225                 }
 1226         }
 1227 out:;
 1228 }
 1229 
 1230 static void
 1231 dmar_print_one(int idx, bool show_domains, bool show_mappings)
 1232 {
 1233         struct dmar_unit *unit;
 1234         struct dmar_domain *domain;
 1235         int i, frir;
 1236 
 1237         unit = device_get_softc(dmar_devs[idx]);
 1238         db_printf("dmar%d at %p, root at 0x%jx, ver 0x%x\n", unit->iommu.unit,
 1239             unit, dmar_read8(unit, DMAR_RTADDR_REG),
 1240             dmar_read4(unit, DMAR_VER_REG));
 1241         db_printf("cap 0x%jx ecap 0x%jx gsts 0x%x fsts 0x%x fectl 0x%x\n",
 1242             (uintmax_t)dmar_read8(unit, DMAR_CAP_REG),
 1243             (uintmax_t)dmar_read8(unit, DMAR_ECAP_REG),
 1244             dmar_read4(unit, DMAR_GSTS_REG),
 1245             dmar_read4(unit, DMAR_FSTS_REG),
 1246             dmar_read4(unit, DMAR_FECTL_REG));
 1247         if (unit->ir_enabled) {
 1248                 db_printf("ir is enabled; IRT @%p phys 0x%jx maxcnt %d\n",
 1249                     unit->irt, (uintmax_t)unit->irt_phys, unit->irte_cnt);
 1250         }
 1251         db_printf("fed 0x%x fea 0x%x feua 0x%x\n",
 1252             dmar_read4(unit, DMAR_FEDATA_REG),
 1253             dmar_read4(unit, DMAR_FEADDR_REG),
 1254             dmar_read4(unit, DMAR_FEUADDR_REG));
 1255         db_printf("primary fault log:\n");
 1256         for (i = 0; i < DMAR_CAP_NFR(unit->hw_cap); i++) {
 1257                 frir = (DMAR_CAP_FRO(unit->hw_cap) + i) * 16;
 1258                 db_printf("  %d at 0x%x: %jx %jx\n", i, frir,
 1259                     (uintmax_t)dmar_read8(unit, frir),
 1260                     (uintmax_t)dmar_read8(unit, frir + 8));
 1261         }
 1262         if (DMAR_HAS_QI(unit)) {
 1263                 db_printf("ied 0x%x iea 0x%x ieua 0x%x\n",
 1264                     dmar_read4(unit, DMAR_IEDATA_REG),
 1265                     dmar_read4(unit, DMAR_IEADDR_REG),
 1266                     dmar_read4(unit, DMAR_IEUADDR_REG));
 1267                 if (unit->qi_enabled) {
 1268                         db_printf("qi is enabled: queue @0x%jx (IQA 0x%jx) "
 1269                             "size 0x%jx\n"
 1270                     "  head 0x%x tail 0x%x avail 0x%x status 0x%x ctrl 0x%x\n"
 1271                     "  hw compl 0x%x@%p/phys@%jx next seq 0x%x gen 0x%x\n",
 1272                             (uintmax_t)unit->inv_queue,
 1273                             (uintmax_t)dmar_read8(unit, DMAR_IQA_REG),
 1274                             (uintmax_t)unit->inv_queue_size,
 1275                             dmar_read4(unit, DMAR_IQH_REG),
 1276                             dmar_read4(unit, DMAR_IQT_REG),
 1277                             unit->inv_queue_avail,
 1278                             dmar_read4(unit, DMAR_ICS_REG),
 1279                             dmar_read4(unit, DMAR_IECTL_REG),
 1280                             unit->inv_waitd_seq_hw,
 1281                             &unit->inv_waitd_seq_hw,
 1282                             (uintmax_t)unit->inv_waitd_seq_hw_phys,
 1283                             unit->inv_waitd_seq,
 1284                             unit->inv_waitd_gen);
 1285                 } else {
 1286                         db_printf("qi is disabled\n");
 1287                 }
 1288         }
 1289         if (show_domains) {
 1290                 db_printf("domains:\n");
 1291                 LIST_FOREACH(domain, &unit->domains, link) {
 1292                         dmar_print_domain(domain, show_mappings);
 1293                         if (db_pager_quit)
 1294                                 break;
 1295                 }
 1296         }
 1297 }
 1298 
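       /*
        * DDB commands: "show dmar [/d] [/m] index" dumps one unit (with its
        * domains and mappings when requested), "show all dmars" dumps every
        * unit, and "show dmar_domain [/m] <domain> <bus> <device> <func>"
        * dumps the domain owning a particular PCI function.
        */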
 1299 DB_SHOW_COMMAND(dmar, db_dmar_print)
 1300 {
 1301         bool show_domains, show_mappings;
 1302 
 1303         show_domains = strchr(modif, 'd') != NULL;
 1304         show_mappings = strchr(modif, 'm') != NULL;
 1305         if (!have_addr) {
 1306                 db_printf("usage: show dmar [/d] [/m] index\n");
 1307                 return;
 1308         }
 1309         dmar_print_one((int)addr, show_domains, show_mappings);
 1310 }
 1311 
 1312 DB_SHOW_ALL_COMMAND(dmars, db_show_all_dmars)
 1313 {
 1314         int i;
 1315         bool show_domains, show_mappings;
 1316 
 1317         show_domains = strchr(modif, 'd') != NULL;
 1318         show_mappings = strchr(modif, 'm') != NULL;
 1319 
 1320         for (i = 0; i < dmar_devcnt; i++) {
 1321                 dmar_print_one(i, show_domains, show_mappings);
 1322                 if (db_pager_quit)
 1323                         break;
 1324         }
 1325 }
 1326 #endif
 1327 
 1328 struct iommu_unit *
 1329 iommu_find(device_t dev, bool verbose)
 1330 {
 1331         struct dmar_unit *dmar;
 1332 
 1333         dmar = dmar_find(dev, verbose);
 1334 
 1335         return (&dmar->iommu);
 1336 }
