FreeBSD/Linux Kernel Cross Reference
sys/x86/iommu/intel_idpgtbl.c


    1 /*-
    2  * Copyright (c) 2013 The FreeBSD Foundation
    3  * All rights reserved.
    4  *
    5  * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
    6  * under sponsorship from the FreeBSD Foundation.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  */
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD$");
   32 
   33 #include <sys/param.h>
   34 #include <sys/systm.h>
   35 #include <sys/malloc.h>
   36 #include <sys/bus.h>
   37 #include <sys/interrupt.h>
   38 #include <sys/kernel.h>
   39 #include <sys/ktr.h>
   40 #include <sys/lock.h>
   41 #include <sys/memdesc.h>
   42 #include <sys/mutex.h>
   43 #include <sys/proc.h>
   44 #include <sys/rwlock.h>
   45 #include <sys/rman.h>
   46 #include <sys/sf_buf.h>
   47 #include <sys/sysctl.h>
   48 #include <sys/taskqueue.h>
   49 #include <sys/tree.h>
   50 #include <sys/uio.h>
   51 #include <vm/vm.h>
   52 #include <vm/vm_extern.h>
   53 #include <vm/vm_kern.h>
   54 #include <vm/vm_object.h>
   55 #include <vm/vm_page.h>
   56 #include <vm/vm_pager.h>
   57 #include <vm/vm_map.h>
   58 #include <machine/atomic.h>
   59 #include <machine/bus.h>
   60 #include <machine/cpu.h>
   61 #include <machine/md_var.h>
   62 #include <machine/specialreg.h>
   63 #include <x86/include/busdma_impl.h>
   64 #include <x86/iommu/intel_reg.h>
   65 #include <x86/iommu/busdma_dmar.h>
   66 #include <x86/iommu/intel_dmar.h>
   67 
   68 static int ctx_unmap_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base,
   69     dmar_gaddr_t size, int flags);
   70 
   71 /*
   72  * The cache of the identity mapping page tables for the DMARs.  Using
    73  * the cache saves a significant amount of memory for page tables
    74  * by reusing the page tables, since DMARs are usually identical and
    75  * have the same capabilities.  Still, the cache records the
    76  * information needed to match DMAR capabilities and page table
    77  * format, to correctly handle differing DMARs.
   78  */
   79 
   80 struct idpgtbl {
   81         dmar_gaddr_t maxaddr;   /* Page table covers the guest address
   82                                    range [0..maxaddr) */
   83         int pglvl;              /* Total page table levels ignoring
   84                                    superpages */
   85         int leaf;               /* The last materialized page table
   86                                    level, it is non-zero if superpages
   87                                    are supported */
   88         vm_object_t pgtbl_obj;  /* The page table pages */
   89         LIST_ENTRY(idpgtbl) link;
   90 };
   91 
   92 static struct sx idpgtbl_lock;
   93 SX_SYSINIT(idpgtbl, &idpgtbl_lock, "idpgtbl");
   94 static LIST_HEAD(, idpgtbl) idpgtbls = LIST_HEAD_INITIALIZER(idpgtbls);
   95 static MALLOC_DEFINE(M_DMAR_IDPGTBL, "dmar_idpgtbl",
   96     "Intel DMAR Identity mappings cache elements");
   97 
   98 /*
   99  * Build the next level of the page tables for the identity mapping.
  100  * - lvl is the level to build;
   101  * - idx is the index of the page table page in the pgtbl_obj, which
   102  *   is being allocated and filled now;
  103  * - addr is the starting address in the bus address space which is
  104  *   mapped by the page table page.
  105  */
  106 static void
  107 ctx_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx,
  108     dmar_gaddr_t addr)
  109 {
  110         vm_page_t m1;
  111         dmar_pte_t *pte;
  112         struct sf_buf *sf;
  113         dmar_gaddr_t f, pg_sz;
  114         vm_pindex_t base;
  115         int i;
  116 
  117         VM_OBJECT_ASSERT_LOCKED(tbl->pgtbl_obj);
  118         if (addr >= tbl->maxaddr)
  119                 return;
  120         (void)dmar_pgalloc(tbl->pgtbl_obj, idx, DMAR_PGF_OBJL | DMAR_PGF_WAITOK |
  121             DMAR_PGF_ZERO);
  122         base = idx * DMAR_NPTEPG + 1; /* Index of the first child page of idx */
  123         pg_sz = pglvl_page_size(tbl->pglvl, lvl);
  124         if (lvl != tbl->leaf) {
  125                 for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz)
  126                         ctx_idmap_nextlvl(tbl, lvl + 1, base + i, f);
  127         }
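                /*
                 * dmar_map_pgtbl() with DMAR_PGF_WAITOK may sleep while
                 * mapping the page table page, so the object lock is
                 * dropped around the mapping and the pte fill below.
                 */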
  128         VM_OBJECT_WUNLOCK(tbl->pgtbl_obj);
  129         pte = dmar_map_pgtbl(tbl->pgtbl_obj, idx, DMAR_PGF_WAITOK, &sf);
  130         if (lvl == tbl->leaf) {
  131                 for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz) {
  132                         if (f >= tbl->maxaddr)
  133                                 break;
  134                         pte[i].pte = (DMAR_PTE_ADDR_MASK & f) |
  135                             DMAR_PTE_R | DMAR_PTE_W;
  136                 }
  137         } else {
  138                 for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz) {
  139                         if (f >= tbl->maxaddr)
  140                                 break;
  141                         m1 = dmar_pgalloc(tbl->pgtbl_obj, base + i,
  142                             DMAR_PGF_NOALLOC);
  143                         KASSERT(m1 != NULL, ("lost page table page"));
  144                         pte[i].pte = (DMAR_PTE_ADDR_MASK &
  145                             VM_PAGE_TO_PHYS(m1)) | DMAR_PTE_R | DMAR_PTE_W;
  146                 }
  147         }
  148         /* ctx_get_idmap_pgtbl flushes CPU cache if needed. */
  149         dmar_unmap_pgtbl(sf);
  150         VM_OBJECT_WLOCK(tbl->pgtbl_obj);
  151 }
  152 
  153 /*
  154  * Find a ready and compatible identity-mapping page table in the
  155  * cache. If not found, populate the identity-mapping page table for
   156  * the context, up to the maxaddr. The byte at maxaddr is allowed to
   157  * remain unmapped, which is consistent with the definition of Maxmem
   158  * as the highest usable physical address + 1.  If superpages are
   159  * used, the maxaddr is typically mapped.
  160  */
  161 vm_object_t
  162 ctx_get_idmap_pgtbl(struct dmar_ctx *ctx, dmar_gaddr_t maxaddr)
  163 {
  164         struct dmar_unit *unit;
  165         struct idpgtbl *tbl;
  166         vm_object_t res;
  167         vm_page_t m;
  168         int leaf, i;
  169 
  170         leaf = 0; /* silence gcc */
  171 
  172         /*
  173          * First, determine where to stop the paging structures.
  174          */
  175         for (i = 0; i < ctx->pglvl; i++) {
  176                 if (i == ctx->pglvl - 1 || ctx_is_sp_lvl(ctx, i)) {
  177                         leaf = i;
  178                         break;
  179                 }
  180         }
  181 
  182         /*
   183          * Search the cache for a compatible page table.  A qualified
   184          * page table must map up to maxaddr, its level must be
   185          * supported by the DMAR, and its leaf must be equal to the
   186          * calculated value.  The latter restriction could be lifted,
   187          * but I believe it is currently impossible to have any
   188          * deviations on existing hardware.
  189          */
  190         sx_slock(&idpgtbl_lock);
  191         LIST_FOREACH(tbl, &idpgtbls, link) {
  192                 if (tbl->maxaddr >= maxaddr &&
  193                     dmar_pglvl_supported(ctx->dmar, tbl->pglvl) &&
  194                     tbl->leaf == leaf) {
  195                         res = tbl->pgtbl_obj;
  196                         vm_object_reference(res);
  197                         sx_sunlock(&idpgtbl_lock);
  198                         ctx->pglvl = tbl->pglvl; /* XXXKIB ? */
  199                         goto end;
  200                 }
  201         }
  202 
  203         /*
   204          * Not found in the cache.  Relock the cache in exclusive mode
   205          * to be able to add an element, and recheck the cache after
   206          * the relock.
  207          */
  208         sx_sunlock(&idpgtbl_lock);
  209         sx_xlock(&idpgtbl_lock);
  210         LIST_FOREACH(tbl, &idpgtbls, link) {
  211                 if (tbl->maxaddr >= maxaddr &&
  212                     dmar_pglvl_supported(ctx->dmar, tbl->pglvl) &&
  213                     tbl->leaf == leaf) {
  214                         res = tbl->pgtbl_obj;
  215                         vm_object_reference(res);
  216                         sx_xunlock(&idpgtbl_lock);
  217                         ctx->pglvl = tbl->pglvl; /* XXXKIB ? */
  218                         return (res);
  219                 }
  220         }
  221 
  222         /*
  223          * Still not found, create new page table.
  224          */
  225         tbl = malloc(sizeof(*tbl), M_DMAR_IDPGTBL, M_WAITOK);
  226         tbl->pglvl = ctx->pglvl;
  227         tbl->leaf = leaf;
  228         tbl->maxaddr = maxaddr;
  229         tbl->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL,
  230             IDX_TO_OFF(pglvl_max_pages(tbl->pglvl)), 0, 0, NULL);
  231         VM_OBJECT_WLOCK(tbl->pgtbl_obj);
  232         ctx_idmap_nextlvl(tbl, 0, 0, 0);
  233         VM_OBJECT_WUNLOCK(tbl->pgtbl_obj);
  234         LIST_INSERT_HEAD(&idpgtbls, tbl, link);
  235         res = tbl->pgtbl_obj;
  236         vm_object_reference(res);
  237         sx_xunlock(&idpgtbl_lock);
  238 
  239 end:
  240         /*
  241          * Table was found or created.
  242          *
   243          * If the DMAR does not snoop paging structure accesses, flush
   244          * the CPU cache to memory.  Note that the coherent argument to
   245          * dmar_unmap_pgtbl() was possibly invalid at the time of the
   246          * identity page table creation, since the DMAR passed at
   247          * creation time could be coherent, while the current DMAR is
   248          * not.
  249          *
  250          * If DMAR cannot look into the chipset write buffer, flush it
  251          * as well.
  252          */
  253         unit = ctx->dmar;
  254         if (!DMAR_IS_COHERENT(unit)) {
  255                 VM_OBJECT_WLOCK(res);
  256                 for (m = vm_page_lookup(res, 0); m != NULL;
  257                      m = vm_page_next(m))
  258                         pmap_invalidate_cache_pages(&m, 1);
  259                 VM_OBJECT_WUNLOCK(res);
  260         }
  261         if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) {
  262                 DMAR_LOCK(unit);
  263                 dmar_flush_write_bufs(unit);
  264                 DMAR_UNLOCK(unit);
  265         }
  266         
  267         return (res);
  268 }
  269 
  270 /*
  271  * Return a reference to the identity mapping page table to the cache.
  272  */
  273 void
  274 put_idmap_pgtbl(vm_object_t obj)
  275 {
  276         struct idpgtbl *tbl, *tbl1;
  277         vm_object_t rmobj;
  278 
  279         sx_slock(&idpgtbl_lock);
  280         KASSERT(obj->ref_count >= 2, ("lost cache reference"));
  281         vm_object_deallocate(obj);
  282 
  283         /*
   284          * The cache always owns one last reference on the page table
   285          * object.  If an additional reference exists, the object must stay.
  286          */
  287         if (obj->ref_count > 1) {
  288                 sx_sunlock(&idpgtbl_lock);
  289                 return;
  290         }
  291 
  292         /*
   293          * The cache reference is the last one; remove the cache element
   294          * and free the page table object, returning the page table pages
   295          * to the system.
  296          */
  297         sx_sunlock(&idpgtbl_lock);
  298         sx_xlock(&idpgtbl_lock);
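        /*
         * Scan the whole cache rather than only the entry for obj:
         * other references may have been dropped while the lock was
         * released for the relock, so free every page table that is
         * now referenced only by the cache.
         */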
  299         LIST_FOREACH_SAFE(tbl, &idpgtbls, link, tbl1) {
  300                 rmobj = tbl->pgtbl_obj;
  301                 if (rmobj->ref_count == 1) {
  302                         LIST_REMOVE(tbl, link);
  303                         atomic_subtract_int(&dmar_tbl_pagecnt,
  304                             rmobj->resident_page_count);
  305                         vm_object_deallocate(rmobj);
  306                         free(tbl, M_DMAR_IDPGTBL);
  307                 }
  308         }
  309         sx_xunlock(&idpgtbl_lock);
  310 }
  311 
  312 /*
  313  * The core routines to map and unmap host pages at the given guest
   314  * address.  Superpages are supported.
  315  */
  316 
  317 /*
  318  * Index of the pte for the guest address base in the page table at
  319  * the level lvl.
  320  */
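       /*
        * For example, assuming the usual 4KB page table pages with 512
        * ptes each (DMAR_PAGE_SHIFT == 12, DMAR_NPTEPGSHIFT == 9) and
        * pglvl == 4, level 0 (the root) is indexed by guest address
        * bits 39-47, level 1 by bits 30-38, level 2 by bits 21-29 and
        * level 3 (the leaf) by bits 12-20.
        */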
  321 static int
  322 ctx_pgtbl_pte_off(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl)
  323 {
  324 
  325         base >>= DMAR_PAGE_SHIFT + (ctx->pglvl - lvl - 1) * DMAR_NPTEPGSHIFT;
  326         return (base & DMAR_PTEMASK);
  327 }
  328 
  329 /*
  330  * Returns the page index of the page table page in the page table
  331  * object, which maps the given address base at the page table level
  332  * lvl.
  333  */
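       /*
        * The layout of the pages in pgtbl_obj follows from the "+ 1" in
        * ctx_idmap_nextlvl() and in the loop below: the root page table
        * page is at pindex 0, and the children of the page at pindex n
        * occupy pindexes n * DMAR_NPTEPG + 1 through
        * n * DMAR_NPTEPG + DMAR_NPTEPG.
        */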
  334 static vm_pindex_t
  335 ctx_pgtbl_get_pindex(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl)
  336 {
  337         vm_pindex_t idx, pidx;
  338         int i;
  339 
  340         KASSERT(lvl >= 0 && lvl < ctx->pglvl, ("wrong lvl %p %d", ctx, lvl));
  341 
  342         for (pidx = idx = 0, i = 0; i < lvl; i++, pidx = idx)
  343                 idx = ctx_pgtbl_pte_off(ctx, base, i) + pidx * DMAR_NPTEPG + 1;
  344         return (idx);
  345 }
  346 
  347 static dmar_pte_t *
  348 ctx_pgtbl_map_pte(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl, int flags,
  349     vm_pindex_t *idxp, struct sf_buf **sf)
  350 {
  351         vm_page_t m;
  352         struct sf_buf *sfp;
  353         dmar_pte_t *pte, *ptep;
  354         vm_pindex_t idx, idx1;
  355 
  356         DMAR_CTX_ASSERT_PGLOCKED(ctx);
  357         KASSERT((flags & DMAR_PGF_OBJL) != 0, ("lost PGF_OBJL"));
  358 
  359         idx = ctx_pgtbl_get_pindex(ctx, base, lvl);
  360         if (*sf != NULL && idx == *idxp) {
  361                 pte = (dmar_pte_t *)sf_buf_kva(*sf);
  362         } else {
  363                 if (*sf != NULL)
  364                         dmar_unmap_pgtbl(*sf);
  365                 *idxp = idx;
  366 retry:
  367                 pte = dmar_map_pgtbl(ctx->pgtbl_obj, idx, flags, sf);
  368                 if (pte == NULL) {
  369                         KASSERT(lvl > 0, ("lost root page table page %p", ctx));
  370                         /*
   371                          * Page table page does not exist; allocate
   372                          * it and create a pte in the preceding page level
   373                          * to reference the allocated page table page.
  374                          */
  375                         m = dmar_pgalloc(ctx->pgtbl_obj, idx, flags |
  376                             DMAR_PGF_ZERO);
  377                         if (m == NULL)
  378                                 return (NULL);
  379 
  380                         /*
  381                          * Prevent potential free while pgtbl_obj is
  382                          * unlocked in the recursive call to
   383                          * ctx_pgtbl_map_pte(), in case another thread
   384                          * did a pte write and clear while the lock is
   385                          * dropped.
  386                          */
  387                         m->wire_count++;
  388 
  389                         sfp = NULL;
  390                         ptep = ctx_pgtbl_map_pte(ctx, base, lvl - 1, flags,
  391                             &idx1, &sfp);
  392                         if (ptep == NULL) {
  393                                 KASSERT(m->pindex != 0,
   394                                     ("losing root page %p", ctx));
  395                                 m->wire_count--;
  396                                 dmar_pgfree(ctx->pgtbl_obj, m->pindex, flags);
  397                                 return (NULL);
  398                         }
  399                         dmar_pte_store(&ptep->pte, DMAR_PTE_R | DMAR_PTE_W |
  400                             VM_PAGE_TO_PHYS(m));
  401                         dmar_flush_pte_to_ram(ctx->dmar, ptep);
  402                         sf_buf_page(sfp)->wire_count += 1;
  403                         m->wire_count--;
  404                         dmar_unmap_pgtbl(sfp);
  405                         /* Only executed once. */
  406                         goto retry;
  407                 }
  408         }
  409         pte += ctx_pgtbl_pte_off(ctx, base, lvl);
  410         return (pte);
  411 }
  412 
  413 static int
  414 ctx_map_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
  415     vm_page_t *ma, uint64_t pflags, int flags)
  416 {
  417         dmar_pte_t *pte;
  418         struct sf_buf *sf;
  419         dmar_gaddr_t pg_sz, base1, size1;
  420         vm_pindex_t pi, c, idx, run_sz;
  421         int lvl;
  422         bool superpage;
  423 
  424         DMAR_CTX_ASSERT_PGLOCKED(ctx);
  425 
  426         base1 = base;
  427         size1 = size;
  428         flags |= DMAR_PGF_OBJL;
  429         TD_PREP_PINNED_ASSERT;
  430 
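        /*
         * For each chunk of the request, pick the largest usable page
         * size: the first superpage level for which the remaining
         * size, the guest and host alignment, and the host physical
         * contiguity all allow it, falling back to the smallest pages
         * at the last level.  run_sz is the number of DMAR_PAGE_SIZE
         * pages of ma[] covered by one pte at the chosen level.
         */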
  431         for (sf = NULL, pi = 0; size > 0; base += pg_sz, size -= pg_sz,
  432             pi += run_sz) {
  433                 for (lvl = 0, c = 0, superpage = false;; lvl++) {
  434                         pg_sz = ctx_page_size(ctx, lvl);
  435                         run_sz = pg_sz >> DMAR_PAGE_SHIFT;
  436                         if (lvl == ctx->pglvl - 1)
  437                                 break;
  438                         /*
   439                          * Check if the current base is suitable for a
   440                          * superpage mapping.  First, verify the level.
  441                          */
  442                         if (!ctx_is_sp_lvl(ctx, lvl))
  443                                 continue;
  444                         /*
  445                          * Next, look at the size of the mapping and
  446                          * alignment of both guest and host addresses.
  447                          */
  448                         if (size < pg_sz || (base & (pg_sz - 1)) != 0 ||
  449                             (VM_PAGE_TO_PHYS(ma[pi]) & (pg_sz - 1)) != 0)
  450                                 continue;
   451                         /* All passed, check host pages contiguity. */
  452                         if (c == 0) {
  453                                 for (c = 1; c < run_sz; c++) {
  454                                         if (VM_PAGE_TO_PHYS(ma[pi + c]) !=
  455                                             VM_PAGE_TO_PHYS(ma[pi + c - 1]) +
  456                                             PAGE_SIZE)
  457                                                 break;
  458                                 }
  459                         }
  460                         if (c >= run_sz) {
  461                                 superpage = true;
  462                                 break;
  463                         }
  464                 }
  465                 KASSERT(size >= pg_sz,
  466                     ("mapping loop overflow %p %jx %jx %jx", ctx,
  467                     (uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz));
  468                 KASSERT(pg_sz > 0, ("pg_sz 0 lvl %d", lvl));
  469                 pte = ctx_pgtbl_map_pte(ctx, base, lvl, flags, &idx, &sf);
  470                 if (pte == NULL) {
  471                         KASSERT((flags & DMAR_PGF_WAITOK) == 0,
  472                             ("failed waitable pte alloc %p", ctx));
  473                         if (sf != NULL)
  474                                 dmar_unmap_pgtbl(sf);
  475                         ctx_unmap_buf_locked(ctx, base1, base - base1, flags);
  476                         TD_PINNED_ASSERT;
  477                         return (ENOMEM);
  478                 }
  479                 dmar_pte_store(&pte->pte, VM_PAGE_TO_PHYS(ma[pi]) | pflags |
  480                     (superpage ? DMAR_PTE_SP : 0));
  481                 dmar_flush_pte_to_ram(ctx->dmar, pte);
  482                 sf_buf_page(sf)->wire_count += 1;
  483         }
  484         if (sf != NULL)
  485                 dmar_unmap_pgtbl(sf);
  486         TD_PINNED_ASSERT;
  487         return (0);
  488 }
  489 
  490 int
  491 ctx_map_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
  492     vm_page_t *ma, uint64_t pflags, int flags)
  493 {
  494         struct dmar_unit *unit;
  495         int error;
  496 
  497         unit = ctx->dmar;
  498 
  499         KASSERT((ctx->flags & DMAR_CTX_IDMAP) == 0,
  500             ("modifying idmap pagetable ctx %p", ctx));
  501         KASSERT((base & DMAR_PAGE_MASK) == 0,
  502             ("non-aligned base %p %jx %jx", ctx, (uintmax_t)base,
  503             (uintmax_t)size));
  504         KASSERT((size & DMAR_PAGE_MASK) == 0,
  505             ("non-aligned size %p %jx %jx", ctx, (uintmax_t)base,
  506             (uintmax_t)size));
  507         KASSERT(size > 0, ("zero size %p %jx %jx", ctx, (uintmax_t)base,
  508             (uintmax_t)size));
  509         KASSERT(base < (1ULL << ctx->agaw),
  510             ("base too high %p %jx %jx agaw %d", ctx, (uintmax_t)base,
  511             (uintmax_t)size, ctx->agaw));
  512         KASSERT(base + size < (1ULL << ctx->agaw),
  513             ("end too high %p %jx %jx agaw %d", ctx, (uintmax_t)base,
  514             (uintmax_t)size, ctx->agaw));
  515         KASSERT(base + size > base,
  516             ("size overflow %p %jx %jx", ctx, (uintmax_t)base,
  517             (uintmax_t)size));
  518         KASSERT((pflags & (DMAR_PTE_R | DMAR_PTE_W)) != 0,
  519             ("neither read nor write %jx", (uintmax_t)pflags));
  520         KASSERT((pflags & ~(DMAR_PTE_R | DMAR_PTE_W | DMAR_PTE_SNP |
  521             DMAR_PTE_TM)) == 0,
  522             ("invalid pte flags %jx", (uintmax_t)pflags));
  523         KASSERT((pflags & DMAR_PTE_SNP) == 0 ||
  524             (unit->hw_ecap & DMAR_ECAP_SC) != 0,
  525             ("PTE_SNP for dmar without snoop control %p %jx",
  526             ctx, (uintmax_t)pflags));
  527         KASSERT((pflags & DMAR_PTE_TM) == 0 ||
  528             (unit->hw_ecap & DMAR_ECAP_DI) != 0,
  529             ("PTE_TM for dmar without DIOTLB %p %jx",
  530             ctx, (uintmax_t)pflags));
  531         KASSERT((flags & ~DMAR_PGF_WAITOK) == 0, ("invalid flags %x", flags));
  532 
  533         DMAR_CTX_PGLOCK(ctx);
  534         error = ctx_map_buf_locked(ctx, base, size, ma, pflags, flags);
  535         DMAR_CTX_PGUNLOCK(ctx);
  536         if (error != 0)
  537                 return (error);
  538 
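        /*
         * If the DMAR reports Caching Mode (CM), not-present and
         * erroneous translations may be cached in the IOTLB, so it
         * must be invalidated even when mappings are only added.
         * Otherwise a write buffer flush is enough when RWBF is
         * required.
         */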
  539         if ((unit->hw_cap & DMAR_CAP_CM) != 0)
  540                 ctx_flush_iotlb_sync(ctx, base, size);
  541         else if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) {
  542                 /* See 11.1 Write Buffer Flushing. */
  543                 DMAR_LOCK(unit);
  544                 dmar_flush_write_bufs(unit);
  545                 DMAR_UNLOCK(unit);
  546         }
  547         return (0);
  548 }
  549 
  550 static void ctx_unmap_clear_pte(struct dmar_ctx *ctx, dmar_gaddr_t base,
  551     int lvl, int flags, dmar_pte_t *pte, struct sf_buf **sf, bool free_fs);
  552 
  553 static void
  554 ctx_free_pgtbl_pde(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl, int flags)
  555 {
  556         struct sf_buf *sf;
  557         dmar_pte_t *pde;
  558         vm_pindex_t idx;
  559 
  560         sf = NULL;
  561         pde = ctx_pgtbl_map_pte(ctx, base, lvl, flags, &idx, &sf);
  562         ctx_unmap_clear_pte(ctx, base, lvl, flags, pde, &sf, true);
  563 }
  564 
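       /*
        * Clear the pte and unwire the page table page that contains it.
        * If the page's wire count drops to zero, free the page and
        * clear the pde referencing it at the previous level,
        * recursively tearing down page table branches that became
        * empty.
        */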
  565 static void
  566 ctx_unmap_clear_pte(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl,
  567     int flags, dmar_pte_t *pte, struct sf_buf **sf, bool free_sf)
  568 {
  569         vm_page_t m;
  570 
  571         dmar_pte_clear(&pte->pte);
  572         dmar_flush_pte_to_ram(ctx->dmar, pte);
  573         m = sf_buf_page(*sf);
  574         if (free_sf) {
  575                 dmar_unmap_pgtbl(*sf);
  576                 *sf = NULL;
  577         }
  578         m->wire_count--;
  579         if (m->wire_count != 0)
  580                 return;
  581         KASSERT(lvl != 0,
  582             ("lost reference (lvl) on root pg ctx %p base %jx lvl %d",
  583             ctx, (uintmax_t)base, lvl));
  584         KASSERT(m->pindex != 0,
  585             ("lost reference (idx) on root pg ctx %p base %jx lvl %d",
  586             ctx, (uintmax_t)base, lvl));
  587         dmar_pgfree(ctx->pgtbl_obj, m->pindex, flags);
  588         ctx_free_pgtbl_pde(ctx, base, lvl - 1, flags);
  589 }
  590 
  591 /*
  592  * Assumes that the unmap is never partial.
  593  */
  594 static int
  595 ctx_unmap_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base,
  596     dmar_gaddr_t size, int flags)
  597 {
  598         dmar_pte_t *pte;
  599         struct sf_buf *sf;
  600         vm_pindex_t idx;
  601         dmar_gaddr_t pg_sz;
  602         int lvl;
  603 
  604         DMAR_CTX_ASSERT_PGLOCKED(ctx);
  605         if (size == 0)
  606                 return (0);
  607 
  608         KASSERT((ctx->flags & DMAR_CTX_IDMAP) == 0,
  609             ("modifying idmap pagetable ctx %p", ctx));
  610         KASSERT((base & DMAR_PAGE_MASK) == 0,
  611             ("non-aligned base %p %jx %jx", ctx, (uintmax_t)base,
  612             (uintmax_t)size));
  613         KASSERT((size & DMAR_PAGE_MASK) == 0,
  614             ("non-aligned size %p %jx %jx", ctx, (uintmax_t)base,
  615             (uintmax_t)size));
  616         KASSERT(base < (1ULL << ctx->agaw),
  617             ("base too high %p %jx %jx agaw %d", ctx, (uintmax_t)base,
  618             (uintmax_t)size, ctx->agaw));
  619         KASSERT(base + size < (1ULL << ctx->agaw),
  620             ("end too high %p %jx %jx agaw %d", ctx, (uintmax_t)base,
  621             (uintmax_t)size, ctx->agaw));
  622         KASSERT(base + size > base,
  623             ("size overflow %p %jx %jx", ctx, (uintmax_t)base,
  624             (uintmax_t)size));
  625         KASSERT((flags & ~DMAR_PGF_WAITOK) == 0, ("invalid flags %x", flags));
  626 
  627         pg_sz = 0; /* silence gcc */
  628         flags |= DMAR_PGF_OBJL;
  629         TD_PREP_PINNED_ASSERT;
  630 
  631         for (sf = NULL; size > 0; base += pg_sz, size -= pg_sz) {
  632                 for (lvl = 0; lvl < ctx->pglvl; lvl++) {
  633                         if (lvl != ctx->pglvl - 1 && !ctx_is_sp_lvl(ctx, lvl))
  634                                 continue;
  635                         pg_sz = ctx_page_size(ctx, lvl);
  636                         if (pg_sz > size)
  637                                 continue;
  638                         pte = ctx_pgtbl_map_pte(ctx, base, lvl, flags,
  639                             &idx, &sf);
  640                         KASSERT(pte != NULL,
  641                             ("sleeping or page missed %p %jx %d 0x%x",
  642                             ctx, (uintmax_t)base, lvl, flags));
  643                         if ((pte->pte & DMAR_PTE_SP) != 0 ||
  644                             lvl == ctx->pglvl - 1) {
  645                                 ctx_unmap_clear_pte(ctx, base, lvl, flags,
  646                                     pte, &sf, false);
  647                                 break;
  648                         }
  649                 }
  650                 KASSERT(size >= pg_sz,
  651                     ("unmapping loop overflow %p %jx %jx %jx", ctx,
  652                     (uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz));
  653         }
  654         if (sf != NULL)
  655                 dmar_unmap_pgtbl(sf);
  656         /*
   657          * See 11.1 Write Buffer Flushing for an explanation of why
   658          * RWBF can be ignored here.
  659          */
  660 
  661         TD_PINNED_ASSERT;
  662         return (0);
  663 }
  664 
  665 int
  666 ctx_unmap_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
  667     int flags)
  668 {
  669         int error;
  670 
  671         DMAR_CTX_PGLOCK(ctx);
  672         error = ctx_unmap_buf_locked(ctx, base, size, flags);
  673         DMAR_CTX_PGUNLOCK(ctx);
  674         return (error);
  675 }
  676 
  677 int
  678 ctx_alloc_pgtbl(struct dmar_ctx *ctx)
  679 {
  680         vm_page_t m;
  681 
  682         KASSERT(ctx->pgtbl_obj == NULL, ("already initialized %p", ctx));
  683 
  684         ctx->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL,
  685             IDX_TO_OFF(pglvl_max_pages(ctx->pglvl)), 0, 0, NULL);
  686         DMAR_CTX_PGLOCK(ctx);
  687         m = dmar_pgalloc(ctx->pgtbl_obj, 0, DMAR_PGF_WAITOK |
  688             DMAR_PGF_ZERO | DMAR_PGF_OBJL);
  689         /* No implicit free of the top level page table page. */
  690         m->wire_count = 1;
  691         DMAR_CTX_PGUNLOCK(ctx);
  692         return (0);
  693 }
  694 
  695 void
  696 ctx_free_pgtbl(struct dmar_ctx *ctx)
  697 {
  698         vm_object_t obj;
  699         vm_page_t m;
  700 
  701         obj = ctx->pgtbl_obj;
  702         if (obj == NULL) {
  703                 KASSERT((ctx->dmar->hw_ecap & DMAR_ECAP_PT) != 0 &&
  704                     (ctx->flags & DMAR_CTX_IDMAP) != 0,
  705                     ("lost pagetable object ctx %p", ctx));
  706                 return;
  707         }
  708         DMAR_CTX_ASSERT_PGLOCKED(ctx);
  709         ctx->pgtbl_obj = NULL;
  710 
  711         if ((ctx->flags & DMAR_CTX_IDMAP) != 0) {
  712                 put_idmap_pgtbl(obj);
  713                 ctx->flags &= ~DMAR_CTX_IDMAP;
  714                 return;
  715         }
  716 
  717         /* Obliterate wire_counts */
  718         VM_OBJECT_ASSERT_WLOCKED(obj);
  719         for (m = vm_page_lookup(obj, 0); m != NULL; m = vm_page_next(m))
  720                 m->wire_count = 0;
  721         VM_OBJECT_WUNLOCK(obj);
  722         vm_object_deallocate(obj);
  723 }
  724 
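       /*
        * Post an IOTLB invalidation request of the type encoded in wt
        * to the IOTLB register at offset iro and busy-wait until the
        * hardware clears the IVT bit.  Return the final register value
        * so the caller can check the invalidation granularity actually
        * performed.
        */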
  725 static inline uint64_t
  726 ctx_wait_iotlb_flush(struct dmar_unit *unit, uint64_t wt, int iro)
  727 {
  728         uint64_t iotlbr;
  729 
  730         dmar_write8(unit, iro + DMAR_IOTLB_REG_OFF, DMAR_IOTLB_IVT |
  731             DMAR_IOTLB_DR | DMAR_IOTLB_DW | wt);
  732         for (;;) {
  733                 iotlbr = dmar_read8(unit, iro + DMAR_IOTLB_REG_OFF);
  734                 if ((iotlbr & DMAR_IOTLB_IVT) == 0)
  735                         break;
  736                 cpu_spinwait();
  737         }
  738         return (iotlbr);
  739 }
  740 
  741 void
  742 ctx_flush_iotlb_sync(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size)
  743 {
  744         struct dmar_unit *unit;
  745         dmar_gaddr_t isize;
  746         uint64_t iotlbr;
  747         int am, iro;
  748 
  749         unit = ctx->dmar;
  750         KASSERT(!unit->qi_enabled, ("dmar%d: sync iotlb flush call",
  751             unit->unit));
  752         iro = DMAR_ECAP_IRO(unit->hw_ecap) * 16;
  753         DMAR_LOCK(unit);
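        /*
         * If page-selective invalidation is not supported, or the
         * request is larger than 2MB, invalidate the whole domain.
         * Otherwise invalidate page ranges whose size is encoded in
         * the address mask (am) computed by calc_am().
         */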
  754         if ((unit->hw_cap & DMAR_CAP_PSI) == 0 || size > 2 * 1024 * 1024) {
  755                 iotlbr = ctx_wait_iotlb_flush(unit, DMAR_IOTLB_IIRG_DOM |
  756                     DMAR_IOTLB_DID(ctx->domain), iro);
  757                 KASSERT((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
  758                     DMAR_IOTLB_IAIG_INVLD,
  759                     ("dmar%d: invalidation failed %jx", unit->unit,
  760                     (uintmax_t)iotlbr));
  761         } else {
  762                 for (; size > 0; base += isize, size -= isize) {
  763                         am = calc_am(unit, base, size, &isize);
  764                         dmar_write8(unit, iro, base | am);
  765                         iotlbr = ctx_wait_iotlb_flush(unit,
  766                             DMAR_IOTLB_IIRG_PAGE | DMAR_IOTLB_DID(ctx->domain),
  767                             iro);
  768                         KASSERT((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
  769                             DMAR_IOTLB_IAIG_INVLD,
  770                             ("dmar%d: PSI invalidation failed "
  771                             "iotlbr 0x%jx base 0x%jx size 0x%jx am %d",
  772                             unit->unit, (uintmax_t)iotlbr,
  773                             (uintmax_t)base, (uintmax_t)size, am));
  774                         /*
   775                          * Any non-page granularity covers the whole
   776                          * guest address space for the domain.
  777                          */
  778                         if ((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
  779                             DMAR_IOTLB_IAIG_PAGE)
  780                                 break;
  781                 }
  782         }
  783         DMAR_UNLOCK(unit);
  784 }
