FreeBSD/Linux Kernel Cross Reference
sys/x86/iommu/intel_idpgtbl.c


/*-
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.2/sys/x86/iommu/intel_idpgtbl.c 284021 2015-06-05 08:36:25Z kib $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/rman.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>
#include <x86/iommu/intel_reg.h>
#include <x86/iommu/busdma_dmar.h>
#include <x86/iommu/intel_dmar.h>

static int ctx_unmap_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base,
    dmar_gaddr_t size, int flags);

/*
 * The cache of the identity mapping page tables for the DMARs.  Using
 * the cache saves a significant amount of memory by sharing the page
 * tables, since usually the DMARs are identical and have the same
 * capabilities.  Still, the cache records the information needed to
 * match DMAR capabilities and page table format, to correctly handle
 * different DMARs.
 */

struct idpgtbl {
        dmar_gaddr_t maxaddr;   /* Page table covers the guest address
                                   range [0..maxaddr) */
        int pglvl;              /* Total page table levels ignoring
                                   superpages */
        int leaf;               /* The last materialized page table
                                   level, it is non-zero if superpages
                                   are supported */
        vm_object_t pgtbl_obj;  /* The page table pages */
        LIST_ENTRY(idpgtbl) link;
};

static struct sx idpgtbl_lock;
SX_SYSINIT(idpgtbl, &idpgtbl_lock, "idpgtbl");
static LIST_HEAD(, idpgtbl) idpgtbls = LIST_HEAD_INITIALIZER(idpgtbls);
static MALLOC_DEFINE(M_DMAR_IDPGTBL, "dmar_idpgtbl",
    "Intel DMAR Identity mappings cache elements");

/*
 * Build the next level of the page tables for the identity mapping.
 * - lvl is the level to build;
 * - idx is the index of the page table page in the pgtbl_obj, which is
 *   being allocated and filled now;
 * - addr is the starting address in the bus address space which is
 *   mapped by the page table page.
 */
static void
ctx_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx,
    dmar_gaddr_t addr)
{
        vm_page_t m1;
        dmar_pte_t *pte;
        struct sf_buf *sf;
        dmar_gaddr_t f, pg_sz;
        vm_pindex_t base;
        int i;

        VM_OBJECT_ASSERT_LOCKED(tbl->pgtbl_obj);
        if (addr >= tbl->maxaddr)
                return;
        (void)dmar_pgalloc(tbl->pgtbl_obj, idx, DMAR_PGF_OBJL | DMAR_PGF_WAITOK |
            DMAR_PGF_ZERO);
        base = idx * DMAR_NPTEPG + 1; /* Index of the first child page of idx */
        pg_sz = pglvl_page_size(tbl->pglvl, lvl);
        if (lvl != tbl->leaf) {
                for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz)
                        ctx_idmap_nextlvl(tbl, lvl + 1, base + i, f);
        }
        VM_OBJECT_WUNLOCK(tbl->pgtbl_obj);
        pte = dmar_map_pgtbl(tbl->pgtbl_obj, idx, DMAR_PGF_WAITOK, &sf);
        if (lvl == tbl->leaf) {
                for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz) {
                        if (f >= tbl->maxaddr)
                                break;
                        pte[i].pte = (DMAR_PTE_ADDR_MASK & f) |
                            DMAR_PTE_R | DMAR_PTE_W;
                }
        } else {
                for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz) {
                        if (f >= tbl->maxaddr)
                                break;
                        m1 = dmar_pgalloc(tbl->pgtbl_obj, base + i,
                            DMAR_PGF_NOALLOC);
                        KASSERT(m1 != NULL, ("lost page table page"));
                        pte[i].pte = (DMAR_PTE_ADDR_MASK &
                            VM_PAGE_TO_PHYS(m1)) | DMAR_PTE_R | DMAR_PTE_W;
                }
        }
        /* ctx_get_idmap_pgtbl flushes CPU cache if needed. */
        dmar_unmap_pgtbl(sf);
        VM_OBJECT_WLOCK(tbl->pgtbl_obj);
}

/*
 * Find a ready and compatible identity-mapping page table in the
 * cache.  If not found, populate the identity-mapping page table for
 * the context, up to the maxaddr.  The byte at maxaddr is allowed to
 * remain unmapped, which agrees with the definition of Maxmem as the
 * highest usable physical address + 1.  If superpages are used, the
 * maxaddr is typically mapped anyway.
 */
vm_object_t
ctx_get_idmap_pgtbl(struct dmar_ctx *ctx, dmar_gaddr_t maxaddr)
{
        struct dmar_unit *unit;
        struct idpgtbl *tbl;
        vm_object_t res;
        vm_page_t m;
        int leaf, i;

        leaf = 0; /* silence gcc */

        /*
         * First, determine where to stop the paging structures.
         */
        for (i = 0; i < ctx->pglvl; i++) {
                if (i == ctx->pglvl - 1 || ctx_is_sp_lvl(ctx, i)) {
                        leaf = i;
                        break;
                }
        }

        /*
         * Search the cache for a compatible page table.  A qualified
         * page table must map up to maxaddr, its level must be
         * supported by the DMAR, and its leaf must be equal to the
         * calculated value.  The latter restriction could be lifted,
         * but I believe it is currently impossible to have any
         * deviations for existing hardware.
         */
        sx_slock(&idpgtbl_lock);
        LIST_FOREACH(tbl, &idpgtbls, link) {
                if (tbl->maxaddr >= maxaddr &&
                    dmar_pglvl_supported(ctx->dmar, tbl->pglvl) &&
                    tbl->leaf == leaf) {
                        res = tbl->pgtbl_obj;
                        vm_object_reference(res);
                        sx_sunlock(&idpgtbl_lock);
                        ctx->pglvl = tbl->pglvl; /* XXXKIB ? */
                        goto end;
                }
        }

        /*
         * Not found in the cache; relock the cache into exclusive
         * mode to be able to add an element, and recheck the cache
         * again after the relock.
         */
        sx_sunlock(&idpgtbl_lock);
        sx_xlock(&idpgtbl_lock);
        LIST_FOREACH(tbl, &idpgtbls, link) {
                if (tbl->maxaddr >= maxaddr &&
                    dmar_pglvl_supported(ctx->dmar, tbl->pglvl) &&
                    tbl->leaf == leaf) {
                        res = tbl->pgtbl_obj;
                        vm_object_reference(res);
                        sx_xunlock(&idpgtbl_lock);
                        ctx->pglvl = tbl->pglvl; /* XXXKIB ? */
                        return (res);
                }
        }

        /*
         * Still not found; create a new page table.
         */
        tbl = malloc(sizeof(*tbl), M_DMAR_IDPGTBL, M_WAITOK);
        tbl->pglvl = ctx->pglvl;
        tbl->leaf = leaf;
        tbl->maxaddr = maxaddr;
        tbl->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL,
            IDX_TO_OFF(pglvl_max_pages(tbl->pglvl)), 0, 0, NULL);
        VM_OBJECT_WLOCK(tbl->pgtbl_obj);
        ctx_idmap_nextlvl(tbl, 0, 0, 0);
        VM_OBJECT_WUNLOCK(tbl->pgtbl_obj);
        LIST_INSERT_HEAD(&idpgtbls, tbl, link);
        res = tbl->pgtbl_obj;
        vm_object_reference(res);
        sx_xunlock(&idpgtbl_lock);

end:
        /*
         * The table was found or created.
         *
         * If the DMAR does not snoop paging structure accesses, flush
         * the CPU cache to memory.  Note that the coherent argument
         * given to dmar_unmap_pgtbl() at the time the identity page
         * table was created was possibly invalid for the current
         * DMAR: the DMAR passed at creation time could be coherent,
         * while the current DMAR is not.
         *
         * If the DMAR cannot look into the chipset write buffer,
         * flush it as well.
         */
        unit = ctx->dmar;
        if (!DMAR_IS_COHERENT(unit)) {
                VM_OBJECT_WLOCK(res);
                for (m = vm_page_lookup(res, 0); m != NULL;
                     m = vm_page_next(m))
                        pmap_invalidate_cache_pages(&m, 1);
                VM_OBJECT_WUNLOCK(res);
        }
        if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) {
                DMAR_LOCK(unit);
                dmar_flush_write_bufs(unit);
                DMAR_UNLOCK(unit);
        }

        return (res);
}

/*
 * Return a reference to the identity mapping page table to the cache.
 */
void
put_idmap_pgtbl(vm_object_t obj)
{
        struct idpgtbl *tbl, *tbl1;
        vm_object_t rmobj;

        sx_slock(&idpgtbl_lock);
        KASSERT(obj->ref_count >= 2, ("lost cache reference"));
        vm_object_deallocate(obj);

        /*
         * The cache always owns one last reference on the page table
         * object.  If there is an additional reference, the object
         * must stay.
         */
        if (obj->ref_count > 1) {
                sx_sunlock(&idpgtbl_lock);
                return;
        }

        /*
         * The cache reference is the last one; remove the cache
         * element and free the page table object, returning the page
         * table pages to the system.
         */
        sx_sunlock(&idpgtbl_lock);
        sx_xlock(&idpgtbl_lock);
        LIST_FOREACH_SAFE(tbl, &idpgtbls, link, tbl1) {
                rmobj = tbl->pgtbl_obj;
                if (rmobj->ref_count == 1) {
                        LIST_REMOVE(tbl, link);
                        atomic_subtract_int(&dmar_tbl_pagecnt,
                            rmobj->resident_page_count);
                        vm_object_deallocate(rmobj);
                        free(tbl, M_DMAR_IDPGTBL);
                }
        }
        sx_xunlock(&idpgtbl_lock);
}

/*
 * The core routines to map and unmap host pages at the given guest
 * address.  They support superpages.
 */

/*
 * Index of the pte for the guest address base in the page table at
 * the level lvl.
 */
static int
ctx_pgtbl_pte_off(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl)
{

        base >>= DMAR_PAGE_SHIFT + (ctx->pglvl - lvl - 1) * DMAR_NPTEPGSHIFT;
        return (base & DMAR_PTEMASK);
}
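
/*
 * A worked example (assuming the usual 4KB page table pages with 512
 * 8-byte PTEs, i.e. DMAR_PAGE_SHIFT == 12 and DMAR_NPTEPGSHIFT == 9):
 * for a 4-level table (pglvl == 4), the leaf level lvl == 3 extracts
 * bits 12..20 of base, while the root level lvl == 0 extracts bits
 * 39..47:
 *
 *      lvl == 3: shift == 12 + (4 - 3 - 1) * 9 == 12
 *      lvl == 0: shift == 12 + (4 - 0 - 1) * 9 == 39
 */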

/*
 * Returns the page index of the page table page in the page table
 * object, which maps the given address base at the page table level
 * lvl.
 */
static vm_pindex_t
ctx_pgtbl_get_pindex(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl)
{
        vm_pindex_t idx, pidx;
        int i;

        KASSERT(lvl >= 0 && lvl < ctx->pglvl, ("wrong lvl %p %d", ctx, lvl));

        for (pidx = idx = 0, i = 0; i < lvl; i++, pidx = idx)
                idx = ctx_pgtbl_pte_off(ctx, base, i) + pidx * DMAR_NPTEPG + 1;
        return (idx);
}
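
/*
 * A worked example of the implicit tree layout in the page table
 * object (assuming DMAR_NPTEPG == 512): the root page table page is
 * at pindex 0, and the page at pindex idx keeps its children at
 * pindexes idx * DMAR_NPTEPG + 1 .. idx * DMAR_NPTEPG + DMAR_NPTEPG.
 * The level 1 pages thus occupy pindexes 1..512, the children of the
 * level 1 page at pindex 1 occupy 513..1024, and so on.  This is the
 * same layout that ctx_idmap_nextlvl() builds with its
 * base = idx * DMAR_NPTEPG + 1 computation.
 */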

static dmar_pte_t *
ctx_pgtbl_map_pte(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl, int flags,
    vm_pindex_t *idxp, struct sf_buf **sf)
{
        vm_page_t m;
        struct sf_buf *sfp;
        dmar_pte_t *pte, *ptep;
        vm_pindex_t idx, idx1;

        DMAR_CTX_ASSERT_PGLOCKED(ctx);
        KASSERT((flags & DMAR_PGF_OBJL) != 0, ("lost PGF_OBJL"));

        idx = ctx_pgtbl_get_pindex(ctx, base, lvl);
        if (*sf != NULL && idx == *idxp) {
                pte = (dmar_pte_t *)sf_buf_kva(*sf);
        } else {
                if (*sf != NULL)
                        dmar_unmap_pgtbl(*sf);
                *idxp = idx;
retry:
                pte = dmar_map_pgtbl(ctx->pgtbl_obj, idx, flags, sf);
                if (pte == NULL) {
                        KASSERT(lvl > 0, ("lost root page table page %p", ctx));
                        /*
                         * The page table page does not exist;
                         * allocate it and create the pte in the
                         * level above.
                         */
                        m = dmar_pgalloc(ctx->pgtbl_obj, idx, flags |
                            DMAR_PGF_ZERO);
                        if (m == NULL)
                                return (NULL);

                        /*
                         * Prevent a potential free while pgtbl_obj is
                         * unlocked in the recursive call to
                         * ctx_pgtbl_map_pte(), in case another thread
                         * did a pte write and clear while the lock
                         * was dropped.
                         */
                        m->wire_count++;

                        sfp = NULL;
                        ptep = ctx_pgtbl_map_pte(ctx, base, lvl - 1, flags,
                            &idx1, &sfp);
                        if (ptep == NULL) {
                                KASSERT(m->pindex != 0,
                                    ("losing root page %p", ctx));
                                m->wire_count--;
                                dmar_pgfree(ctx->pgtbl_obj, m->pindex, flags);
                                return (NULL);
                        }
                        dmar_pte_store(&ptep->pte, DMAR_PTE_R | DMAR_PTE_W |
                            VM_PAGE_TO_PHYS(m));
                        dmar_flush_pte_to_ram(ctx->dmar, ptep);
                        sf_buf_page(sfp)->wire_count += 1;
                        m->wire_count--;
                        dmar_unmap_pgtbl(sfp);
                        /* Only executed once. */
                        goto retry;
                }
        }
        pte += ctx_pgtbl_pte_off(ctx, base, lvl);
        return (pte);
}

static int
ctx_map_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
    vm_page_t *ma, uint64_t pflags, int flags)
{
        dmar_pte_t *pte;
        struct sf_buf *sf;
        dmar_gaddr_t pg_sz, base1, size1;
        vm_pindex_t pi, c, idx, run_sz;
        int lvl;
        bool superpage;

        DMAR_CTX_ASSERT_PGLOCKED(ctx);

        base1 = base;
        size1 = size;
        flags |= DMAR_PGF_OBJL;
        TD_PREP_PINNED_ASSERT;

        for (sf = NULL, pi = 0; size > 0; base += pg_sz, size -= pg_sz,
            pi += run_sz) {
                for (lvl = 0, c = 0, superpage = false;; lvl++) {
                        pg_sz = ctx_page_size(ctx, lvl);
                        run_sz = pg_sz >> DMAR_PAGE_SHIFT;
                        if (lvl == ctx->pglvl - 1)
                                break;
                        /*
                         * Check if the current base is suitable for
                         * the superpage mapping.  First, verify the
                         * level.
                         */
                        if (!ctx_is_sp_lvl(ctx, lvl))
                                continue;
                        /*
                         * Next, look at the size of the mapping and
                         * alignment of both guest and host addresses.
                         */
                        if (size < pg_sz || (base & (pg_sz - 1)) != 0 ||
                            (VM_PAGE_TO_PHYS(ma[pi]) & (pg_sz - 1)) != 0)
                                continue;
                        /* All passed, check host page contiguity. */
                        if (c == 0) {
                                for (c = 1; c < run_sz; c++) {
                                        if (VM_PAGE_TO_PHYS(ma[pi + c]) !=
                                            VM_PAGE_TO_PHYS(ma[pi + c - 1]) +
                                            PAGE_SIZE)
                                                break;
                                }
                        }
                        if (c >= run_sz) {
                                superpage = true;
                                break;
                        }
                }
                KASSERT(size >= pg_sz,
                    ("mapping loop overflow %p %jx %jx %jx", ctx,
                    (uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz));
                KASSERT(pg_sz > 0, ("pg_sz 0 lvl %d", lvl));
                pte = ctx_pgtbl_map_pte(ctx, base, lvl, flags, &idx, &sf);
                if (pte == NULL) {
                        KASSERT((flags & DMAR_PGF_WAITOK) == 0,
                            ("failed waitable pte alloc %p", ctx));
                        if (sf != NULL)
                                dmar_unmap_pgtbl(sf);
                        ctx_unmap_buf_locked(ctx, base1, base - base1, flags);
                        TD_PINNED_ASSERT;
                        return (ENOMEM);
                }
                dmar_pte_store(&pte->pte, VM_PAGE_TO_PHYS(ma[pi]) | pflags |
                    (superpage ? DMAR_PTE_SP : 0));
                dmar_flush_pte_to_ram(ctx->dmar, pte);
                sf_buf_page(sf)->wire_count += 1;
        }
        if (sf != NULL)
                dmar_unmap_pgtbl(sf);
        TD_PINNED_ASSERT;
        return (0);
}

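/*
 * A worked example of the superpage selection in ctx_map_buf_locked()
 * (assuming 4KB base pages and a 2MB superpage level): a 2MB-aligned
 * guest range of at least 2MB whose backing host pages
 * ma[pi .. pi + 511] are physically contiguous is mapped by a single
 * pte with DMAR_PTE_SP set, instead of 512 leaf ptes.  The contiguity
 * run length c is computed at most once per outer iteration and
 * reused at the smaller superpage levels.
 */
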
int
ctx_map_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
    vm_page_t *ma, uint64_t pflags, int flags)
{
        struct dmar_unit *unit;
        int error;

        unit = ctx->dmar;

        KASSERT((ctx->flags & DMAR_CTX_IDMAP) == 0,
            ("modifying idmap pagetable ctx %p", ctx));
        KASSERT((base & DMAR_PAGE_MASK) == 0,
            ("non-aligned base %p %jx %jx", ctx, (uintmax_t)base,
            (uintmax_t)size));
        KASSERT((size & DMAR_PAGE_MASK) == 0,
            ("non-aligned size %p %jx %jx", ctx, (uintmax_t)base,
            (uintmax_t)size));
        KASSERT(size > 0, ("zero size %p %jx %jx", ctx, (uintmax_t)base,
            (uintmax_t)size));
        KASSERT(base < (1ULL << ctx->agaw),
            ("base too high %p %jx %jx agaw %d", ctx, (uintmax_t)base,
            (uintmax_t)size, ctx->agaw));
        KASSERT(base + size < (1ULL << ctx->agaw),
            ("end too high %p %jx %jx agaw %d", ctx, (uintmax_t)base,
            (uintmax_t)size, ctx->agaw));
        KASSERT(base + size > base,
            ("size overflow %p %jx %jx", ctx, (uintmax_t)base,
            (uintmax_t)size));
        KASSERT((pflags & (DMAR_PTE_R | DMAR_PTE_W)) != 0,
            ("neither read nor write %jx", (uintmax_t)pflags));
        KASSERT((pflags & ~(DMAR_PTE_R | DMAR_PTE_W | DMAR_PTE_SNP |
            DMAR_PTE_TM)) == 0,
            ("invalid pte flags %jx", (uintmax_t)pflags));
        KASSERT((pflags & DMAR_PTE_SNP) == 0 ||
            (unit->hw_ecap & DMAR_ECAP_SC) != 0,
            ("PTE_SNP for dmar without snoop control %p %jx",
            ctx, (uintmax_t)pflags));
        KASSERT((pflags & DMAR_PTE_TM) == 0 ||
            (unit->hw_ecap & DMAR_ECAP_DI) != 0,
            ("PTE_TM for dmar without DIOTLB %p %jx",
            ctx, (uintmax_t)pflags));
        KASSERT((flags & ~DMAR_PGF_WAITOK) == 0, ("invalid flags %x", flags));

        DMAR_CTX_PGLOCK(ctx);
        error = ctx_map_buf_locked(ctx, base, size, ma, pflags, flags);
        DMAR_CTX_PGUNLOCK(ctx);
        if (error != 0)
                return (error);

        if ((unit->hw_cap & DMAR_CAP_CM) != 0)
                ctx_flush_iotlb_sync(ctx, base, size);
        else if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) {
                /* See 11.1 Write Buffer Flushing. */
                DMAR_LOCK(unit);
                dmar_flush_write_bufs(unit);
                DMAR_UNLOCK(unit);
        }
        return (0);
}

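/*
 * An illustrative call (a sketch only; the real callers live
 * elsewhere in the DMAR code): mapping a wired, page-aligned run of
 * pages ma[] read/write for a device, sleeping on page table page
 * allocation, could look like
 *
 *      error = ctx_map_buf(ctx, base, size, ma,
 *          DMAR_PTE_R | DMAR_PTE_W, DMAR_PGF_WAITOK);
 */
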
static void ctx_unmap_clear_pte(struct dmar_ctx *ctx, dmar_gaddr_t base,
    int lvl, int flags, dmar_pte_t *pte, struct sf_buf **sf, bool free_sf);

static void
ctx_free_pgtbl_pde(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl, int flags)
{
        struct sf_buf *sf;
        dmar_pte_t *pde;
        vm_pindex_t idx;

        sf = NULL;
        pde = ctx_pgtbl_map_pte(ctx, base, lvl, flags, &idx, &sf);
        ctx_unmap_clear_pte(ctx, base, lvl, flags, pde, &sf, true);
}

static void
ctx_unmap_clear_pte(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl,
    int flags, dmar_pte_t *pte, struct sf_buf **sf, bool free_sf)
{
        vm_page_t m;

        dmar_pte_clear(&pte->pte);
        dmar_flush_pte_to_ram(ctx->dmar, pte);
        m = sf_buf_page(*sf);
        if (free_sf) {
                dmar_unmap_pgtbl(*sf);
                *sf = NULL;
        }
        m->wire_count--;
        if (m->wire_count != 0)
                return;
        KASSERT(lvl != 0,
            ("lost reference (lvl) on root pg ctx %p base %jx lvl %d",
            ctx, (uintmax_t)base, lvl));
        KASSERT(m->pindex != 0,
            ("lost reference (idx) on root pg ctx %p base %jx lvl %d",
            ctx, (uintmax_t)base, lvl));
        dmar_pgfree(ctx->pgtbl_obj, m->pindex, flags);
        ctx_free_pgtbl_pde(ctx, base, lvl - 1, flags);
}

/*
 * Assumes that the unmap is never partial.
 */
static int
ctx_unmap_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base,
    dmar_gaddr_t size, int flags)
{
        dmar_pte_t *pte;
        struct sf_buf *sf;
        vm_pindex_t idx;
        dmar_gaddr_t pg_sz;
        int lvl;

        DMAR_CTX_ASSERT_PGLOCKED(ctx);
        if (size == 0)
                return (0);

        KASSERT((ctx->flags & DMAR_CTX_IDMAP) == 0,
            ("modifying idmap pagetable ctx %p", ctx));
        KASSERT((base & DMAR_PAGE_MASK) == 0,
            ("non-aligned base %p %jx %jx", ctx, (uintmax_t)base,
            (uintmax_t)size));
        KASSERT((size & DMAR_PAGE_MASK) == 0,
            ("non-aligned size %p %jx %jx", ctx, (uintmax_t)base,
            (uintmax_t)size));
        KASSERT(base < (1ULL << ctx->agaw),
            ("base too high %p %jx %jx agaw %d", ctx, (uintmax_t)base,
            (uintmax_t)size, ctx->agaw));
        KASSERT(base + size < (1ULL << ctx->agaw),
            ("end too high %p %jx %jx agaw %d", ctx, (uintmax_t)base,
            (uintmax_t)size, ctx->agaw));
        KASSERT(base + size > base,
            ("size overflow %p %jx %jx", ctx, (uintmax_t)base,
            (uintmax_t)size));
        KASSERT((flags & ~DMAR_PGF_WAITOK) == 0, ("invalid flags %x", flags));

        pg_sz = 0; /* silence gcc */
        flags |= DMAR_PGF_OBJL;
        TD_PREP_PINNED_ASSERT;

        for (sf = NULL; size > 0; base += pg_sz, size -= pg_sz) {
                for (lvl = 0; lvl < ctx->pglvl; lvl++) {
                        if (lvl != ctx->pglvl - 1 && !ctx_is_sp_lvl(ctx, lvl))
                                continue;
                        pg_sz = ctx_page_size(ctx, lvl);
                        if (pg_sz > size)
                                continue;
                        pte = ctx_pgtbl_map_pte(ctx, base, lvl, flags,
                            &idx, &sf);
                        KASSERT(pte != NULL,
                            ("sleeping or page missed %p %jx %d 0x%x",
                            ctx, (uintmax_t)base, lvl, flags));
                        if ((pte->pte & DMAR_PTE_SP) != 0 ||
                            lvl == ctx->pglvl - 1) {
                                ctx_unmap_clear_pte(ctx, base, lvl, flags,
                                    pte, &sf, false);
                                break;
                        }
                }
                KASSERT(size >= pg_sz,
                    ("unmapping loop overflow %p %jx %jx %jx", ctx,
                    (uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz));
        }
        if (sf != NULL)
                dmar_unmap_pgtbl(sf);
        /*
         * See 11.1 Write Buffer Flushing for an explanation of why
         * RWBF can be ignored here.
         */

        TD_PINNED_ASSERT;
        return (0);
}

int
ctx_unmap_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
    int flags)
{
        int error;

        DMAR_CTX_PGLOCK(ctx);
        error = ctx_unmap_buf_locked(ctx, base, size, flags);
        DMAR_CTX_PGUNLOCK(ctx);
        return (error);
}

int
ctx_alloc_pgtbl(struct dmar_ctx *ctx)
{
        vm_page_t m;

        KASSERT(ctx->pgtbl_obj == NULL, ("already initialized %p", ctx));

        ctx->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL,
            IDX_TO_OFF(pglvl_max_pages(ctx->pglvl)), 0, 0, NULL);
        DMAR_CTX_PGLOCK(ctx);
        m = dmar_pgalloc(ctx->pgtbl_obj, 0, DMAR_PGF_WAITOK |
            DMAR_PGF_ZERO | DMAR_PGF_OBJL);
        /* No implicit free of the top level page table page. */
        m->wire_count = 1;
        DMAR_CTX_PGUNLOCK(ctx);
        return (0);
}

void
ctx_free_pgtbl(struct dmar_ctx *ctx)
{
        vm_object_t obj;
        vm_page_t m;

        obj = ctx->pgtbl_obj;
        if (obj == NULL) {
                KASSERT((ctx->dmar->hw_ecap & DMAR_ECAP_PT) != 0 &&
                    (ctx->flags & DMAR_CTX_IDMAP) != 0,
                    ("lost pagetable object ctx %p", ctx));
                return;
        }
        DMAR_CTX_ASSERT_PGLOCKED(ctx);
        ctx->pgtbl_obj = NULL;

        if ((ctx->flags & DMAR_CTX_IDMAP) != 0) {
                put_idmap_pgtbl(obj);
                ctx->flags &= ~DMAR_CTX_IDMAP;
                return;
        }

        /* Obliterate wire_counts */
        VM_OBJECT_ASSERT_WLOCKED(obj);
        for (m = vm_page_lookup(obj, 0); m != NULL; m = vm_page_next(m))
                m->wire_count = 0;
        VM_OBJECT_WUNLOCK(obj);
        vm_object_deallocate(obj);
}

static inline uint64_t
ctx_wait_iotlb_flush(struct dmar_unit *unit, uint64_t wt, int iro)
{
        uint64_t iotlbr;

        dmar_write8(unit, iro + DMAR_IOTLB_REG_OFF, DMAR_IOTLB_IVT |
            DMAR_IOTLB_DR | DMAR_IOTLB_DW | wt);
        for (;;) {
                iotlbr = dmar_read8(unit, iro + DMAR_IOTLB_REG_OFF);
                if ((iotlbr & DMAR_IOTLB_IVT) == 0)
                        break;
                cpu_spinwait();
        }
        return (iotlbr);
}

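/*
 * Flush the IOTLB for the domain of ctx using register-based
 * (non-queued) invalidation.  ctx_wait_iotlb_flush() above requests
 * the invalidation by setting DMAR_IOTLB_IVT in the IOTLB register
 * and busy-waits until the hardware clears the bit.  Page-selective
 * invalidation is attempted only when the unit supports PSI and the
 * range is no larger than 2MB; otherwise the whole domain is
 * invalidated.
 */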
void
ctx_flush_iotlb_sync(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size)
{
        struct dmar_unit *unit;
        dmar_gaddr_t isize;
        uint64_t iotlbr;
        int am, iro;

        unit = ctx->dmar;
        KASSERT(!unit->qi_enabled, ("dmar%d: sync iotlb flush call",
            unit->unit));
        iro = DMAR_ECAP_IRO(unit->hw_ecap) * 16;
        DMAR_LOCK(unit);
        if ((unit->hw_cap & DMAR_CAP_PSI) == 0 || size > 2 * 1024 * 1024) {
                iotlbr = ctx_wait_iotlb_flush(unit, DMAR_IOTLB_IIRG_DOM |
                    DMAR_IOTLB_DID(ctx->domain), iro);
                KASSERT((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
                    DMAR_IOTLB_IAIG_INVLD,
                    ("dmar%d: invalidation failed %jx", unit->unit,
                    (uintmax_t)iotlbr));
        } else {
                for (; size > 0; base += isize, size -= isize) {
                        am = calc_am(unit, base, size, &isize);
                        dmar_write8(unit, iro, base | am);
                        iotlbr = ctx_wait_iotlb_flush(unit,
                            DMAR_IOTLB_IIRG_PAGE | DMAR_IOTLB_DID(ctx->domain),
                            iro);
                        KASSERT((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
                            DMAR_IOTLB_IAIG_INVLD,
                            ("dmar%d: PSI invalidation failed "
                            "iotlbr 0x%jx base 0x%jx size 0x%jx am %d",
                            unit->unit, (uintmax_t)iotlbr,
                            (uintmax_t)base, (uintmax_t)size, am));
                        /*
                         * Any non-page granularity covers the whole
                         * guest address space for the domain.
                         */
                        if ((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
                            DMAR_IOTLB_IAIG_PAGE)
                                break;
                }
        }
        DMAR_UNLOCK(unit);
}

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.