FreeBSD/Linux Kernel Cross Reference
sys/x86/iommu/intel_dmar.h


/*-
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/10.1/sys/x86/iommu/intel_dmar.h 263747 2014-03-25 20:17:57Z kib $
 */

#ifndef __X86_IOMMU_INTEL_DMAR_H
#define __X86_IOMMU_INTEL_DMAR_H

/* Host or physical memory address, after translation. */
typedef uint64_t dmar_haddr_t;
/* Guest or bus address, before translation. */
typedef uint64_t dmar_gaddr_t;

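/*
 * A queued-invalidation (QI) wait sequence number.  The 32-bit seq
 * counter is paired with gen, which counts seq overflows (see
 * inv_waitd_gen in struct dmar_unit below).
 */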
struct dmar_qi_genseq {
        u_int gen;
        uint32_t seq;
};

struct dmar_map_entry {
        dmar_gaddr_t start;
        dmar_gaddr_t end;
        dmar_gaddr_t free_after;        /* Free space after the entry */
        dmar_gaddr_t free_down;         /* Max free space below the
                                           current R/B tree node */
        u_int flags;
        TAILQ_ENTRY(dmar_map_entry) dmamap_link; /* Link for dmamap entries */
        RB_ENTRY(dmar_map_entry) rb_entry;       /* Links for ctx entries */
        TAILQ_ENTRY(dmar_map_entry) unroll_link; /* Link for unroll after
                                                    dmamap_load failure */
        struct dmar_ctx *ctx;
        struct dmar_qi_genseq gseq;
};

RB_HEAD(dmar_gas_entries_tree, dmar_map_entry);
RB_PROTOTYPE(dmar_gas_entries_tree, dmar_map_entry, rb_entry,
    dmar_gas_cmp_entries);

#define DMAR_MAP_ENTRY_PLACE    0x0001  /* Fake entry */
#define DMAR_MAP_ENTRY_RMRR     0x0002  /* Permanent, not linked by
                                           dmamap_link */
#define DMAR_MAP_ENTRY_MAP      0x0004  /* Busdma created, linked by
                                           dmamap_link */
#define DMAR_MAP_ENTRY_UNMAPPED 0x0010  /* No backing pages */
#define DMAR_MAP_ENTRY_QI_NF    0x0020  /* qi task, do not free entry */
#define DMAR_MAP_ENTRY_READ     0x1000  /* Read permitted */
#define DMAR_MAP_ENTRY_WRITE    0x2000  /* Write permitted */
#define DMAR_MAP_ENTRY_SNOOP    0x4000  /* Snoop */
#define DMAR_MAP_ENTRY_TM       0x8000  /* Transient */
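/*
 * The READ, WRITE, SNOOP and TM flags presumably mirror the access and
 * attribute bits programmed into the DMAR page-table entries for the
 * mapping (an assumption from the names; not stated in this header).
 */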

struct dmar_ctx {
        int bus;        /* pci bus/slot/func */
        int slot;
        int func;
        int domain;     /* DID */
        int mgaw;       /* Real max address width */
        int agaw;       /* Adjusted guest address width */
        int pglvl;      /* The page-table level count */
        int awlvl;      /* The page level, encoded as a bitmask
                           to set in the context entry */
        dmar_gaddr_t end; /* Highest address + 1 in the guest AS */
        u_int refs;     /* References to the context, from tags */
        struct dmar_unit *dmar;
        struct bus_dma_tag_dmar ctx_tag; /* Root tag */
        struct mtx lock;
        LIST_ENTRY(dmar_ctx) link;      /* Member in the dmar list */
        vm_object_t pgtbl_obj;          /* Page table pages */
        u_int flags;                    /* Protected by dmar lock */
        uint64_t last_fault_rec[2];     /* Last fault reported */
        u_int entries_cnt;
        u_long loads;
        u_long unloads;
        struct dmar_gas_entries_tree rb_root;
        struct dmar_map_entries_tailq unload_entries; /* Entries to unload */
        struct dmar_map_entry *first_place, *last_place;
        struct task unload_task;
};
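/*
 * The rb_root tree, together with the first_place/last_place placeholder
 * entries, appears to implement the per-context guest address space (GAS)
 * allocator that the dmar_gas_*() functions declared below operate on.
 */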

/* struct dmar_ctx flags */
#define DMAR_CTX_FAULTED        0x0001  /* Fault was reported,
                                           last_fault_rec is valid */
#define DMAR_CTX_IDMAP          0x0002  /* Context uses identity page table */
#define DMAR_CTX_RMRR           0x0004  /* Context contains RMRR entry,
                                           cannot be turned off */
#define DMAR_CTX_DISABLED       0x0008  /* Device is disabled, the
                                           ephemeral reference is kept
                                           to prevent context destruction */

#define DMAR_CTX_PGLOCK(ctx)    VM_OBJECT_WLOCK((ctx)->pgtbl_obj)
#define DMAR_CTX_PGTRYLOCK(ctx) VM_OBJECT_TRYWLOCK((ctx)->pgtbl_obj)
#define DMAR_CTX_PGUNLOCK(ctx)  VM_OBJECT_WUNLOCK((ctx)->pgtbl_obj)
#define DMAR_CTX_ASSERT_PGLOCKED(ctx) \
        VM_OBJECT_ASSERT_WLOCKED((ctx)->pgtbl_obj)

#define DMAR_CTX_LOCK(ctx)      mtx_lock(&(ctx)->lock)
#define DMAR_CTX_UNLOCK(ctx)    mtx_unlock(&(ctx)->lock)
#define DMAR_CTX_ASSERT_LOCKED(ctx) mtx_assert(&(ctx)->lock, MA_OWNED)

struct dmar_msi_data {
        int irq;
        int irq_rid;
        struct resource *irq_res;
        void *intr_handle;
        int (*handler)(void *);
        int msi_data_reg;
        int msi_addr_reg;
        int msi_uaddr_reg;
        void (*enable_intr)(struct dmar_unit *);
        void (*disable_intr)(struct dmar_unit *);
        const char *name;
};

#define DMAR_INTR_FAULT         0
#define DMAR_INTR_QI            1
#define DMAR_INTR_TOTAL         2

struct dmar_unit {
        device_t dev;
        int unit;
        uint16_t segment;
        uint64_t base;

        /* Resources */
        int reg_rid;
        struct resource *regs;

        struct dmar_msi_data intrs[DMAR_INTR_TOTAL];

        /* Hardware registers cache */
        uint32_t hw_ver;
        uint64_t hw_cap;
        uint64_t hw_ecap;
        uint32_t hw_gcmd;

        /* Data for being a dmar */
        struct mtx lock;
        LIST_HEAD(, dmar_ctx) contexts;
        struct unrhdr *domids;
        vm_object_t ctx_obj;
        u_int barrier_flags;

        /* Fault handler data */
        struct mtx fault_lock;
        uint64_t *fault_log;
        int fault_log_head;
        int fault_log_tail;
        int fault_log_size;
        struct task fault_task;
        struct taskqueue *fault_taskqueue;

        /* QI */
        int qi_enabled;
        vm_offset_t inv_queue;
        vm_size_t inv_queue_size;
        uint32_t inv_queue_avail;
        uint32_t inv_queue_tail;
        volatile uint32_t inv_waitd_seq_hw; /* hw writes here on wait
                                               descriptor completion */
        uint64_t inv_waitd_seq_hw_phys;
        uint32_t inv_waitd_seq; /* next sequence number to use for wait descr */
        u_int inv_waitd_gen;    /* seq number generation, i.e. seq overflows */
        u_int inv_seq_waiters;  /* count of waiters for seq */
        u_int inv_queue_full;   /* informational counter */

        /* Delayed freeing of map entries queue processing */
        struct dmar_map_entries_tailq tlb_flush_entries;
        struct task qi_task;
        struct taskqueue *qi_taskqueue;

        /* Busdma delayed map load */
        struct task dmamap_load_task;
        TAILQ_HEAD(, bus_dmamap_dmar) delayed_maps;
        struct taskqueue *delayed_taskqueue;
};

#define DMAR_LOCK(dmar)         mtx_lock(&(dmar)->lock)
#define DMAR_UNLOCK(dmar)       mtx_unlock(&(dmar)->lock)
#define DMAR_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->lock, MA_OWNED)

#define DMAR_FAULT_LOCK(dmar)   mtx_lock_spin(&(dmar)->fault_lock)
#define DMAR_FAULT_UNLOCK(dmar) mtx_unlock_spin(&(dmar)->fault_lock)
#define DMAR_FAULT_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->fault_lock, MA_OWNED)

#define DMAR_IS_COHERENT(dmar)  (((dmar)->hw_ecap & DMAR_ECAP_C) != 0)
#define DMAR_HAS_QI(dmar)       (((dmar)->hw_ecap & DMAR_ECAP_QI) != 0)

/* Barrier ids */
#define DMAR_BARRIER_RMRR       0
#define DMAR_BARRIER_USEQ       1

struct dmar_unit *dmar_find(device_t dev);

u_int dmar_nd2mask(u_int nd);
bool dmar_pglvl_supported(struct dmar_unit *unit, int pglvl);
int ctx_set_agaw(struct dmar_ctx *ctx, int mgaw);
int dmar_maxaddr2mgaw(struct dmar_unit *unit, dmar_gaddr_t maxaddr,
    bool allow_less);
vm_pindex_t pglvl_max_pages(int pglvl);
int ctx_is_sp_lvl(struct dmar_ctx *ctx, int lvl);
dmar_gaddr_t pglvl_page_size(int total_pglvl, int lvl);
dmar_gaddr_t ctx_page_size(struct dmar_ctx *ctx, int lvl);
int calc_am(struct dmar_unit *unit, dmar_gaddr_t base, dmar_gaddr_t size,
    dmar_gaddr_t *isizep);
struct vm_page *dmar_pgalloc(vm_object_t obj, vm_pindex_t idx, int flags);
void dmar_pgfree(vm_object_t obj, vm_pindex_t idx, int flags);
void *dmar_map_pgtbl(vm_object_t obj, vm_pindex_t idx, int flags,
    struct sf_buf **sf);
void dmar_unmap_pgtbl(struct sf_buf *sf, bool coherent);
int dmar_load_root_entry_ptr(struct dmar_unit *unit);
int dmar_inv_ctx_glob(struct dmar_unit *unit);
int dmar_inv_iotlb_glob(struct dmar_unit *unit);
int dmar_flush_write_bufs(struct dmar_unit *unit);
int dmar_enable_translation(struct dmar_unit *unit);
int dmar_disable_translation(struct dmar_unit *unit);
bool dmar_barrier_enter(struct dmar_unit *dmar, u_int barrier_id);
void dmar_barrier_exit(struct dmar_unit *dmar, u_int barrier_id);
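/*
 * Sketch of the presumed barrier usage (an assumption from the interface,
 * not stated here): dmar_barrier_enter() returns true when the caller is
 * elected to perform the one-time work for the given barrier id, after
 * which the caller must call dmar_barrier_exit():
 *
 *	if (dmar_barrier_enter(dmar, DMAR_BARRIER_RMRR)) {
 *		... one-time RMRR initialization ...
 *		dmar_barrier_exit(dmar, DMAR_BARRIER_RMRR);
 *	}
 */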

int dmar_fault_intr(void *arg);
void dmar_enable_fault_intr(struct dmar_unit *unit);
void dmar_disable_fault_intr(struct dmar_unit *unit);
int dmar_init_fault_log(struct dmar_unit *unit);
void dmar_fini_fault_log(struct dmar_unit *unit);

int dmar_qi_intr(void *arg);
void dmar_enable_qi_intr(struct dmar_unit *unit);
void dmar_disable_qi_intr(struct dmar_unit *unit);
int dmar_init_qi(struct dmar_unit *unit);
void dmar_fini_qi(struct dmar_unit *unit);
void dmar_qi_invalidate_locked(struct dmar_ctx *ctx, dmar_gaddr_t start,
    dmar_gaddr_t size, struct dmar_qi_genseq *pseq);
void dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit);
void dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit);

vm_object_t ctx_get_idmap_pgtbl(struct dmar_ctx *ctx, dmar_gaddr_t maxaddr);
void put_idmap_pgtbl(vm_object_t obj);
int ctx_map_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
    vm_page_t *ma, uint64_t pflags, int flags);
int ctx_unmap_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
    int flags);
void ctx_flush_iotlb_sync(struct dmar_ctx *ctx, dmar_gaddr_t base,
    dmar_gaddr_t size);
int ctx_alloc_pgtbl(struct dmar_ctx *ctx);
void ctx_free_pgtbl(struct dmar_ctx *ctx);

struct dmar_ctx *dmar_instantiate_ctx(struct dmar_unit *dmar, device_t dev,
    bool rmrr);
struct dmar_ctx *dmar_get_ctx(struct dmar_unit *dmar, device_t dev,
    int bus, int slot, int func, bool id_mapped, bool rmrr_init);
void dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx);
void dmar_free_ctx(struct dmar_ctx *ctx);
struct dmar_ctx *dmar_find_ctx_locked(struct dmar_unit *dmar, int bus,
    int slot, int func);
void dmar_ctx_unload_entry(struct dmar_map_entry *entry, bool free);
void dmar_ctx_unload(struct dmar_ctx *ctx,
    struct dmar_map_entries_tailq *entries, bool cansleep);
void dmar_ctx_free_entry(struct dmar_map_entry *entry, bool free);

int dmar_init_busdma(struct dmar_unit *unit);
void dmar_fini_busdma(struct dmar_unit *unit);

void dmar_gas_init_ctx(struct dmar_ctx *ctx);
void dmar_gas_fini_ctx(struct dmar_ctx *ctx);
struct dmar_map_entry *dmar_gas_alloc_entry(struct dmar_ctx *ctx, u_int flags);
void dmar_gas_free_entry(struct dmar_ctx *ctx, struct dmar_map_entry *entry);
void dmar_gas_free_space(struct dmar_ctx *ctx, struct dmar_map_entry *entry);
int dmar_gas_map(struct dmar_ctx *ctx, const struct bus_dma_tag_common *common,
    dmar_gaddr_t size, u_int eflags, u_int flags, vm_page_t *ma,
    struct dmar_map_entry **res);
void dmar_gas_free_region(struct dmar_ctx *ctx, struct dmar_map_entry *entry);
int dmar_gas_map_region(struct dmar_ctx *ctx, struct dmar_map_entry *entry,
    u_int eflags, u_int flags, vm_page_t *ma);
int dmar_gas_reserve_region(struct dmar_ctx *ctx, dmar_gaddr_t start,
    dmar_gaddr_t end);

void dmar_ctx_parse_rmrr(struct dmar_ctx *ctx, device_t dev,
    struct dmar_map_entries_tailq *rmrr_entries);
int dmar_instantiate_rmrr_ctxs(struct dmar_unit *dmar);

void dmar_quirks_post_ident(struct dmar_unit *dmar);
void dmar_quirks_pre_use(struct dmar_unit *dmar);

#define DMAR_GM_CANWAIT 0x0001
#define DMAR_GM_CANSPLIT 0x0002
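/*
 * Presumed meanings of the DMAR_GM_* flags taken by dmar_gas_map() (an
 * assumption from the names): CANWAIT permits sleeping for resources,
 * CANSPLIT permits satisfying a request with more than one map entry.
 */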

#define DMAR_PGF_WAITOK 0x0001
#define DMAR_PGF_ZERO   0x0002
#define DMAR_PGF_ALLOC  0x0004
#define DMAR_PGF_NOALLOC 0x0008
#define DMAR_PGF_OBJL   0x0010
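/*
 * Presumed meanings of the DMAR_PGF_* flags passed to dmar_pgalloc(),
 * dmar_pgfree() and dmar_map_pgtbl() (an assumption from the names, not
 * stated in this header): WAITOK allows sleeping, ZERO zero-fills a newly
 * allocated page, ALLOC allocates a missing page while NOALLOC fails
 * instead, and OBJL means the caller already holds the VM object lock.
 */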

extern dmar_haddr_t dmar_high;
extern int haw;
extern int dmar_tbl_pagecnt;
extern int dmar_match_verbose;
extern int dmar_check_free;

static inline uint32_t
dmar_read4(const struct dmar_unit *unit, int reg)
{

        return (bus_read_4(unit->regs, reg));
}

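/*
 * On i386 a 64-bit register is read as two 32-bit accesses, low word
 * first, so the combined value is not an atomic snapshot of the
 * register.
 */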
static inline uint64_t
dmar_read8(const struct dmar_unit *unit, int reg)
{
#ifdef __i386__
        uint32_t high, low;

        low = bus_read_4(unit->regs, reg);
        high = bus_read_4(unit->regs, reg + 4);
        return (low | ((uint64_t)high << 32));
#else
        return (bus_read_8(unit->regs, reg));
#endif
}

static inline void
dmar_write4(const struct dmar_unit *unit, int reg, uint32_t val)
{

        KASSERT(reg != DMAR_GCMD_REG || (val & DMAR_GCMD_TE) ==
            (unit->hw_gcmd & DMAR_GCMD_TE),
            ("dmar%d clearing TE 0x%08x 0x%08x", unit->unit,
            unit->hw_gcmd, val));
        bus_write_4(unit->regs, reg, val);
}
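/*
 * The assertion above catches attempts to toggle the translation-enable
 * (TE) bit of the global command register with a plain register write;
 * TE transitions presumably must go through dmar_enable_translation()
 * and dmar_disable_translation(), declared earlier in this header.
 */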

static inline void
dmar_write8(const struct dmar_unit *unit, int reg, uint64_t val)
{

        KASSERT(reg != DMAR_GCMD_REG, ("8byte GCMD write"));
#ifdef __i386__
        uint32_t high, low;

        low = val;
        high = val >> 32;
        bus_write_4(unit->regs, reg, low);
        bus_write_4(unit->regs, reg + 4, high);
#else
        bus_write_8(unit->regs, reg, val);
#endif
}

/*
 * dmar_pte_store and dmar_pte_clear ensure that on i386, 32-bit writes
 * are issued in the correct order.  For store, the lower word,
 * containing the P or R and W bits, is set only after the high word
 * is written.  For clear, the P bit is cleared first, then the high
 * word is cleared.
 */
static inline void
dmar_pte_store(volatile uint64_t *dst, uint64_t val)
{

        KASSERT(*dst == 0, ("used pte %p oldval %jx newval %jx",
            dst, (uintmax_t)*dst, (uintmax_t)val));
#ifdef __i386__
        volatile uint32_t *p;
        uint32_t hi, lo;

        hi = val >> 32;
        lo = val;
        p = (volatile uint32_t *)dst;
        *(p + 1) = hi;
        *p = lo;
#else
        *dst = val;
#endif
}

static inline void
dmar_pte_clear(volatile uint64_t *dst)
{
#ifdef __i386__
        volatile uint32_t *p;

        p = (volatile uint32_t *)dst;
        *p = 0;
        *(p + 1) = 0;
#else
        *dst = 0;
#endif
}

static inline bool
dmar_test_boundary(dmar_gaddr_t start, dmar_gaddr_t size,
    dmar_gaddr_t boundary)
{

        if (boundary == 0)
                return (true);
        return (start + size <= ((start + boundary) & ~(boundary - 1)));
}
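/*
 * Worked example for dmar_test_boundary(), assuming boundary is a power
 * of two as in the bus_dma boundary convention: (start + boundary) &
 * ~(boundary - 1) is the first boundary crossing above start.  With
 * boundary 0x1000 and start 0xf00, the crossing is at 0x1000, so size
 * 0x100 fits (0xf00 + 0x100 <= 0x1000) while size 0x200 does not
 * (0xf00 + 0x200 = 0x1100 > 0x1000).
 */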
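/*
 * A minimal usage sketch (an assumption from the macro bodies): code
 * that pins the current thread brackets itself with these macros so
 * that, under INVARIANTS, a leaked sched_pin() is caught:
 *
 *	TD_PREP_PINNED_ASSERT;
 *	sched_pin();
 *	...
 *	sched_unpin();
 *	TD_PINNED_ASSERT;
 */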
#ifdef INVARIANTS
#define TD_PREP_PINNED_ASSERT                                           \
        int old_td_pinned;                                              \
        old_td_pinned = curthread->td_pinned
#define TD_PINNED_ASSERT                                                \
        KASSERT(curthread->td_pinned == old_td_pinned,                  \
            ("pin count leak: %d %d %s:%d", curthread->td_pinned,       \
            old_td_pinned, __FILE__, __LINE__))
#else
#define TD_PREP_PINNED_ASSERT
#define TD_PINNED_ASSERT
#endif

#endif /* __X86_IOMMU_INTEL_DMAR_H */
