
FreeBSD/Linux Kernel Cross Reference
sys/x86/iommu/intel_dmar.h


/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013-2015 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __X86_IOMMU_INTEL_DMAR_H
#define __X86_IOMMU_INTEL_DMAR_H

#include <dev/iommu/iommu.h>

struct dmar_unit;

/*
 * Locking annotations:
 * (u) - Protected by iommu unit lock
 * (d) - Protected by domain lock
 * (c) - Immutable after initialization
 */

/*
 * The domain abstraction.  Most non-constant members of the domain
 * are protected by the owning dmar unit lock, not by the domain lock.
 * Most importantly, the dmar unit lock protects the contexts list.
 *
 * The domain lock protects the address map for the domain and the
 * list of delayed unload entries.
 *
 * Page table pages and their content are protected by the lock of the
 * vm object pgtbl_obj, which contains the page table pages.
 */
struct dmar_domain {
        struct iommu_domain iodom;
        int domain;                     /* (c) DID, written in context entry */
        int mgaw;                       /* (c) Real max address width */
        int agaw;                       /* (c) Adjusted guest address width */
        int pglvl;                      /* (c) The pagelevel */
        int awlvl;                      /* (c) The pagelevel as the bitmask,
                                           to set in context entry */
        u_int ctx_cnt;                  /* (u) Number of contexts owned */
        u_int refs;                     /* (u) Refs, including ctx */
        struct dmar_unit *dmar;         /* (c) */
        LIST_ENTRY(dmar_domain) link;   /* (u) Member in the dmar list */
        LIST_HEAD(, dmar_ctx) contexts; /* (u) */
        vm_object_t pgtbl_obj;          /* (c) Page table pages */
        u_int batch_no;
};

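/*
 * A minimal sketch of the locking rules above (illustrative only, not
 * part of this header; the helper name is hypothetical).  Walking the
 * (u)-annotated contexts list requires the owning dmar unit lock,
 * while the domain lock only covers the address map and the delayed
 * unload list:
 *
 *	static u_int
 *	dmar_domain_count_ctxs(struct dmar_domain *domain)
 *	{
 *		struct dmar_ctx *ctx;
 *		u_int n;
 *
 *		DMAR_LOCK(domain->dmar);
 *		n = 0;
 *		LIST_FOREACH(ctx, &domain->contexts, link)
 *			n++;
 *		DMAR_UNLOCK(domain->dmar);
 *		return (n);
 *	}
 */
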
struct dmar_ctx {
        struct iommu_ctx context;
        uint64_t last_fault_rec[2];     /* Last fault reported */
        LIST_ENTRY(dmar_ctx) link;      /* (u) Member in the domain list */
        u_int refs;                     /* (u) References from tags */
};

#define DMAR_DOMAIN_PGLOCK(dom)         VM_OBJECT_WLOCK((dom)->pgtbl_obj)
#define DMAR_DOMAIN_PGTRYLOCK(dom)      VM_OBJECT_TRYWLOCK((dom)->pgtbl_obj)
#define DMAR_DOMAIN_PGUNLOCK(dom)       VM_OBJECT_WUNLOCK((dom)->pgtbl_obj)
#define DMAR_DOMAIN_ASSERT_PGLOCKED(dom) \
        VM_OBJECT_ASSERT_WLOCKED((dom)->pgtbl_obj)

#define DMAR_DOMAIN_LOCK(dom)   mtx_lock(&(dom)->iodom.lock)
#define DMAR_DOMAIN_UNLOCK(dom) mtx_unlock(&(dom)->iodom.lock)
#define DMAR_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->iodom.lock, MA_OWNED)

#define DMAR2IOMMU(dmar)        &((dmar)->iommu)
#define IOMMU2DMAR(dmar)        \
        __containerof((dmar), struct dmar_unit, iommu)

#define DOM2IODOM(domain)       &((domain)->iodom)
#define IODOM2DOM(domain)       \
        __containerof((domain), struct dmar_domain, iodom)

#define CTX2IOCTX(ctx)          &((ctx)->context)
#define IOCTX2CTX(ctx)          \
        __containerof((ctx), struct dmar_ctx, context)

#define CTX2DOM(ctx)            IODOM2DOM((ctx)->context.domain)
#define CTX2DMAR(ctx)           (CTX2DOM(ctx)->dmar)
#define DOM2DMAR(domain)        ((domain)->dmar)

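/*
 * The conversion macros above wrap the generic iommu objects embedded
 * in their DMAR-specific containers.  A hedged sketch of the round
 * trip, assuming only the struct layouts shown in this header:
 *
 *	struct dmar_unit *dmar;
 *	struct iommu_unit *iommu;
 *
 *	iommu = DMAR2IOMMU(dmar);	// address of the embedded member
 *	dmar = IOMMU2DMAR(iommu);	// __containerof() recovers container
 *
 * IOMMU2DMAR() effectively subtracts offsetof(struct dmar_unit, iommu)
 * from the pointer, so it is valid only for iommu_unit pointers that
 * really are embedded in a struct dmar_unit.
 */
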
struct dmar_msi_data {
        int irq;
        int irq_rid;
        struct resource *irq_res;
        void *intr_handle;
        int (*handler)(void *);
        int msi_data_reg;
        int msi_addr_reg;
        int msi_uaddr_reg;
        void (*enable_intr)(struct dmar_unit *);
        void (*disable_intr)(struct dmar_unit *);
        const char *name;
};

#define DMAR_INTR_FAULT         0
#define DMAR_INTR_QI            1
#define DMAR_INTR_TOTAL         2

struct dmar_unit {
        struct iommu_unit iommu;
        device_t dev;
        uint16_t segment;
        uint64_t base;

        /* Resources */
        int reg_rid;
        struct resource *regs;

        struct dmar_msi_data intrs[DMAR_INTR_TOTAL];

        /* Hardware registers cache */
        uint32_t hw_ver;
        uint64_t hw_cap;
        uint64_t hw_ecap;
        uint32_t hw_gcmd;

        /* Data for being a dmar */
        LIST_HEAD(, dmar_domain) domains;
        struct unrhdr *domids;
        vm_object_t ctx_obj;
        u_int barrier_flags;

        /* Fault handler data */
        struct mtx fault_lock;
        uint64_t *fault_log;
        int fault_log_head;
        int fault_log_tail;
        int fault_log_size;
        struct task fault_task;
        struct taskqueue *fault_taskqueue;

        /* QI */
        int qi_enabled;
        vm_offset_t inv_queue;
        vm_size_t inv_queue_size;
        uint32_t inv_queue_avail;
        uint32_t inv_queue_tail;
        volatile uint32_t inv_waitd_seq_hw; /* hw writes here on wait
                                               descriptor completion */
        uint64_t inv_waitd_seq_hw_phys;
        uint32_t inv_waitd_seq; /* next sequence number for wait descriptor */
        u_int inv_waitd_gen;    /* seq number generation, i.e. seq overflows */
        u_int inv_seq_waiters;  /* count of waiters for seq */
        u_int inv_queue_full;   /* informational counter */

        /* IR */
        int ir_enabled;
        vm_paddr_t irt_phys;
        dmar_irte_t *irt;
        u_int irte_cnt;
        vmem_t *irtids;

        /*
         * Delayed freeing of map entries queue processing:
         *
         * tlb_flush_head and tlb_flush_tail are used to implement a FIFO
         * queue that supports concurrent dequeues and enqueues.  However,
         * there can only be a single dequeuer (accessing tlb_flush_head) and
         * a single enqueuer (accessing tlb_flush_tail) at a time.  Since the
         * unit's qi_task is the only dequeuer, it can access tlb_flush_head
         * without any locking.  In contrast, there may be multiple enqueuers,
         * so the enqueuers acquire the iommu unit lock to serialize their
         * accesses to tlb_flush_tail.
         *
         * In this FIFO queue implementation, the key to enabling concurrent
         * dequeues and enqueues is that the dequeuer never needs to access
         * tlb_flush_tail and the enqueuer never needs to access
         * tlb_flush_head.  In particular, tlb_flush_head and tlb_flush_tail
         * are never NULL, so neither a dequeuer nor an enqueuer ever needs to
         * update both.  Instead, tlb_flush_head always points to a "zombie"
         * struct, which previously held the last dequeued item.  Thus, the
         * zombie's next field actually points to the struct holding the first
         * item in the queue.  When an item is dequeued, the current zombie is
         * finally freed, and the struct that held the just dequeued item
         * becomes the new zombie.  When the queue is empty, tlb_flush_tail
         * also points to the zombie.
         */
        struct iommu_map_entry *tlb_flush_head;
        struct iommu_map_entry *tlb_flush_tail;
        struct task qi_task;
        struct taskqueue *qi_taskqueue;
};

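/*
 * A hedged sketch of the zombie-based FIFO described above.  This is
 * illustrative only: the real enqueue/dequeue code lives in the QI
 * implementation, and the helper names and the "next" link shown here
 * are hypothetical.
 *
 *	// Enqueuers serialize on the unit lock and touch only the tail.
 *	static void
 *	tlb_flush_enqueue(struct dmar_unit *unit, struct iommu_map_entry *e)
 *	{
 *		e->next = NULL;
 *		DMAR_LOCK(unit);
 *		unit->tlb_flush_tail->next = e;
 *		unit->tlb_flush_tail = e;
 *		DMAR_UNLOCK(unit);
 *	}
 *
 *	// The single dequeuer (qi_task) touches only the head, locklessly.
 *	static struct iommu_map_entry *
 *	tlb_flush_dequeue(struct dmar_unit *unit)
 *	{
 *		struct iommu_map_entry *zombie, *first;
 *
 *		zombie = unit->tlb_flush_head;
 *		first = zombie->next;
 *		if (first == NULL)
 *			return (NULL);		// queue is empty
 *		unit->tlb_flush_head = first;	// "first" is the new zombie
 *		free_entry(zombie);		// hypothetical free of old zombie
 *		return (first);
 *	}
 */
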
#define DMAR_LOCK(dmar)         mtx_lock(&(dmar)->iommu.lock)
#define DMAR_UNLOCK(dmar)       mtx_unlock(&(dmar)->iommu.lock)
#define DMAR_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->iommu.lock, MA_OWNED)

#define DMAR_FAULT_LOCK(dmar)   mtx_lock_spin(&(dmar)->fault_lock)
#define DMAR_FAULT_UNLOCK(dmar) mtx_unlock_spin(&(dmar)->fault_lock)
#define DMAR_FAULT_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->fault_lock, MA_OWNED)

#define DMAR_IS_COHERENT(dmar)  (((dmar)->hw_ecap & DMAR_ECAP_C) != 0)
#define DMAR_HAS_QI(dmar)       (((dmar)->hw_ecap & DMAR_ECAP_QI) != 0)
#define DMAR_X2APIC(dmar) \
        (x2apic_mode && ((dmar)->hw_ecap & DMAR_ECAP_EIM) != 0)

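/*
 * The capability tests above gate optional hardware features.  A small
 * sketch (hypothetical caller): queued invalidation is set up only
 * when the extended capability register advertises it.
 *
 *	if (DMAR_HAS_QI(unit))
 *		error = dmar_init_qi(unit);
 */
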
/* Barrier ids */
#define DMAR_BARRIER_RMRR       0
#define DMAR_BARRIER_USEQ       1

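/*
 * Sketch of the barrier protocol (hypothetical caller; see
 * dmar_barrier_enter()/dmar_barrier_exit() below): the first thread to
 * enter wins the right to perform the one-time initialization, and
 * later callers wait until it exits.
 *
 *	if (dmar_barrier_enter(dmar, DMAR_BARRIER_RMRR)) {
 *		// ... one-time RMRR setup ...
 *		dmar_barrier_exit(dmar, DMAR_BARRIER_RMRR);
 *	}
 */
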
struct dmar_unit *dmar_find(device_t dev, bool verbose);
struct dmar_unit *dmar_find_hpet(device_t dev, uint16_t *rid);
struct dmar_unit *dmar_find_ioapic(u_int apic_id, uint16_t *rid);

u_int dmar_nd2mask(u_int nd);
bool dmar_pglvl_supported(struct dmar_unit *unit, int pglvl);
int domain_set_agaw(struct dmar_domain *domain, int mgaw);
int dmar_maxaddr2mgaw(struct dmar_unit *unit, iommu_gaddr_t maxaddr,
    bool allow_less);
vm_pindex_t pglvl_max_pages(int pglvl);
int domain_is_sp_lvl(struct dmar_domain *domain, int lvl);
iommu_gaddr_t pglvl_page_size(int total_pglvl, int lvl);
iommu_gaddr_t domain_page_size(struct dmar_domain *domain, int lvl);
int calc_am(struct dmar_unit *unit, iommu_gaddr_t base, iommu_gaddr_t size,
    iommu_gaddr_t *isizep);
struct vm_page *dmar_pgalloc(vm_object_t obj, vm_pindex_t idx, int flags);
void dmar_pgfree(vm_object_t obj, vm_pindex_t idx, int flags);
void *dmar_map_pgtbl(vm_object_t obj, vm_pindex_t idx, int flags,
    struct sf_buf **sf);
void dmar_unmap_pgtbl(struct sf_buf *sf);
int dmar_load_root_entry_ptr(struct dmar_unit *unit);
int dmar_inv_ctx_glob(struct dmar_unit *unit);
int dmar_inv_iotlb_glob(struct dmar_unit *unit);
int dmar_flush_write_bufs(struct dmar_unit *unit);
void dmar_flush_pte_to_ram(struct dmar_unit *unit, dmar_pte_t *dst);
void dmar_flush_ctx_to_ram(struct dmar_unit *unit, dmar_ctx_entry_t *dst);
void dmar_flush_root_to_ram(struct dmar_unit *unit, dmar_root_entry_t *dst);
int dmar_enable_translation(struct dmar_unit *unit);
int dmar_disable_translation(struct dmar_unit *unit);
int dmar_load_irt_ptr(struct dmar_unit *unit);
int dmar_enable_ir(struct dmar_unit *unit);
int dmar_disable_ir(struct dmar_unit *unit);
bool dmar_barrier_enter(struct dmar_unit *dmar, u_int barrier_id);
void dmar_barrier_exit(struct dmar_unit *dmar, u_int barrier_id);
uint64_t dmar_get_timeout(void);
void dmar_update_timeout(uint64_t newval);

int dmar_fault_intr(void *arg);
void dmar_enable_fault_intr(struct dmar_unit *unit);
void dmar_disable_fault_intr(struct dmar_unit *unit);
int dmar_init_fault_log(struct dmar_unit *unit);
void dmar_fini_fault_log(struct dmar_unit *unit);

int dmar_qi_intr(void *arg);
void dmar_enable_qi_intr(struct dmar_unit *unit);
void dmar_disable_qi_intr(struct dmar_unit *unit);
int dmar_init_qi(struct dmar_unit *unit);
void dmar_fini_qi(struct dmar_unit *unit);
void dmar_qi_invalidate_locked(struct dmar_domain *domain,
    struct iommu_map_entry *entry, bool emit_wait);
void dmar_qi_invalidate_sync(struct dmar_domain *domain, iommu_gaddr_t start,
    iommu_gaddr_t size, bool cansleep);
void dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit);
void dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit);
void dmar_qi_invalidate_iec_glob(struct dmar_unit *unit);
void dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt);

vm_object_t domain_get_idmap_pgtbl(struct dmar_domain *domain,
    iommu_gaddr_t maxaddr);
void put_idmap_pgtbl(vm_object_t obj);
void domain_flush_iotlb_sync(struct dmar_domain *domain, iommu_gaddr_t base,
    iommu_gaddr_t size);
int domain_alloc_pgtbl(struct dmar_domain *domain);
void domain_free_pgtbl(struct dmar_domain *domain);
extern const struct iommu_domain_map_ops dmar_domain_map_ops;

int dmar_dev_depth(device_t child);
void dmar_dev_path(device_t child, int *busno, void *path1, int depth);

struct dmar_ctx *dmar_get_ctx_for_dev(struct dmar_unit *dmar, device_t dev,
    uint16_t rid, bool id_mapped, bool rmrr_init);
struct dmar_ctx *dmar_get_ctx_for_devpath(struct dmar_unit *dmar, uint16_t rid,
    int dev_domain, int dev_busno, const void *dev_path, int dev_path_len,
    bool id_mapped, bool rmrr_init);
int dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx);
void dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx);
void dmar_free_ctx(struct dmar_ctx *ctx);
struct dmar_ctx *dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid);
void dmar_domain_free_entry(struct iommu_map_entry *entry, bool free);

void dmar_dev_parse_rmrr(struct dmar_domain *domain, int dev_domain,
    int dev_busno, const void *dev_path, int dev_path_len,
    struct iommu_map_entries_tailq *rmrr_entries);
int dmar_instantiate_rmrr_ctxs(struct iommu_unit *dmar);

void dmar_quirks_post_ident(struct dmar_unit *dmar);
void dmar_quirks_pre_use(struct iommu_unit *dmar);

int dmar_init_irt(struct dmar_unit *unit);
void dmar_fini_irt(struct dmar_unit *unit);

extern iommu_haddr_t dmar_high;
extern int haw;
extern int dmar_tbl_pagecnt;
extern int dmar_batch_coalesce;

static inline uint32_t
dmar_read4(const struct dmar_unit *unit, int reg)
{

        return (bus_read_4(unit->regs, reg));
}

static inline uint64_t
dmar_read8(const struct dmar_unit *unit, int reg)
{
#ifdef __i386__
        uint32_t high, low;

        low = bus_read_4(unit->regs, reg);
        high = bus_read_4(unit->regs, reg + 4);
        return (low | ((uint64_t)high << 32));
#else
        return (bus_read_8(unit->regs, reg));
#endif
}

static inline void
dmar_write4(const struct dmar_unit *unit, int reg, uint32_t val)
{

        KASSERT(reg != DMAR_GCMD_REG || (val & DMAR_GCMD_TE) ==
            (unit->hw_gcmd & DMAR_GCMD_TE),
            ("dmar%d clearing TE 0x%08x 0x%08x", unit->iommu.unit,
            unit->hw_gcmd, val));
        bus_write_4(unit->regs, reg, val);
}

static inline void
dmar_write8(const struct dmar_unit *unit, int reg, uint64_t val)
{

        KASSERT(reg != DMAR_GCMD_REG, ("8byte GCMD write"));
#ifdef __i386__
        uint32_t high, low;

        low = val;
        high = val >> 32;
        bus_write_4(unit->regs, reg, low);
        bus_write_4(unit->regs, reg + 4, high);
#else
        bus_write_8(unit->regs, reg, val);
#endif
}

/*
 * dmar_pte_store and dmar_pte_clear ensure that on i386, 32-bit writes
 * are issued in the correct order.  For store, the lower word,
 * containing the P or R and W bits, is set only after the high word
 * is written.  For clear, the P bit is cleared first, then the high
 * word is cleared.
 *
 * dmar_pte_update updates the pte.  For amd64, the update is atomic.
 * For i386, it first disables the entry by clearing the word
 * containing the P bit, and then defers to dmar_pte_store1.  A locked
 * cmpxchg8b is probably available on any machine having DMAR support,
 * but the interrupt translation table may be mapped uncached.
 */
static inline void
dmar_pte_store1(volatile uint64_t *dst, uint64_t val)
{
#ifdef __i386__
        volatile uint32_t *p;
        uint32_t hi, lo;

        hi = val >> 32;
        lo = val;
        p = (volatile uint32_t *)dst;
        *(p + 1) = hi;
        *p = lo;
#else
        *dst = val;
#endif
}

static inline void
dmar_pte_store(volatile uint64_t *dst, uint64_t val)
{

        KASSERT(*dst == 0, ("used pte %p oldval %jx newval %jx",
            dst, (uintmax_t)*dst, (uintmax_t)val));
        dmar_pte_store1(dst, val);
}

static inline void
dmar_pte_update(volatile uint64_t *dst, uint64_t val)
{

#ifdef __i386__
        volatile uint32_t *p;

        p = (volatile uint32_t *)dst;
        *p = 0;
#endif
        dmar_pte_store1(dst, val);
}

static inline void
dmar_pte_clear(volatile uint64_t *dst)
{
#ifdef __i386__
        volatile uint32_t *p;

        p = (volatile uint32_t *)dst;
        *p = 0;
        *(p + 1) = 0;
#else
        *dst = 0;
#endif
}

extern struct timespec dmar_hw_timeout;

#define DMAR_WAIT_UNTIL(cond)                                   \
{                                                               \
        struct timespec last, curr;                             \
        bool forever;                                           \
                                                                \
        if (dmar_hw_timeout.tv_sec == 0 &&                      \
            dmar_hw_timeout.tv_nsec == 0) {                     \
                forever = true;                                 \
        } else {                                                \
                forever = false;                                \
                nanouptime(&curr);                              \
                timespecadd(&curr, &dmar_hw_timeout, &last);    \
        }                                                       \
        for (;;) {                                              \
                if (cond) {                                     \
                        error = 0;                              \
                        break;                                  \
                }                                               \
                nanouptime(&curr);                              \
                if (!forever && timespeccmp(&last, &curr, <)) { \
                        error = ETIMEDOUT;                      \
                        break;                                  \
                }                                               \
                cpu_spinwait();                                 \
        }                                                       \
}

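/*
 * DMAR_WAIT_UNTIL() expands to a block that assigns to a local
 * variable named "error" in the enclosing scope (0 on success,
 * ETIMEDOUT on timeout).  A minimal usage sketch, assuming the
 * DMAR_GSTS_REG/DMAR_GSTS_TES definitions from the register header
 * (the function itself is hypothetical):
 *
 *	static int
 *	dmar_wait_tes(struct dmar_unit *unit)
 *	{
 *		int error;
 *
 *		DMAR_WAIT_UNTIL(((dmar_read4(unit, DMAR_GSTS_REG) &
 *		    DMAR_GSTS_TES) != 0));
 *		return (error);
 *	}
 */
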
#ifdef INVARIANTS
#define TD_PREP_PINNED_ASSERT                                           \
        int old_td_pinned;                                              \
        old_td_pinned = curthread->td_pinned
#define TD_PINNED_ASSERT                                                \
        KASSERT(curthread->td_pinned == old_td_pinned,                  \
            ("pin count leak: %d %d %s:%d", curthread->td_pinned,       \
            old_td_pinned, __FILE__, __LINE__))
#else
#define TD_PREP_PINNED_ASSERT
#define TD_PINNED_ASSERT
#endif

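/*
 * The pin-count assertions bracket code that temporarily pins the
 * current thread to a CPU.  A hedged usage sketch (the function is
 * hypothetical):
 *
 *	static void
 *	dmar_pinned_op(void)
 *	{
 *		TD_PREP_PINNED_ASSERT;
 *
 *		sched_pin();
 *		// ... work that must stay on this CPU ...
 *		sched_unpin();
 *		TD_PINNED_ASSERT;
 *	}
 *
 * With INVARIANTS enabled, TD_PINNED_ASSERT panics if a sched_pin()
 * call leaked past the function.
 */
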
#endif
