FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/busdma_machdep-v6.c


    1 /*-
    2  * Copyright (c) 2012-2014 Ian Lepore
    3  * Copyright (c) 2010 Mark Tinguely
    4  * Copyright (c) 2004 Olivier Houchard
    5  * Copyright (c) 2002 Peter Grehan
    6  * Copyright (c) 1997, 1998 Justin T. Gibbs.
    7  * All rights reserved.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions, and the following disclaimer,
   14  *    without modification, immediately at the beginning of the file.
   15  * 2. The name of the author may not be used to endorse or promote products
   16  *    derived from this software without specific prior written permission.
   17  *
   18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   21  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
   22  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   28  * SUCH DAMAGE.
   29  *
   30  *  From i386/busdma_machdep.c 191438 2009-04-23 20:24:19Z jhb
   31  */
   32 
   33 #include <sys/cdefs.h>
   34 __FBSDID("$FreeBSD$");
   35 
   36 #define _ARM32_BUS_DMA_PRIVATE
   37 #include <sys/param.h>
   38 #include <sys/kdb.h>
   39 #include <ddb/ddb.h>
   40 #include <ddb/db_output.h>
   41 #include <sys/systm.h>
   42 #include <sys/malloc.h>
   43 #include <sys/bus.h>
   44 #include <sys/busdma_bufalloc.h>
   45 #include <sys/interrupt.h>
   46 #include <sys/kernel.h>
   47 #include <sys/ktr.h>
   48 #include <sys/lock.h>
   49 #include <sys/memdesc.h>
   50 #include <sys/proc.h>
   51 #include <sys/mutex.h>
   52 #include <sys/sysctl.h>
   53 #include <sys/uio.h>
   54 
   55 #include <vm/vm.h>
   56 #include <vm/vm_page.h>
   57 #include <vm/vm_map.h>
   58 #include <vm/vm_extern.h>
   59 #include <vm/vm_kern.h>
   60 
   61 #include <machine/atomic.h>
   62 #include <machine/bus.h>
   63 #include <machine/cpufunc.h>
   64 #include <machine/md_var.h>
   65 
   66 #define MAX_BPAGES 64
   67 #define MAX_DMA_SEGMENTS        4096
   68 #define BUS_DMA_EXCL_BOUNCE     BUS_DMA_BUS2
   69 #define BUS_DMA_ALIGN_BOUNCE    BUS_DMA_BUS3
   70 #define BUS_DMA_COULD_BOUNCE    (BUS_DMA_EXCL_BOUNCE | BUS_DMA_ALIGN_BOUNCE)
   71 #define BUS_DMA_MIN_ALLOC_COMP  BUS_DMA_BUS4
   72 
   73 struct bounce_zone;
   74 
   75 struct bus_dma_tag {
   76         bus_dma_tag_t     parent;
   77         bus_size_t        alignment;
   78         bus_size_t        boundary;
   79         bus_addr_t        lowaddr;
   80         bus_addr_t        highaddr;
   81         bus_dma_filter_t *filter;
   82         void             *filterarg;
   83         bus_size_t        maxsize;
   84         u_int             nsegments;
   85         bus_size_t        maxsegsz;
   86         int               flags;
   87         int               ref_count;
   88         int               map_count;
   89         bus_dma_lock_t   *lockfunc;
   90         void             *lockfuncarg;
   91         struct bounce_zone *bounce_zone;
   92         /*
   93          * DMA range for this tag.  If the page doesn't fall within
   94          * one of these ranges, an error is returned.  The caller
   95          * may then decide what to do with the transfer.  If the
   96          * range pointer is NULL, it is ignored.
   97          */
   98         struct arm32_dma_range  *ranges;
   99         int                     _nranges;
  100 };
  101 
  102 struct bounce_page {
  103         vm_offset_t     vaddr;          /* kva of bounce buffer */
  104         bus_addr_t      busaddr;        /* Physical address */
  105         vm_offset_t     datavaddr;      /* kva of client data */
  106         bus_addr_t      dataaddr;       /* client physical address */
  107         bus_size_t      datacount;      /* client data count */
  108         STAILQ_ENTRY(bounce_page) links;
  109 };
  110 
  111 struct sync_list {
   112         vm_offset_t     vaddr;          /* kva of client data */
   113         bus_addr_t      busaddr;        /* client physical address */
  114         bus_size_t      datacount;      /* client data count */
  115 };
  116 
  117 int busdma_swi_pending;
  118 
  119 struct bounce_zone {
  120         STAILQ_ENTRY(bounce_zone) links;
  121         STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
  122         int             total_bpages;
  123         int             free_bpages;
  124         int             reserved_bpages;
  125         int             active_bpages;
  126         int             total_bounced;
  127         int             total_deferred;
  128         int             map_count;
  129         bus_size_t      alignment;
  130         bus_addr_t      lowaddr;
  131         char            zoneid[8];
  132         char            lowaddrid[20];
  133         struct sysctl_ctx_list sysctl_tree;
  134         struct sysctl_oid *sysctl_tree_top;
  135 };
  136 
  137 static struct mtx bounce_lock;
  138 static int total_bpages;
  139 static int busdma_zonecount;
  140 static uint32_t tags_total;
  141 static uint32_t maps_total;
  142 static uint32_t maps_dmamem;
  143 static uint32_t maps_coherent;
  144 static uint64_t maploads_total;
  145 static uint64_t maploads_bounced;
  146 static uint64_t maploads_coherent;
  147 static uint64_t maploads_dmamem;
  148 static uint64_t maploads_mbuf;
  149 static uint64_t maploads_physmem;
  150 
  151 static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
  152 
  153 SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
  154 SYSCTL_UINT(_hw_busdma, OID_AUTO, tags_total, CTLFLAG_RD, &tags_total, 0,
  155            "Number of active tags");
  156 SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_total, CTLFLAG_RD, &maps_total, 0,
  157            "Number of active maps");
  158 SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_dmamem, CTLFLAG_RD, &maps_dmamem, 0,
  159            "Number of active maps for bus_dmamem_alloc buffers");
  160 SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_coherent, CTLFLAG_RD, &maps_coherent, 0,
  161            "Number of active maps with BUS_DMA_COHERENT flag set");
  162 SYSCTL_UQUAD(_hw_busdma, OID_AUTO, maploads_total, CTLFLAG_RD, &maploads_total, 0,
  163            "Number of load operations performed");
  164 SYSCTL_UQUAD(_hw_busdma, OID_AUTO, maploads_bounced, CTLFLAG_RD, &maploads_bounced, 0,
  165            "Number of load operations that used bounce buffers");
   166 SYSCTL_UQUAD(_hw_busdma, OID_AUTO, maploads_coherent, CTLFLAG_RD, &maploads_coherent, 0,
  167            "Number of load operations on BUS_DMA_COHERENT memory");
  168 SYSCTL_UQUAD(_hw_busdma, OID_AUTO, maploads_dmamem, CTLFLAG_RD, &maploads_dmamem, 0,
  169            "Number of load operations on bus_dmamem_alloc buffers");
  170 SYSCTL_UQUAD(_hw_busdma, OID_AUTO, maploads_mbuf, CTLFLAG_RD, &maploads_mbuf, 0,
  171            "Number of load operations for mbufs");
  172 SYSCTL_UQUAD(_hw_busdma, OID_AUTO, maploads_physmem, CTLFLAG_RD, &maploads_physmem, 0,
  173            "Number of load operations on physical buffers");
  174 SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
  175            "Total bounce pages");
  176 
  177 struct bus_dmamap {
  178         struct bp_list         bpages;
  179         int                    pagesneeded;
  180         int                    pagesreserved;
  181         bus_dma_tag_t          dmat;
  182         struct memdesc         mem;
  183         pmap_t                 pmap;
  184         bus_dmamap_callback_t *callback;
  185         void                  *callback_arg;
  186         int                   flags;
  187 #define DMAMAP_COHERENT         (1 << 0)
  188 #define DMAMAP_DMAMEM_ALLOC     (1 << 1)
  189 #define DMAMAP_MBUF             (1 << 2)
  190         STAILQ_ENTRY(bus_dmamap) links;
  191         bus_dma_segment_t       *segments;
  192         int                    sync_count;
  193         struct sync_list       slist[];
  194 };
  195 
  196 static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
  197 static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
  198 
  199 static void init_bounce_pages(void *dummy);
  200 static int alloc_bounce_zone(bus_dma_tag_t dmat);
  201 static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
  202 static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
  203                                 int commit);
  204 static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
  205                                   vm_offset_t vaddr, bus_addr_t addr,
  206                                   bus_size_t size);
  207 static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
  208 static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
  209     void *buf, bus_size_t buflen, int flags);
  210 static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
  211     vm_paddr_t buf, bus_size_t buflen, int flags);
  212 static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
  213     int flags);
  214 
  215 static busdma_bufalloc_t coherent_allocator;    /* Cache of coherent buffers */
  216 static busdma_bufalloc_t standard_allocator;    /* Cache of standard buffers */
  217 static void
  218 busdma_init(void *dummy)
  219 {
  220         int uma_flags;
  221 
  222         uma_flags = 0;
  223 
  224         /* Create a cache of buffers in standard (cacheable) memory. */
  225         standard_allocator = busdma_bufalloc_create("buffer", 
  226             arm_dcache_align,   /* minimum_alignment */
  227             NULL,               /* uma_alloc func */ 
  228             NULL,               /* uma_free func */
  229             uma_flags);         /* uma_zcreate_flags */
  230 
  231 #ifdef INVARIANTS
   232         /*
   233          * Force the UMA zone to allocate service structures such as
   234          * slabs using its own allocator.  The uma_debug code performs
   235          * atomic ops on uma_slab_t fields, and the safety of those
   236          * operations is not guaranteed for write-back caches.
  237          */
  238         uma_flags = UMA_ZONE_OFFPAGE;
  239 #endif
  240         /*
  241          * Create a cache of buffers in uncacheable memory, to implement the
  242          * BUS_DMA_COHERENT (and potentially BUS_DMA_NOCACHE) flag.
  243          */
  244         coherent_allocator = busdma_bufalloc_create("coherent",
  245             arm_dcache_align,   /* minimum_alignment */
  246             busdma_bufalloc_alloc_uncacheable, 
  247             busdma_bufalloc_free_uncacheable, 
  248             uma_flags); /* uma_zcreate_flags */
  249 }
  250 
  251 /*
  252  * This init historically used SI_SUB_VM, but now the init code requires
  253  * malloc(9) using M_DEVBUF memory, which is set up later than SI_SUB_VM, by
  254  * SI_SUB_KMEM and SI_ORDER_SECOND, so we'll go right after that by using
  255  * SI_SUB_KMEM and SI_ORDER_THIRD.
  256  */
  257 SYSINIT(busdma, SI_SUB_KMEM, SI_ORDER_THIRD, busdma_init, NULL);
  258 
  259 static int
  260 exclusion_bounce_check(vm_offset_t lowaddr, vm_offset_t highaddr)
  261 {
  262         int i;
  263         for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
  264                 if ((lowaddr >= phys_avail[i] && lowaddr < phys_avail[i + 1]) ||
  265                     (lowaddr < phys_avail[i] && highaddr >= phys_avail[i]))
  266                         return (1);
  267         }
  268         return (0);
  269 }
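/*
 * Worked example (values are illustrative only): if the platform reports
 * phys_avail[] = { 0x20000000, 0x60000000, 0, 0 } and a tag is created
 * with lowaddr 0x2fffffff, lowaddr lands inside that range, the function
 * returns 1, and the tag gets flagged BUS_DMA_EXCL_BOUNCE: real memory
 * exists above the device's addressable limit, so loads may have to
 * bounce.  With lowaddr 0xffffffff neither clause matches and no
 * exclusion bouncing is set up.
 */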
  270 
  271 /*
  272  * Return true if the tag has an exclusion zone that could lead to bouncing.
  273  */
  274 static __inline int
  275 exclusion_bounce(bus_dma_tag_t dmat)
  276 {
  277 
  278         return (dmat->flags & BUS_DMA_EXCL_BOUNCE);
  279 }
  280 
  281 /*
  282  * Return true if the given address does not fall on the alignment boundary.
  283  */
  284 static __inline int
  285 alignment_bounce(bus_dma_tag_t dmat, bus_addr_t addr)
  286 {
  287 
  288         return (addr & (dmat->alignment - 1));
  289 }
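/*
 * Worked example: with alignment = 8 the mask is 0x7, so address 0x1000
 * yields 0 (no bounce) while 0x1004 yields 0x4 (bounce).  A tag created
 * with alignment = 1 never bounces here because the mask is 0.
 */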
  290 
  291 /*
  292  * Return true if the DMA should bounce because the start or end does not fall
  293  * on a cacheline boundary (which would require a partial cacheline flush).
  294  * COHERENT memory doesn't trigger cacheline flushes.  Memory allocated by
  295  * bus_dmamem_alloc() is always aligned to cacheline boundaries, and there's a
  296  * strict rule that such memory cannot be accessed by the CPU while DMA is in
  297  * progress (or by multiple DMA engines at once), so that it's always safe to do
  298  * full cacheline flushes even if that affects memory outside the range of a
  299  * given DMA operation that doesn't involve the full allocated buffer.  If we're
  300  * mapping an mbuf, that follows the same rules as a buffer we allocated.
  301  */
  302 static __inline int
  303 cacheline_bounce(bus_dmamap_t map, bus_addr_t addr, bus_size_t size)
  304 {
  305 
  306         if (map->flags & (DMAMAP_DMAMEM_ALLOC | DMAMAP_COHERENT | DMAMAP_MBUF))
  307                 return (0);
  308         return ((addr | size) & arm_dcache_align_mask);
  309 }
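/*
 * Worked example: assuming 32-byte cachelines (arm_dcache_align_mask ==
 * 0x1f), a transfer at address 0x2000 of size 0x100 gives
 * (0x2000 | 0x100) & 0x1f == 0, so no bounce is needed.  Move the start
 * to 0x2004 or the size to 0x104 and the result is nonzero, because one
 * end of the transfer would then share a cacheline with unrelated data.
 */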
  310 
  311 /*
  312  * Return true if we might need to bounce the DMA described by addr and size.
  313  *
  314  * This is used to quick-check whether we need to do the more expensive work of
  315  * checking the DMA page-by-page looking for alignment and exclusion bounces.
  316  *
  317  * Note that the addr argument might be either virtual or physical.  It doesn't
  318  * matter because we only look at the low-order bits, which are the same in both
  319  * address spaces.
  320  */
  321 static __inline int
  322 might_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t addr, 
  323     bus_size_t size)
  324 {
  325 
  326         return ((dmat->flags & BUS_DMA_EXCL_BOUNCE) ||
  327             alignment_bounce(dmat, addr) ||
  328             cacheline_bounce(map, addr, size));
  329 }
  330 
  331 /*
  332  * Return true if we must bounce the DMA described by paddr and size.
  333  *
  334  * Bouncing can be triggered by DMA that doesn't begin and end on cacheline
  335  * boundaries, or doesn't begin on an alignment boundary, or falls within the
  336  * exclusion zone of any tag in the ancestry chain.
  337  *
  338  * For exclusions, walk the chain of tags comparing paddr to the exclusion zone
  339  * within each tag.  If the tag has a filter function, use it to decide whether
  340  * the DMA needs to bounce, otherwise any DMA within the zone bounces.
  341  */
  342 static int
  343 must_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr, 
  344     bus_size_t size)
  345 {
  346 
  347         if (cacheline_bounce(map, paddr, size))
  348                 return (1);
  349 
  350         /*
  351          *  The tag already contains ancestors' alignment restrictions so this
  352          *  check doesn't need to be inside the loop.
  353          */
  354         if (alignment_bounce(dmat, paddr))
  355                 return (1);
  356 
  357         /*
  358          * Even though each tag has an exclusion zone that is a superset of its
  359          * own and all its ancestors' exclusions, the exclusion zone of each tag
  360          * up the chain must be checked within the loop, because the busdma
  361          * rules say the filter function is called only when the address lies
  362          * within the low-highaddr range of the tag that filterfunc belongs to.
  363          */
  364         while (dmat != NULL && exclusion_bounce(dmat)) {
  365                 if ((paddr >= dmat->lowaddr && paddr <= dmat->highaddr) &&
  366                     (dmat->filter == NULL || 
  367                     dmat->filter(dmat->filterarg, paddr) != 0))
  368                         return (1);
  369                 dmat = dmat->parent;
  370         } 
  371 
  372         return (0);
  373 }
  374 
  375 static __inline struct arm32_dma_range *
  376 _bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
  377     bus_addr_t curaddr)
  378 {
  379         struct arm32_dma_range *dr;
  380         int i;
  381 
  382         for (i = 0, dr = ranges; i < nranges; i++, dr++) {
  383                 if (curaddr >= dr->dr_sysbase &&
  384                     round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
  385                         return (dr);
  386         }
  387 
  388         return (NULL);
  389 }
  390 
  391 /*
  392  * Convenience function for manipulating driver locks from busdma (during
  393  * busdma_swi, for example).  Drivers that don't provide their own locks
   394  * should pass &Giant as dmat->lockfuncarg.  Drivers that use their own
  395  * non-mutex locking scheme don't have to use this at all.
  396  */
  397 void
  398 busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
  399 {
  400         struct mtx *dmtx;
  401 
  402         dmtx = (struct mtx *)arg;
  403         switch (op) {
  404         case BUS_DMA_LOCK:
  405                 mtx_lock(dmtx);
  406                 break;
  407         case BUS_DMA_UNLOCK:
  408                 mtx_unlock(dmtx);
  409                 break;
  410         default:
  411                 panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
  412         }
  413 }
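/*
 * For illustration: when a deferred load is finally serviced, busdma_swi()
 * brackets the reload (and therefore the driver callback) with the tag's
 * lockfunc, conceptually:
 *
 *      (*dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
 *      ... reload the map and invoke map->callback ...
 *      (*dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
 *
 * so a driver that registers busdma_lock_mutex with one of its own mutexes
 * has that mutex held across its deferred callbacks.
 */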
  414 
  415 /*
  416  * dflt_lock should never get called.  It gets put into the dma tag when
  417  * lockfunc == NULL, which is only valid if the maps that are associated
   418  * with the tag are meant to never be deferred.
  419  * XXX Should have a way to identify which driver is responsible here.
  420  */
  421 static void
  422 dflt_lock(void *arg, bus_dma_lock_op_t op)
  423 {
  424 
  425         panic("driver error: busdma dflt_lock called");
  426 }
  427 
  428 /*
  429  * Allocate a device specific dma_tag.
  430  */
  431 int
  432 bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
  433                    bus_size_t boundary, bus_addr_t lowaddr,
  434                    bus_addr_t highaddr, bus_dma_filter_t *filter,
  435                    void *filterarg, bus_size_t maxsize, int nsegments,
  436                    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
  437                    void *lockfuncarg, bus_dma_tag_t *dmat)
  438 {
  439         bus_dma_tag_t newtag;
  440         int error = 0;
  441 
  442 #if 0
  443         if (!parent)
  444                 parent = arm_root_dma_tag;
  445 #endif
  446 
  447         /* Basic sanity checking */
  448         if (boundary != 0 && boundary < maxsegsz)
  449                 maxsegsz = boundary;
  450 
  451         /* Return a NULL tag on failure */
  452         *dmat = NULL;
  453 
  454         if (maxsegsz == 0) {
  455                 return (EINVAL);
  456         }
  457 
  458         newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
  459             M_ZERO | M_NOWAIT);
  460         if (newtag == NULL) {
  461                 CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
  462                     __func__, newtag, 0, error);
  463                 return (ENOMEM);
  464         }
  465 
  466         newtag->parent = parent;
  467         newtag->alignment = alignment;
  468         newtag->boundary = boundary;
  469         newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
  470         newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
  471             (PAGE_SIZE - 1);
  472         newtag->filter = filter;
  473         newtag->filterarg = filterarg;
  474         newtag->maxsize = maxsize;
  475         newtag->nsegments = nsegments;
  476         newtag->maxsegsz = maxsegsz;
  477         newtag->flags = flags;
  478         newtag->ref_count = 1; /* Count ourself */
  479         newtag->map_count = 0;
  480         newtag->ranges = bus_dma_get_range();
  481         newtag->_nranges = bus_dma_get_range_nb();
  482         if (lockfunc != NULL) {
  483                 newtag->lockfunc = lockfunc;
  484                 newtag->lockfuncarg = lockfuncarg;
  485         } else {
  486                 newtag->lockfunc = dflt_lock;
  487                 newtag->lockfuncarg = NULL;
  488         }
  489 
  490         /* Take into account any restrictions imposed by our parent tag */
  491         if (parent != NULL) {
  492                 newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
  493                 newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
  494                 newtag->alignment = MAX(parent->alignment, newtag->alignment);
  495                 newtag->flags |= parent->flags & BUS_DMA_COULD_BOUNCE;
  496                 if (newtag->boundary == 0)
  497                         newtag->boundary = parent->boundary;
  498                 else if (parent->boundary != 0)
  499                         newtag->boundary = MIN(parent->boundary,
  500                                                newtag->boundary);
  501                 if (newtag->filter == NULL) {
  502                         /*
  503                          * Short circuit to looking at our parent directly
  504                          * since we have encapsulated all of its information
  505                          */
  506                         newtag->filter = parent->filter;
  507                         newtag->filterarg = parent->filterarg;
  508                         newtag->parent = parent->parent;
  509                 }
  510                 if (newtag->parent != NULL)
  511                         atomic_add_int(&parent->ref_count, 1);
  512         }
  513 
  514         if (exclusion_bounce_check(newtag->lowaddr, newtag->highaddr))
  515                 newtag->flags |= BUS_DMA_EXCL_BOUNCE;
  516         if (alignment_bounce(newtag, 1))
  517                 newtag->flags |= BUS_DMA_ALIGN_BOUNCE;
  518 
  519         /*
  520          * Any request can auto-bounce due to cacheline alignment, in addition
  521          * to any alignment or boundary specifications in the tag, so if the
  522          * ALLOCNOW flag is set, there's always work to do.
  523          */
  524         if ((flags & BUS_DMA_ALLOCNOW) != 0) {
  525                 struct bounce_zone *bz;
  526                 /*
  527                  * Round size up to a full page, and add one more page because
  528                  * there can always be one more boundary crossing than the
  529                  * number of pages in a transfer.
  530                  */
  531                 maxsize = roundup2(maxsize, PAGE_SIZE) + PAGE_SIZE;
  532                 
  533                 if ((error = alloc_bounce_zone(newtag)) != 0) {
  534                         free(newtag, M_DEVBUF);
  535                         return (error);
  536                 }
  537                 bz = newtag->bounce_zone;
  538 
  539                 if (ptoa(bz->total_bpages) < maxsize) {
  540                         int pages;
  541 
  542                         pages = atop(maxsize) - bz->total_bpages;
  543 
  544                         /* Add pages to our bounce pool */
  545                         if (alloc_bounce_pages(newtag, pages) < pages)
  546                                 error = ENOMEM;
  547                 }
  548                 /* Performed initial allocation */
  549                 newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
  550         } else
  551                 newtag->bounce_zone = NULL;
  552 
  553         if (error != 0) {
  554                 free(newtag, M_DEVBUF);
  555         } else {
  556                 atomic_add_32(&tags_total, 1);
  557                 *dmat = newtag;
  558         }
  559         CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
  560             __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
  561         return (error);
  562 }
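/*
 * Illustrative sketch of a typical caller (not part of this file).  The
 * device, softc, and field names below are hypothetical and the constraint
 * values are examples only.
 */
#if 0
        error = bus_dma_tag_create(
            bus_get_dma_tag(dev),       /* parent */
            4, 0,                       /* alignment, boundary */
            BUS_SPACE_MAXADDR,          /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            MCLBYTES,                   /* maxsize */
            1,                          /* nsegments */
            MCLBYTES,                   /* maxsegsz */
            0,                          /* flags */
            busdma_lock_mutex,          /* lockfunc */
            &sc->sc_mtx,                /* lockfuncarg */
            &sc->sc_dmat);              /* tag returned here */
#endif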
  563 
  564 int
  565 bus_dma_tag_destroy(bus_dma_tag_t dmat)
  566 {
  567         bus_dma_tag_t dmat_copy;
  568         int error;
  569 
  570         error = 0;
  571         dmat_copy = dmat;
  572 
  573         if (dmat != NULL) {
  574 
  575                 if (dmat->map_count != 0) {
  576                         error = EBUSY;
  577                         goto out;
  578                 }
  579 
  580                 while (dmat != NULL) {
  581                         bus_dma_tag_t parent;
  582 
  583                         parent = dmat->parent;
  584                         atomic_subtract_int(&dmat->ref_count, 1);
  585                         if (dmat->ref_count == 0) {
  586                                 atomic_subtract_32(&tags_total, 1);
  587                                 free(dmat, M_DEVBUF);
  588                                 /*
  589                                  * Last reference count, so
  590                                  * release our reference
  591                                  * count on our parent.
  592                                  */
  593                                 dmat = parent;
  594                         } else
  595                                 dmat = NULL;
  596                 }
  597         }
  598 out:
  599         CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
  600         return (error);
  601 }
  602 
  603 static int allocate_bz_and_pages(bus_dma_tag_t dmat, bus_dmamap_t mapp)
  604 {
  605         struct bounce_zone *bz;
  606         int maxpages;
  607         int error;
  608                 
  609         if (dmat->bounce_zone == NULL)
  610                 if ((error = alloc_bounce_zone(dmat)) != 0)
  611                         return (error);
  612         bz = dmat->bounce_zone;
  613         /* Initialize the new map */
  614         STAILQ_INIT(&(mapp->bpages));
  615 
  616         /*
  617          * Attempt to add pages to our pool on a per-instance basis up to a sane
  618          * limit.  Even if the tag isn't flagged as COULD_BOUNCE due to
  619          * alignment and boundary constraints, it could still auto-bounce due to
  620          * cacheline alignment, which requires at most two bounce pages.
  621          */
  622         if (dmat->flags & BUS_DMA_COULD_BOUNCE)
  623                 maxpages = MAX_BPAGES;
  624         else
  625                 maxpages = 2 * bz->map_count;
  626         if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
  627             (bz->map_count > 0 && bz->total_bpages < maxpages)) {
  628                 int pages;
  629                 
  630                 pages = atop(roundup2(dmat->maxsize, PAGE_SIZE)) + 1;
  631                 pages = MIN(maxpages - bz->total_bpages, pages);
  632                 pages = MAX(pages, 2);
  633                 if (alloc_bounce_pages(dmat, pages) < pages)
  634                         return (ENOMEM);
  635                 
  636                 if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0)
  637                         dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
  638         }
  639         bz->map_count++;
  640         return (0);
  641 }
  642 
  643 static bus_dmamap_t
  644 allocate_map(bus_dma_tag_t dmat, int mflags)
  645 {
  646         int mapsize, segsize;
  647         bus_dmamap_t map;
  648 
  649         /*
  650          * Allocate the map.  The map structure ends with an embedded
  651          * variable-sized array of sync_list structures.  Following that
  652          * we allocate enough extra space to hold the array of bus_dma_segments.
  653          */
  654         KASSERT(dmat->nsegments <= MAX_DMA_SEGMENTS, 
  655            ("cannot allocate %u dma segments (max is %u)",
  656             dmat->nsegments, MAX_DMA_SEGMENTS));
  657         segsize = sizeof(struct bus_dma_segment) * dmat->nsegments;
  658         mapsize = sizeof(*map) + sizeof(struct sync_list) * dmat->nsegments;
  659         map = malloc(mapsize + segsize, M_DEVBUF, mflags | M_ZERO);
  660         if (map == NULL) {
  661                 CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
  662                 return (NULL);
  663         }
  664         map->segments = (bus_dma_segment_t *)((uintptr_t)map + mapsize);
  665         return (map);
  666 }
  667 
  668 /*
  669  * Allocate a handle for mapping from kva/uva/physical
  670  * address space into bus device space.
  671  */
  672 int
  673 bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
  674 {
  675         bus_dmamap_t map;
  676         int error = 0;
  677 
  678         *mapp = map = allocate_map(dmat, M_NOWAIT);
  679         if (map == NULL) {
  680                 CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
  681                 return (ENOMEM);
  682         }
  683 
  684         /*
  685          * Bouncing might be required if the driver asks for an exclusion
  686          * region, a data alignment that is stricter than 1, or DMA that begins
  687          * or ends with a partial cacheline.  Whether bouncing will actually
  688          * happen can't be known until mapping time, but we need to pre-allocate
  689          * resources now because we might not be allowed to at mapping time.
  690          */
  691         error = allocate_bz_and_pages(dmat, map);
  692         if (error != 0) {
  693                 free(map, M_DEVBUF);
  694                 *mapp = NULL;
  695                 return (error);
  696         }
  697         if (map->flags & DMAMAP_COHERENT)
  698                 atomic_add_32(&maps_coherent, 1);
  699         atomic_add_32(&maps_total, 1);
  700         dmat->map_count++;
  701 
  702         return (0);
  703 }
  704 
  705 /*
  706  * Destroy a handle for mapping from kva/uva/physical
  707  * address space into bus device space.
  708  */
  709 int
  710 bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
  711 {
  712         if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
  713                 CTR3(KTR_BUSDMA, "%s: tag %p error %d",
  714                     __func__, dmat, EBUSY);
  715                 return (EBUSY);
  716         }
  717         if (dmat->bounce_zone)
  718                 dmat->bounce_zone->map_count--;
  719         if (map->flags & DMAMAP_COHERENT)
  720                 atomic_subtract_32(&maps_coherent, 1);
  721         atomic_subtract_32(&maps_total, 1);
  722         free(map, M_DEVBUF);
  723         dmat->map_count--;
  724         CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
  725         return (0);
  726 }
  727 
  728 
  729 /*
  730  * Allocate a piece of memory that can be efficiently mapped into
   731  * bus device space based on the constraints listed in the dma tag.
   732  * A dmamap for use with dmamap_load is also allocated.
  733  */
  734 int
  735 bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
  736                  bus_dmamap_t *mapp)
  737 {
  738         busdma_bufalloc_t ba;
  739         struct busdma_bufzone *bufzone;
  740         bus_dmamap_t map;
  741         vm_memattr_t memattr;
  742         int mflags;
  743 
  744         if (flags & BUS_DMA_NOWAIT)
  745                 mflags = M_NOWAIT;
  746         else
  747                 mflags = M_WAITOK;
  748         if (flags & BUS_DMA_ZERO)
  749                 mflags |= M_ZERO;
  750 
  751         *mapp = map = allocate_map(dmat, mflags);
  752         if (map == NULL) {
  753                 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
  754                     __func__, dmat, dmat->flags, ENOMEM);
  755                 return (ENOMEM);
  756         }
  757         map->flags = DMAMAP_DMAMEM_ALLOC;
  758 
  759         /* Choose a busdma buffer allocator based on memory type flags. */
  760         if (flags & BUS_DMA_COHERENT) {
  761                 memattr = VM_MEMATTR_UNCACHEABLE;
  762                 ba = coherent_allocator;
  763                 map->flags |= DMAMAP_COHERENT;
  764         } else {
  765                 memattr = VM_MEMATTR_DEFAULT;
  766                 ba = standard_allocator;
  767         }
  768 
  769         /*
  770          * Try to find a bufzone in the allocator that holds a cache of buffers
  771          * of the right size for this request.  If the buffer is too big to be
  772          * held in the allocator cache, this returns NULL.
  773          */
  774         bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);
  775 
  776         /*
  777          * Allocate the buffer from the uma(9) allocator if...
  778          *  - It's small enough to be in the allocator (bufzone not NULL).
  779          *  - The alignment constraint isn't larger than the allocation size
  780          *    (the allocator aligns buffers to their size boundaries).
  781          *  - There's no need to handle lowaddr/highaddr exclusion zones.
  782          * else allocate non-contiguous pages if...
   783  *  - The number of segments needed to describe the pages doesn't
   784  *    exceed nsegments, even when the maximum segment size is less
   785  *    than PAGE_SIZE.
  786          *  - The alignment constraint isn't larger than a page boundary.
  787          *  - There are no boundary-crossing constraints.
  788          * else allocate a block of contiguous pages because one or more of the
  789          * constraints is something that only the contig allocator can fulfill.
  790          */
  791         if (bufzone != NULL && dmat->alignment <= bufzone->size &&
  792             !exclusion_bounce(dmat)) {
  793                 *vaddr = uma_zalloc(bufzone->umazone, mflags);
  794         } else if (dmat->nsegments >=
  795             howmany(dmat->maxsize, MIN(dmat->maxsegsz, PAGE_SIZE)) &&
  796             dmat->alignment <= PAGE_SIZE &&
  797             (dmat->boundary % PAGE_SIZE) == 0) {
  798                 *vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
  799                     mflags, 0, dmat->lowaddr, memattr);
  800         } else {
  801                 *vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
  802                     mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
  803                     memattr);
  804         }
  805 
  806 
  807         if (*vaddr == NULL) {
  808                 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
  809                     __func__, dmat, dmat->flags, ENOMEM);
  810                 free(map, M_DEVBUF);
  811                 *mapp = NULL;
  812                 return (ENOMEM);
  813         } else if ((uintptr_t)*vaddr & (dmat->alignment - 1)) {
  814                 printf("bus_dmamem_alloc failed to align memory properly.\n");
  815         }
  816         if (map->flags & DMAMAP_COHERENT)
  817                 atomic_add_32(&maps_coherent, 1);
  818         atomic_add_32(&maps_dmamem, 1);
  819         atomic_add_32(&maps_total, 1);
  820         dmat->map_count++;
  821 
  822         CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
  823             __func__, dmat, dmat->flags, 0);
  824         return (0);
  825 }
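/*
 * Illustrative sketch only: allocating a coherent, zeroed descriptor ring
 * and loading it to learn its bus address.  The softc fields, RING_SIZE,
 * and the callback below are hypothetical.
 */
#if 0
static void
ring_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{

        if (error == 0)
                *(bus_addr_t *)arg = segs[0].ds_addr;
}

        /* In the driver's attach path: */
        error = bus_dmamem_alloc(sc->sc_dmat, &sc->sc_ring,
            BUS_DMA_COHERENT | BUS_DMA_ZERO | BUS_DMA_WAITOK, &sc->sc_map);
        if (error == 0)
                error = bus_dmamap_load(sc->sc_dmat, sc->sc_map, sc->sc_ring,
                    RING_SIZE, ring_cb, &sc->sc_ring_busaddr, BUS_DMA_NOWAIT);
#endif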
  826 
  827 /*
   828  * Free a piece of memory and its associated dmamap that were allocated
   829  * via bus_dmamem_alloc.  The free must mirror the allocator choice made there.
  830  */
  831 void
  832 bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
  833 {
  834         struct busdma_bufzone *bufzone;
  835         busdma_bufalloc_t ba;
  836 
  837         if (map->flags & DMAMAP_COHERENT)
  838                 ba = coherent_allocator;
  839         else
  840                 ba = standard_allocator;
  841 
  842         bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);
  843 
  844         if (bufzone != NULL && dmat->alignment <= bufzone->size &&
  845             !exclusion_bounce(dmat))
  846                 uma_zfree(bufzone->umazone, vaddr);
  847         else
  848                 kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);
  849 
  850         dmat->map_count--;
  851         if (map->flags & DMAMAP_COHERENT)
  852                 atomic_subtract_32(&maps_coherent, 1);
  853         atomic_subtract_32(&maps_total, 1);
  854         atomic_subtract_32(&maps_dmamem, 1);
  855         free(map, M_DEVBUF);
  856         CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
  857 }
  858 
  859 static void
  860 _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
  861     bus_size_t buflen, int flags)
  862 {
  863         bus_addr_t curaddr;
  864         bus_size_t sgsize;
  865 
  866         if (map->pagesneeded == 0) {
  867                 CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d"
  868                     " map= %p, pagesneeded= %d",
  869                     dmat->lowaddr, dmat->boundary, dmat->alignment,
  870                     map, map->pagesneeded);
  871                 /*
  872                  * Count the number of bounce pages
  873                  * needed in order to complete this transfer
  874                  */
  875                 curaddr = buf;
  876                 while (buflen != 0) {
  877                         sgsize = MIN(buflen, dmat->maxsegsz);
  878                         if (must_bounce(dmat, map, curaddr, sgsize) != 0) {
  879                                 sgsize = MIN(sgsize, PAGE_SIZE);
  880                                 map->pagesneeded++;
  881                         }
  882                         curaddr += sgsize;
  883                         buflen -= sgsize;
  884                 }
  885                 CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded);
  886         }
  887 }
  888 
  889 static void
  890 _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
  891     void *buf, bus_size_t buflen, int flags)
  892 {
  893         vm_offset_t vaddr;
  894         vm_offset_t vendaddr;
  895         bus_addr_t paddr;
  896 
  897         if (map->pagesneeded == 0) {
  898                 CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d"
  899                     " map= %p, pagesneeded= %d",
  900                     dmat->lowaddr, dmat->boundary, dmat->alignment,
  901                     map, map->pagesneeded);
  902                 /*
  903                  * Count the number of bounce pages
  904                  * needed in order to complete this transfer
  905                  */
  906                 vaddr = (vm_offset_t)buf;
  907                 vendaddr = (vm_offset_t)buf + buflen;
  908 
  909                 while (vaddr < vendaddr) {
  910                         if (__predict_true(map->pmap == kernel_pmap))
  911                                 paddr = pmap_kextract(vaddr);
  912                         else
  913                                 paddr = pmap_extract(map->pmap, vaddr);
  914                         if (must_bounce(dmat, map, paddr,
  915                             min(vendaddr - vaddr, (PAGE_SIZE - ((vm_offset_t)vaddr & 
  916                             PAGE_MASK)))) != 0) {
  917                                 map->pagesneeded++;
  918                         }
  919                         vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
  920 
  921                 }
  922                 CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded);
  923         }
  924 }
  925 
  926 static int
  927 _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
  928 {
  929 
  930         /* Reserve Necessary Bounce Pages */
  931         mtx_lock(&bounce_lock);
  932         if (flags & BUS_DMA_NOWAIT) {
  933                 if (reserve_bounce_pages(dmat, map, 0) != 0) {
  934                         map->pagesneeded = 0;
  935                         mtx_unlock(&bounce_lock);
  936                         return (ENOMEM);
  937                 }
  938         } else {
  939                 if (reserve_bounce_pages(dmat, map, 1) != 0) {
  940                         /* Queue us for resources */
  941                         STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
  942                         mtx_unlock(&bounce_lock);
  943                         return (EINPROGRESS);
  944                 }
  945         }
  946         mtx_unlock(&bounce_lock);
  947 
  948         return (0);
  949 }
  950 
  951 /*
  952  * Add a single contiguous physical range to the segment list.
  953  */
  954 static int
  955 _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
  956                    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
  957 {
  958         bus_addr_t baddr, bmask;
  959         int seg;
  960 
  961         /*
  962          * Make sure we don't cross any boundaries.
  963          */
  964         bmask = ~(dmat->boundary - 1);
  965         if (dmat->boundary > 0) {
  966                 baddr = (curaddr + dmat->boundary) & bmask;
  967                 if (sgsize > (baddr - curaddr))
  968                         sgsize = (baddr - curaddr);
  969         }
  970 
  971         if (dmat->ranges) {
  972                 struct arm32_dma_range *dr;
  973 
  974                 dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
  975                     curaddr);
  976                 if (dr == NULL) {
  977                         _bus_dmamap_unload(dmat, map);
  978                         return (0);
  979                 }
  980                 /*
  981                  * In a valid DMA range.  Translate the physical
  982                  * memory address to an address in the DMA window.
  983                  */
  984                 curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
  985         }
  986 
  987         /*
  988          * Insert chunk into a segment, coalescing with
  989          * previous segment if possible.
  990          */
  991         seg = *segp;
  992         if (seg == -1) {
  993                 seg = 0;
  994                 segs[seg].ds_addr = curaddr;
  995                 segs[seg].ds_len = sgsize;
  996         } else {
  997                 if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
  998                     (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
  999                     (dmat->boundary == 0 ||
 1000                      (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
 1001                         segs[seg].ds_len += sgsize;
 1002                 else {
 1003                         if (++seg >= dmat->nsegments)
 1004                                 return (0);
 1005                         segs[seg].ds_addr = curaddr;
 1006                         segs[seg].ds_len = sgsize;
 1007                 }
 1008         }
 1009         *segp = seg;
 1010         return (sgsize);
 1011 }
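/*
 * Worked example of the boundary clipping above: with dmat->boundary ==
 * 0x1000, curaddr == 0x20fe0 and sgsize == 0x100, bmask is ~0xfff, so
 * baddr == (0x20fe0 + 0x1000) & ~0xfff == 0x21000 and sgsize is trimmed to
 * 0x21000 - 0x20fe0 == 0x20.  The remaining 0xe0 bytes become the start of
 * the next segment on a later pass through the caller's loop.
 */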
 1012 
 1013 /*
 1014  * Utility function to load a physical buffer.  segp contains
  1015  * the starting segment on entrance, and the ending segment on exit.
 1016  */
 1017 int
 1018 _bus_dmamap_load_phys(bus_dma_tag_t dmat,
 1019                       bus_dmamap_t map,
 1020                       vm_paddr_t buf, bus_size_t buflen,
 1021                       int flags,
 1022                       bus_dma_segment_t *segs,
 1023                       int *segp)
 1024 {
 1025         bus_addr_t curaddr;
 1026         bus_size_t sgsize;
 1027         int error;
 1028 
 1029         if (segs == NULL)
 1030                 segs = map->segments;
 1031 
 1032         maploads_total++;
 1033         maploads_physmem++;
 1034 
  1035         if (might_bounce(dmat, map, buf, buflen)) {
 1036                 _bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
 1037                 if (map->pagesneeded != 0) {
 1038                         maploads_bounced++;
 1039                         error = _bus_dmamap_reserve_pages(dmat, map, flags);
 1040                         if (error)
 1041                                 return (error);
 1042                 }
 1043         }
 1044 
 1045         while (buflen > 0) {
 1046                 curaddr = buf;
 1047                 sgsize = MIN(buflen, dmat->maxsegsz);
 1048                 if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr,
 1049                     sgsize)) {
 1050                         sgsize = MIN(sgsize, PAGE_SIZE);
 1051                         curaddr = add_bounce_page(dmat, map, 0, curaddr,
 1052                                                   sgsize);
 1053                 }
 1054                 sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
 1055                     segp);
 1056                 if (sgsize == 0)
 1057                         break;
 1058                 buf += sgsize;
 1059                 buflen -= sgsize;
 1060         }
 1061 
 1062         /*
 1063          * Did we fit?
 1064          */
 1065         if (buflen != 0) {
 1066                 _bus_dmamap_unload(dmat, map);
 1067                 return (EFBIG); /* XXX better return value here? */
 1068         }
 1069         return (0);
 1070 }
 1071 
 1072 int
 1073 _bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
 1074     struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
 1075     bus_dma_segment_t *segs, int *segp)
 1076 {
 1077 
 1078         return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
 1079             segs, segp));
 1080 }
 1081 
 1082 /*
 1083  * Utility function to load a linear buffer.  segp contains
  1084  * the starting segment on entrance, and the ending segment on exit.
 1085  */
 1086 int
 1087 _bus_dmamap_load_buffer(bus_dma_tag_t dmat,
 1088                         bus_dmamap_t map,
 1089                         void *buf, bus_size_t buflen,
 1090                         pmap_t pmap,
 1091                         int flags,
 1092                         bus_dma_segment_t *segs,
 1093                         int *segp)
 1094 {
 1095         bus_size_t sgsize;
 1096         bus_addr_t curaddr;
 1097         vm_offset_t vaddr;
 1098         struct sync_list *sl;
 1099         int error;
 1100 
 1101         maploads_total++;
 1102         if (map->flags & DMAMAP_COHERENT)
 1103                 maploads_coherent++;
 1104         if (map->flags & DMAMAP_DMAMEM_ALLOC)
 1105                 maploads_dmamem++;
 1106 
 1107         if (segs == NULL)
 1108                 segs = map->segments;
 1109 
 1110         if (flags & BUS_DMA_LOAD_MBUF) {
 1111                 maploads_mbuf++;
 1112                 map->flags |= DMAMAP_MBUF;
 1113         }
 1114 
 1115         map->pmap = pmap;
 1116 
 1117         if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) {
 1118                 _bus_dmamap_count_pages(dmat, map, buf, buflen, flags);
 1119                 if (map->pagesneeded != 0) {
 1120                         maploads_bounced++;
 1121                         error = _bus_dmamap_reserve_pages(dmat, map, flags);
 1122                         if (error)
 1123                                 return (error);
 1124                 }
 1125         }
 1126 
 1127         sl = NULL;
 1128         vaddr = (vm_offset_t)buf;
 1129 
 1130         while (buflen > 0) {
 1131                 /*
 1132                  * Get the physical address for this segment.
 1133                  */
 1134                 if (__predict_true(map->pmap == kernel_pmap))
 1135                         curaddr = pmap_kextract(vaddr);
 1136                 else
 1137                         curaddr = pmap_extract(map->pmap, vaddr);
 1138 
 1139                 /*
 1140                  * Compute the segment size, and adjust counts.
 1141                  */
 1142                 sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
 1143                 if (sgsize > dmat->maxsegsz)
 1144                         sgsize = dmat->maxsegsz;
 1145                 if (buflen < sgsize)
 1146                         sgsize = buflen;
 1147 
 1148                 if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr,
 1149                     sgsize)) {
 1150                         curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
 1151                                                   sgsize);
 1152                 } else {
 1153                         sl = &map->slist[map->sync_count - 1];
 1154                         if (map->sync_count == 0 ||
 1155 #ifdef ARM_L2_PIPT
 1156                             curaddr != sl->busaddr + sl->datacount ||
 1157 #endif
 1158                             vaddr != sl->vaddr + sl->datacount) {
 1159                                 if (++map->sync_count > dmat->nsegments)
 1160                                         goto cleanup;
 1161                                 sl++;
 1162                                 sl->vaddr = vaddr;
 1163                                 sl->datacount = sgsize;
 1164                                 sl->busaddr = curaddr;
 1165                         } else
 1166                                 sl->datacount += sgsize;
 1167                 }
 1168                 sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
 1169                                             segp);
 1170                 if (sgsize == 0)
 1171                         break;
 1172                 vaddr += sgsize;
 1173                 buflen -= sgsize;
 1174         }
 1175 
 1176 cleanup:
 1177         /*
 1178          * Did we fit?
 1179          */
 1180         if (buflen != 0) {
 1181                 _bus_dmamap_unload(dmat, map);
 1182                 return (EFBIG); /* XXX better return value here? */
 1183         }
 1184         return (0);
 1185 }
 1186 
 1187 
 1188 void
 1189 __bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
 1190                     struct memdesc *mem, bus_dmamap_callback_t *callback,
 1191                     void *callback_arg)
 1192 {
 1193 
 1194         map->mem = *mem;
 1195         map->dmat = dmat;
 1196         map->callback = callback;
 1197         map->callback_arg = callback_arg;
 1198 }
 1199 
 1200 bus_dma_segment_t *
 1201 _bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
 1202                      bus_dma_segment_t *segs, int nsegs, int error)
 1203 {
 1204 
 1205         if (segs == NULL)
 1206                 segs = map->segments;
 1207         return (segs);
 1208 }
 1209 
 1210 /*
 1211  * Release the mapping held by map.
 1212  */
 1213 void
 1214 _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
 1215 {
 1216         struct bounce_page *bpage;
 1217         struct bounce_zone *bz;
 1218 
 1219         if ((bz = dmat->bounce_zone) != NULL) {
 1220                 while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
 1221                         STAILQ_REMOVE_HEAD(&map->bpages, links);
 1222                         free_bounce_page(dmat, bpage);
 1223                 }
 1224 
 1225                 bz = dmat->bounce_zone;
 1226                 bz->free_bpages += map->pagesreserved;
 1227                 bz->reserved_bpages -= map->pagesreserved;
 1228                 map->pagesreserved = 0;
 1229                 map->pagesneeded = 0;
 1230         }
 1231         map->sync_count = 0;
 1232         map->flags &= ~DMAMAP_MBUF;
 1233 }
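/*
 * Illustrative teardown order for the resources created in the sketches
 * above (softc fields hypothetical): sync for the CPU, unload the map,
 * free the memory and map, then destroy the tag.
 */
#if 0
        bus_dmamap_sync(sc->sc_dmat, sc->sc_map,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(sc->sc_dmat, sc->sc_map);
        bus_dmamem_free(sc->sc_dmat, sc->sc_ring, sc->sc_map);
        bus_dma_tag_destroy(sc->sc_dmat);
#endif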
 1234 
 1235 #ifdef notyetbounceuser
  1236 /* If busdma uses user pages, then the interrupt handler could
  1237  * be using the kernel vm mapping.  Neither bounce pages nor sync
  1238  * list entries cross page boundaries.
  1239  * Below is a rough sequence of the steps needed to fix up a user
  1240  * page reference in the kernel vmspace.  This would be done in
  1241  * the dma post routine.
 1242  */
 1243 void
 1244 _bus_dmamap_fix_user(vm_offset_t buf, bus_size_t len,
 1245                         pmap_t pmap, int op)
 1246 {
 1247         bus_size_t sgsize;
 1248         bus_addr_t curaddr;
 1249         vm_offset_t va;
 1250 
 1251         /* 
 1252          * each synclist entry is contained within a single page.
 1253          * this would be needed if BUS_DMASYNC_POSTxxxx was implemented
 1254          */
 1255         curaddr = pmap_extract(pmap, buf);
 1256         va = pmap_dma_map(curaddr);
 1257         switch (op) {
 1258         case SYNC_USER_INV:
 1259                 cpu_dcache_wb_range(va, sgsize);
 1260                 break;
 1261 
 1262         case SYNC_USER_COPYTO:
 1263                 bcopy((void *)va, (void *)bounce, sgsize);
 1264                 break;
 1265 
 1266         case SYNC_USER_COPYFROM:
 1267                 bcopy((void *) bounce, (void *)va, sgsize);
 1268                 break;
 1269 
 1270         default:
 1271                 break;
 1272         }
 1273 
 1274         pmap_dma_unmap(va);
 1275 }
 1276 #endif
 1277 
 1278 #ifdef ARM_L2_PIPT
 1279 #define l2cache_wb_range(va, pa, size) cpu_l2cache_wb_range(pa, size)
 1280 #define l2cache_wbinv_range(va, pa, size) cpu_l2cache_wbinv_range(pa, size)
 1281 #define l2cache_inv_range(va, pa, size) cpu_l2cache_inv_range(pa, size)
 1282 #else
 1283 #define l2cache_wb_range(va, pa, size) cpu_l2cache_wb_range(va, size)
 1284 #define l2cache_wbinv_range(va, pa, size) cpu_l2cache_wbinv_range(va, size)
 1285 #define l2cache_inv_range(va, pa, size) cpu_l2cache_inv_range(va, size)
 1286 #endif
 1287 
 1288 void
 1289 _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
 1290 {
 1291         struct bounce_page *bpage;
 1292         struct sync_list *sl, *end;
 1293         /*
 1294          * If the buffer was from user space, it is possible that this is not
 1295          * the same vm map, especially on a POST operation.  It's not clear that
 1296          * dma on userland buffers can work at all right now.  To be safe, until
 1297          * we're able to test direct userland dma, panic on a map mismatch.
 1298          */
 1299         if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
 1300                 if (!pmap_dmap_iscurrent(map->pmap))
 1301                         panic("_bus_dmamap_sync: wrong user map for bounce sync.");
 1302 
 1303                 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
 1304                     "performing bounce", __func__, dmat, dmat->flags, op);
 1305 
 1306                 /*
 1307                  * For PREWRITE do a writeback.  Clean the caches from the
 1308                  * innermost to the outermost levels.
 1309                  */
 1310                 if (op & BUS_DMASYNC_PREWRITE) {
 1311                         while (bpage != NULL) {
 1312                                 if (bpage->datavaddr != 0)
 1313                                         bcopy((void *)bpage->datavaddr,
 1314                                             (void *)bpage->vaddr,
 1315                                             bpage->datacount);
 1316                                 else
 1317                                         physcopyout(bpage->dataaddr,
 1318                                             (void *)bpage->vaddr,
 1319                                             bpage->datacount);
 1320                                 cpu_dcache_wb_range((vm_offset_t)bpage->vaddr,
 1321                                     bpage->datacount);
 1322                                 l2cache_wb_range((vm_offset_t)bpage->vaddr,
 1323                                     (vm_offset_t)bpage->busaddr, 
 1324                                     bpage->datacount);
 1325                                 bpage = STAILQ_NEXT(bpage, links);
 1326                         }
 1327                         dmat->bounce_zone->total_bounced++;
 1328                 }
 1329 
 1330                 /*
 1331                  * Do an invalidate for PREREAD unless a writeback was already
 1332                  * done above due to PREWRITE also being set.  The reason for a
 1333                  * PREREAD invalidate is to prevent dirty lines currently in the
 1334                  * cache from being evicted during the DMA.  If a writeback was
 1335                  * done due to PREWRITE also being set there will be no dirty
 1336                  * lines and the POSTREAD invalidate handles the rest. The
 1337                  * invalidate is done from the innermost to outermost level. If
 1338                  * L2 were done first, a dirty cacheline could be automatically
 1339                  * evicted from L1 before we invalidated it, re-dirtying the L2.
 1340                  */
 1341                 if ((op & BUS_DMASYNC_PREREAD) && !(op & BUS_DMASYNC_PREWRITE)) {
 1342                         bpage = STAILQ_FIRST(&map->bpages);
 1343                         while (bpage != NULL) {
 1344                                 cpu_dcache_inv_range((vm_offset_t)bpage->vaddr,
 1345                                     bpage->datacount);
 1346                                 l2cache_inv_range((vm_offset_t)bpage->vaddr,
 1347                                     (vm_offset_t)bpage->busaddr,
 1348                                     bpage->datacount);
 1349                                 bpage = STAILQ_NEXT(bpage, links);
 1350                         }
 1351                 }
 1352 
 1353                 /*
 1354                  * Re-invalidate the caches on a POSTREAD, even though they were
 1355                  * already invalidated at PREREAD time.  Aggressive prefetching
 1356                  * due to accesses to other data near the dma buffer could have
 1357                  * brought buffer data into the caches which is now stale.  The
 1358                  * caches are invalidated from the outermost to innermost; the
 1359                  * prefetches could be happening right now, and if L1 were
 1360                  * invalidated first, stale L2 data could be prefetched into L1.
 1361                  */
 1362                 if (op & BUS_DMASYNC_POSTREAD) {
 1363                         while (bpage != NULL) {
 1364                                 vm_offset_t startv;
 1365                                 vm_paddr_t startp;
 1366                                 int len;
 1367 
 1368                                 startv = bpage->vaddr &~ arm_dcache_align_mask;
 1369                                 startp = bpage->busaddr &~ arm_dcache_align_mask;
 1370                                 len = bpage->datacount;
 1371                                 
 1372                                 if (startv != bpage->vaddr)
 1373                                         len += bpage->vaddr & arm_dcache_align_mask;
 1374                                 if (len & arm_dcache_align_mask) 
 1375                                         len = (len -
 1376                                             (len & arm_dcache_align_mask)) +
 1377                                             arm_dcache_align;
 1378                                 l2cache_inv_range(startv, startp, len);
 1379                                 cpu_dcache_inv_range(startv, len);
 1380                                 if (bpage->datavaddr != 0)
 1381                                         bcopy((void *)bpage->vaddr,
 1382                                             (void *)bpage->datavaddr,
 1383                                             bpage->datacount);
 1384                                 else
 1385                                         physcopyin((void *)bpage->vaddr,
 1386                                             bpage->dataaddr,
 1387                                             bpage->datacount);
 1388                                 bpage = STAILQ_NEXT(bpage, links);
 1389                         }
 1390                         dmat->bounce_zone->total_bounced++;
 1391                 }
 1392         }
 1393 
 1394         /*
 1395          * For COHERENT memory no cache maintenance is necessary, but ensure all
 1396          * writes have reached memory for the PREWRITE case.  No action is
 1397          * needed for a PREREAD without PREWRITE also set, because that would
 1398          * imply that the cpu had written to the COHERENT buffer and expected
 1399          * the dma device to see that change, and by definition a PREWRITE sync
 1400          * is required to make that happen.
 1401          */
 1402         if (map->flags & DMAMAP_COHERENT) {
 1403                 if (op & BUS_DMASYNC_PREWRITE) {
 1404                         dsb();
 1405                         cpu_l2cache_drain_writebuf();
 1406                 }
 1407                 return;
 1408         }
 1409 
 1410         /*
 1411          * Cache maintenance for normal (non-COHERENT non-bounce) buffers.  All
 1412          * the comments about the sequences for flushing cache levels in the
 1413          * bounce buffer code above apply here as well.  In particular, the fact
 1414          * that the sequence is inner-to-outer for PREREAD invalidation and
 1415          * outer-to-inner for POSTREAD invalidation is not a mistake.
 1416          */
 1417         if (map->sync_count != 0) {
 1418                 if (!pmap_dmap_iscurrent(map->pmap))
 1419                         panic("_bus_dmamap_sync: wrong user map for sync.");
 1420 
 1421                 sl = &map->slist[0];
 1422                 end = &map->slist[map->sync_count];
 1423                 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
 1424                     "performing sync", __func__, dmat, dmat->flags, op);
 1425 
 1426                 switch (op) {
 1427                 case BUS_DMASYNC_PREWRITE:
 1428                 case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD:
 1429                         while (sl != end) {
 1430                                 cpu_dcache_wb_range(sl->vaddr, sl->datacount);
 1431                                 l2cache_wb_range(sl->vaddr, sl->busaddr,
 1432                                     sl->datacount);
 1433                                 sl++;
 1434                         }
 1435                         break;
 1436 
 1437                 case BUS_DMASYNC_PREREAD:
 1438                         /*
 1439                          * An mbuf may start in the middle of a cacheline. There
 1440                          * will be no cpu writes to the beginning of that line
 1441                          * (which contains the mbuf header) while dma is in
 1442                          * progress.  Handle that case by doing a writeback of
 1443                          * just the first cacheline before invalidating the
 1444                          * overall buffer.  Any mbuf in a chain may have this
 1445                          * misalignment.  Buffers which are not mbufs bounce if
 1446                          * they are not aligned to a cacheline.
 1447                          */
 1448                         while (sl != end) {
 1449                                 if (sl->vaddr & arm_dcache_align_mask) {
 1450                                         KASSERT(map->flags & DMAMAP_MBUF,
 1451                                             ("unaligned buffer is not an mbuf"));
 1452                                         cpu_dcache_wb_range(sl->vaddr, 1);
 1453                                         l2cache_wb_range(sl->vaddr,
 1454                                             sl->busaddr, 1);
 1455                                 }
 1456                                 cpu_dcache_inv_range(sl->vaddr, sl->datacount);
 1457                                 l2cache_inv_range(sl->vaddr, sl->busaddr, 
 1458                                     sl->datacount);
 1459                                 sl++;
 1460                         }
 1461                         break;
 1462 
 1463                 case BUS_DMASYNC_POSTWRITE:
 1464                         break;
 1465 
 1466                 case BUS_DMASYNC_POSTREAD:
 1467                 case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
 1468                         while (sl != end) {
 1469                                 l2cache_inv_range(sl->vaddr, sl->busaddr, 
 1470                                     sl->datacount);
 1471                                 cpu_dcache_inv_range(sl->vaddr, sl->datacount);
 1472                                 sl++;
 1473                         }
 1474                         break;
 1475 
 1476                 default:
 1477                         panic("unsupported combination of sync operations: 0x%08x", op);
 1478                         break;
 1479                 }
 1480         }
 1481 }
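
      /*
       * Illustrative sketch: a driver typically brackets a device-to-memory
       * transfer with sync calls like the following.  The names sc,
       * sc->dmat, sc->map, and start_hw_read() are hypothetical.
       *
       *      bus_dmamap_sync(sc->dmat, sc->map, BUS_DMASYNC_PREREAD);
       *      start_hw_read(sc);      (device DMAs into the mapped buffer)
       *      (wait for the transfer to complete)
       *      bus_dmamap_sync(sc->dmat, sc->map, BUS_DMASYNC_POSTREAD);
       *      (the CPU may now safely read the freshly transferred data)
       */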
 1482 
 1483 static void
 1484 init_bounce_pages(void *dummy __unused)
 1485 {
 1486 
 1487         total_bpages = 0;
 1488         STAILQ_INIT(&bounce_zone_list);
 1489         STAILQ_INIT(&bounce_map_waitinglist);
 1490         STAILQ_INIT(&bounce_map_callbacklist);
 1491         mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
 1492 }
 1493 SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);
 1494 
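      /*
       * Convenience accessors for a bounce zone's sysctl context and the
       * top of its sysctl tree.
       */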
 1495 static struct sysctl_ctx_list *
 1496 busdma_sysctl_tree(struct bounce_zone *bz)
 1497 {
 1498 
 1499         return (&bz->sysctl_tree);
 1500 }
 1501 
 1502 static struct sysctl_oid *
 1503 busdma_sysctl_tree_top(struct bounce_zone *bz)
 1504 {
 1505 
 1506         return (bz->sysctl_tree_top);
 1507 }
 1508 
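      /*
       * Find an existing bounce zone whose constraints (alignment and
       * lowaddr) satisfy this tag, or create a new zone and attach the
       * sysctl nodes that export its statistics.
       */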
 1509 static int
 1510 alloc_bounce_zone(bus_dma_tag_t dmat)
 1511 {
 1512         struct bounce_zone *bz;
 1513 
 1514         /* Check to see if we already have a suitable zone */
 1515         STAILQ_FOREACH(bz, &bounce_zone_list, links) {
 1516                 if ((dmat->alignment <= bz->alignment) &&
 1517                     (dmat->lowaddr >= bz->lowaddr)) {
 1518                         dmat->bounce_zone = bz;
 1519                         return (0);
 1520                 }
 1521         }
 1522 
 1523         if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
 1524             M_NOWAIT | M_ZERO)) == NULL)
 1525                 return (ENOMEM);
 1526 
 1527         STAILQ_INIT(&bz->bounce_page_list);
 1528         bz->free_bpages = 0;
 1529         bz->reserved_bpages = 0;
 1530         bz->active_bpages = 0;
 1531         bz->lowaddr = dmat->lowaddr;
 1532         bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
 1533         bz->map_count = 0;
 1534         snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
 1535         busdma_zonecount++;
 1536         snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
 1537         STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
 1538         dmat->bounce_zone = bz;
 1539 
 1540         sysctl_ctx_init(&bz->sysctl_tree);
 1541         bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
 1542             SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
 1543             CTLFLAG_RD, 0, "");
 1544         if (bz->sysctl_tree_top == NULL) {
 1545                 sysctl_ctx_free(&bz->sysctl_tree);
 1546                 return (0);     /* XXX error code? */
 1547         }
 1548 
 1549         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
 1550             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
 1551             "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
 1552             "Total bounce pages");
 1553         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
 1554             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
 1555             "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
 1556             "Free bounce pages");
 1557         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
 1558             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
 1559             "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
 1560             "Reserved bounce pages");
 1561         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
 1562             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
 1563             "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
 1564             "Active bounce pages");
 1565         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
 1566             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
 1567             "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
 1568             "Total bounce requests (pages bounced)");
 1569         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
 1570             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
 1571             "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
 1572             "Total bounce requests that were deferred");
 1573         SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
 1574             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
 1575             "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
 1576         SYSCTL_ADD_ULONG(busdma_sysctl_tree(bz),
 1577             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
 1578             "alignment", CTLFLAG_RD, &bz->alignment, "");
 1579 
 1580         return (0);
 1581 }
 1582 
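      /*
       * Add up to numpages single-page bounce buffers, allocated below the
       * zone's lowaddr limit, to the zone's free list.  Returns the number
       * of pages actually allocated.
       */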
 1583 static int
 1584 alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
 1585 {
 1586         struct bounce_zone *bz;
 1587         int count;
 1588 
 1589         bz = dmat->bounce_zone;
 1590         count = 0;
 1591         while (numpages > 0) {
 1592                 struct bounce_page *bpage;
 1593 
 1594                 bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
 1595                     M_NOWAIT | M_ZERO);
 1596 
 1597                 if (bpage == NULL)
 1598                         break;
 1599                 bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
 1600                     M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
 1601                 if (bpage->vaddr == 0) {
 1602                         free(bpage, M_DEVBUF);
 1603                         break;
 1604                 }
 1605                 bpage->busaddr = pmap_kextract(bpage->vaddr);
 1606                 mtx_lock(&bounce_lock);
 1607                 STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
 1608                 total_bpages++;
 1609                 bz->total_bpages++;
 1610                 bz->free_bpages++;
 1611                 mtx_unlock(&bounce_lock);
 1612                 count++;
 1613                 numpages--;
 1614         }
 1615         return (count);
 1616 }
 1617 
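      /*
       * Reserve free bounce pages for the map and return the number of
       * pages still outstanding.  When commit is zero, nothing is reserved
       * unless the entire remaining request can be satisfied.
       */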
 1618 static int
 1619 reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
 1620 {
 1621         struct bounce_zone *bz;
 1622         int pages;
 1623 
 1624         mtx_assert(&bounce_lock, MA_OWNED);
 1625         bz = dmat->bounce_zone;
 1626         pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
 1627         if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
 1628                 return (map->pagesneeded - (map->pagesreserved + pages));
 1629         bz->free_bpages -= pages;
 1630         bz->reserved_bpages += pages;
 1631         map->pagesreserved += pages;
 1632         pages = map->pagesneeded - map->pagesreserved;
 1633 
 1634         return (pages);
 1635 }
 1636 
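      /*
       * Take a previously reserved bounce page from the zone, record where
       * the caller's data lives (virtual and/or physical address and size)
       * so it can be copied at sync time, and return the bus address that
       * should be handed to the device for this segment.
       */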
 1637 static bus_addr_t
 1638 add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
 1639                 bus_addr_t addr, bus_size_t size)
 1640 {
 1641         struct bounce_zone *bz;
 1642         struct bounce_page *bpage;
 1643 
 1644         KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
 1645         KASSERT(map != NULL,
 1646             ("add_bounce_page: bad map %p", map));
 1647 
 1648         bz = dmat->bounce_zone;
 1649         if (map->pagesneeded == 0)
 1650                 panic("add_bounce_page: map doesn't need any pages");
 1651         map->pagesneeded--;
 1652 
 1653         if (map->pagesreserved == 0)
 1654                 panic("add_bounce_page: map doesn't need any pages");
 1655         map->pagesreserved--;
 1656 
 1657         mtx_lock(&bounce_lock);
 1658         bpage = STAILQ_FIRST(&bz->bounce_page_list);
 1659         if (bpage == NULL)
 1660                 panic("add_bounce_page: free page list is empty");
 1661 
 1662         STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
 1663         bz->reserved_bpages--;
 1664         bz->active_bpages++;
 1665         mtx_unlock(&bounce_lock);
 1666 
 1667         if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
 1668                 /* Page offset needs to be preserved. */
 1669                 bpage->vaddr |= addr & PAGE_MASK;
 1670                 bpage->busaddr |= addr & PAGE_MASK;
 1671         }
 1672         bpage->datavaddr = vaddr;
 1673         bpage->dataaddr = addr;
 1674         bpage->datacount = size;
 1675         STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
 1676         return (bpage->busaddr);
 1677 }
 1678 
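      /*
       * Return a bounce page to its zone's free list.  If a map is waiting
       * for bounce pages, try to complete its reservation and schedule the
       * deferred load callback via software interrupt.
       */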
 1679 static void
 1680 free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
 1681 {
 1682         struct bus_dmamap *map;
 1683         struct bounce_zone *bz;
 1684 
 1685         bz = dmat->bounce_zone;
 1686         bpage->datavaddr = 0;
 1687         bpage->datacount = 0;
 1688         if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
 1689                 /*
 1690                  * Reset the bounce page to start at offset 0.  Other uses
 1691                  * of this bounce page may need to store a full page of
 1692                  * data and/or assume it starts on a page boundary.
 1693                  */
 1694                 bpage->vaddr &= ~PAGE_MASK;
 1695                 bpage->busaddr &= ~PAGE_MASK;
 1696         }
 1697 
 1698         mtx_lock(&bounce_lock);
 1699         STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
 1700         bz->free_bpages++;
 1701         bz->active_bpages--;
 1702         if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
 1703                 if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
 1704                         STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
 1705                         STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
 1706                             map, links);
 1707                         busdma_swi_pending = 1;
 1708                         bz->total_deferred++;
 1709                         swi_sched(vm_ih, 0);
 1710                 }
 1711         }
 1712         mtx_unlock(&bounce_lock);
 1713 }
 1714 
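      /*
       * Software interrupt handler that retries deferred map-load requests
       * once bounce pages have become available.
       */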
 1715 void
 1716 busdma_swi(void)
 1717 {
 1718         bus_dma_tag_t dmat;
 1719         struct bus_dmamap *map;
 1720 
 1721         mtx_lock(&bounce_lock);
 1722         while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
 1723                 STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
 1724                 mtx_unlock(&bounce_lock);
 1725                 dmat = map->dmat;
 1726                 dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_LOCK);
 1727                 bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
 1728                     map->callback_arg, BUS_DMA_WAITOK);
 1729                 dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_UNLOCK);
 1730                 mtx_lock(&bounce_lock);
 1731         }
 1732         mtx_unlock(&bounce_lock);
 1733 }
