FreeBSD/Linux Kernel Cross Reference
sys/x86/x86/busdma_bounce.c


    1 /*-
    2  * Copyright (c) 1997, 1998 Justin T. Gibbs.
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions, and the following disclaimer,
   10  *    without modification, immediately at the beginning of the file.
   11  * 2. The name of the author may not be used to endorse or promote products
   12  *    derived from this software without specific prior written permission.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
   18  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  */
   26 
   27 #include <sys/cdefs.h>
   28 __FBSDID("$FreeBSD$");
   29 
   30 #include <sys/param.h>
   31 #include <sys/systm.h>
   32 #include <sys/malloc.h>
   33 #include <sys/bus.h>
   34 #include <sys/interrupt.h>
   35 #include <sys/kernel.h>
   36 #include <sys/ktr.h>
   37 #include <sys/lock.h>
   38 #include <sys/proc.h>
   39 #include <sys/memdesc.h>
   40 #include <sys/mutex.h>
   41 #include <sys/sysctl.h>
   42 #include <sys/uio.h>
   43 
   44 #include <vm/vm.h>
   45 #include <vm/vm_extern.h>
   46 #include <vm/vm_kern.h>
   47 #include <vm/vm_page.h>
   48 #include <vm/vm_map.h>
   49 
   50 #include <machine/atomic.h>
   51 #include <machine/bus.h>
   52 #include <machine/md_var.h>
   53 #include <machine/specialreg.h>
   54 #include <x86/include/busdma_impl.h>
   55 
   56 #ifdef __i386__
   57 #define MAX_BPAGES 512
   58 #else
   59 #define MAX_BPAGES 8192
   60 #endif
   61 
   62 enum {
   63         BUS_DMA_COULD_BOUNCE    = 0x01,
   64         BUS_DMA_MIN_ALLOC_COMP  = 0x02,
   65         BUS_DMA_KMEM_ALLOC      = 0x04,
   66 };
   67 
   68 struct bounce_zone;
   69 
   70 struct bus_dma_tag {
   71         struct bus_dma_tag_common common;
   72         int                     map_count;
   73         int                     bounce_flags;
   74         bus_dma_segment_t       *segments;
   75         struct bounce_zone      *bounce_zone;
   76 };
   77 
   78 struct bounce_page {
   79         vm_offset_t     vaddr;          /* kva of bounce buffer */
   80         bus_addr_t      busaddr;        /* Physical address */
   81         vm_offset_t     datavaddr;      /* kva of client data */
   82         vm_offset_t     dataoffs;       /* page offset of client data */
   83         vm_page_t       datapage[2];    /* physical page(s) of client data */
   84         bus_size_t      datacount;      /* client data count */
   85         STAILQ_ENTRY(bounce_page) links;
   86 };
   87 
   88 int busdma_swi_pending;
   89 
   90 struct bounce_zone {
   91         STAILQ_ENTRY(bounce_zone) links;
   92         STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
   93         int             total_bpages;
   94         int             free_bpages;
   95         int             reserved_bpages;
   96         int             active_bpages;
   97         int             total_bounced;
   98         int             total_deferred;
   99         int             map_count;
  100         bus_size_t      alignment;
  101         bus_addr_t      lowaddr;
  102         char            zoneid[8];
  103         char            lowaddrid[20];
  104         struct sysctl_ctx_list sysctl_tree;
  105         struct sysctl_oid *sysctl_tree_top;
  106 };
  107 
  108 static struct mtx bounce_lock;
  109 static int total_bpages;
  110 static int busdma_zonecount;
  111 static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
  112 
  113 static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
  114 SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
  115            "Total bounce pages");
  116 
  117 struct bus_dmamap {
  118         struct bp_list         bpages;
  119         int                    pagesneeded;
  120         int                    pagesreserved;
  121         bus_dma_tag_t          dmat;
  122         struct memdesc         mem;
  123         bus_dmamap_callback_t *callback;
  124         void                  *callback_arg;
  125         STAILQ_ENTRY(bus_dmamap) links;
  126 };
  127 
  128 static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
  129 static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
  130 static struct bus_dmamap nobounce_dmamap;
  131 
  132 static void init_bounce_pages(void *dummy);
  133 static int alloc_bounce_zone(bus_dma_tag_t dmat);
  134 static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
  135 static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
  136                                 int commit);
  137 static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
  138                                   vm_offset_t vaddr, bus_addr_t addr1,
  139                                   bus_addr_t addr2, bus_size_t size);
  140 static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
  141 static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
  142                                     pmap_t pmap, void *buf, bus_size_t buflen,
  143                                     int flags);
  144 static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
  145                                    vm_paddr_t buf, bus_size_t buflen,
  146                                    int flags);
  147 static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
  148                                      int flags);
  149 
  150 /*
  151  * Allocate a device specific dma_tag.
  152  */
  153 static int
  154 bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
  155     bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
  156     bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
  157     int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
  158     void *lockfuncarg, bus_dma_tag_t *dmat)
  159 {
  160         bus_dma_tag_t newtag;
  161         int error;
  162 
  163         *dmat = NULL;
  164         error = common_bus_dma_tag_create(parent != NULL ? &parent->common :
  165             NULL, alignment, boundary, lowaddr, highaddr, filter, filterarg,
  166             maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg,
  167             sizeof (struct bus_dma_tag), (void **)&newtag);
  168         if (error != 0)
  169                 return (error);
  170 
  171         newtag->common.impl = &bus_dma_bounce_impl;
  172         newtag->map_count = 0;
  173         newtag->segments = NULL;
  174 
  175         if (parent != NULL && ((newtag->common.filter != NULL) ||
  176             ((parent->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0)))
  177                 newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE;
  178 
  179         if (newtag->common.lowaddr < ptoa((vm_paddr_t)Maxmem) ||
  180             newtag->common.alignment > 1)
  181                 newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE;
  182 
  183         if (((newtag->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
  184             (flags & BUS_DMA_ALLOCNOW) != 0) {
  185                 struct bounce_zone *bz;
  186 
  187                 /* Must bounce */
  188                 if ((error = alloc_bounce_zone(newtag)) != 0) {
  189                         free(newtag, M_DEVBUF);
  190                         return (error);
  191                 }
  192                 bz = newtag->bounce_zone;
  193 
  194                 if (ptoa(bz->total_bpages) < maxsize) {
  195                         int pages;
  196 
  197                         pages = atop(maxsize) - bz->total_bpages;
  198 
  199                         /* Add pages to our bounce pool */
  200                         if (alloc_bounce_pages(newtag, pages) < pages)
  201                                 error = ENOMEM;
  202                 }
  203                 /* Performed initial allocation */
  204                 newtag->bounce_flags |= BUS_DMA_MIN_ALLOC_COMP;
  205         } else
  206                 error = 0;
  207         
  208         if (error != 0)
  209                 free(newtag, M_DEVBUF);
  210         else
  211                 *dmat = newtag;
  212         CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
  213             __func__, newtag, (newtag != NULL ? newtag->common.flags : 0),
  214             error);
  215         return (error);
  216 }
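
/*
 * Editor's example (hypothetical, not part of the original file): how a
 * driver reaches bounce_bus_dma_tag_create() through the public
 * bus_dma_tag_create(9) interface.  The "example_softc" layout is
 * assumed; the constants come from <machine/bus.h> and <sys/param.h>.
 * Asking for a lowaddr below the top of RAM (or an alignment above 1)
 * is what sets BUS_DMA_COULD_BOUNCE above.
 */
#if 0
struct example_softc {
        bus_dma_tag_t   dma_tag;
};

static int
example_dma_setup(device_t dev, struct example_softc *sc)
{

        return (bus_dma_tag_create(
            bus_get_dma_tag(dev),       /* inherit parent restrictions */
            4,                          /* alignment: 4 bytes */
            0,                          /* no boundary restriction */
            BUS_SPACE_MAXADDR_32BIT,    /* lowaddr: bounce above 4 GiB */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* no filter function */
            MAXPHYS,                    /* maxsize of one mapping */
            1,                          /* nsegments */
            MAXPHYS,                    /* maxsegsz */
            0,                          /* flags */
            NULL, NULL,                 /* default lock function */
            &sc->dma_tag));
}
#endif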
  217 
  218 static int
  219 bounce_bus_dma_tag_destroy(bus_dma_tag_t dmat)
  220 {
  221         bus_dma_tag_t dmat_copy, parent;
  222         int error;
  223 
  224         error = 0;
  225         dmat_copy = dmat;
  226 
  227         if (dmat != NULL) {
  228                 if (dmat->map_count != 0) {
  229                         error = EBUSY;
  230                         goto out;
  231                 }
  232                 while (dmat != NULL) {
  233                         parent = (bus_dma_tag_t)dmat->common.parent;
  234                         atomic_subtract_int(&dmat->common.ref_count, 1);
  235                         if (dmat->common.ref_count == 0) {
  236                                 if (dmat->segments != NULL)
  237                                         free(dmat->segments, M_DEVBUF);
  238                                 free(dmat, M_DEVBUF);
  239                                 /*
  240                                  * Last reference count, so
  241                                  * release our reference
  242                                  * count on our parent.
  243                                  */
  244                                 dmat = parent;
  245                         } else
  246                                 dmat = NULL;
  247                 }
  248         }
  249 out:
  250         CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
  251         return (error);
  252 }
  253 
  254 /*
  255  * Allocate a handle for mapping from kva/uva/physical
  256  * address space into bus device space.
  257  */
  258 static int
  259 bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
  260 {
  261         struct bounce_zone *bz;
  262         int error, maxpages, pages;
  263 
  264         error = 0;
  265 
  266         if (dmat->segments == NULL) {
  267                 dmat->segments = (bus_dma_segment_t *)malloc(
  268                     sizeof(bus_dma_segment_t) * dmat->common.nsegments,
  269                     M_DEVBUF, M_NOWAIT);
  270                 if (dmat->segments == NULL) {
  271                         CTR3(KTR_BUSDMA, "%s: tag %p error %d",
  272                             __func__, dmat, ENOMEM);
  273                         return (ENOMEM);
  274                 }
  275         }
  276 
  277         /*
  278          * Bouncing might be required if the driver asks for an active
  279          * exclusion region, a data alignment that is stricter than 1, and/or
  280          * an active address boundary.
  281          */
  282         if (dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) {
  283                 /* Must bounce */
  284                 if (dmat->bounce_zone == NULL) {
  285                         if ((error = alloc_bounce_zone(dmat)) != 0)
  286                                 return (error);
  287                 }
  288                 bz = dmat->bounce_zone;
  289 
  290                 *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
  291                     M_NOWAIT | M_ZERO);
  292                 if (*mapp == NULL) {
  293                         CTR3(KTR_BUSDMA, "%s: tag %p error %d",
  294                             __func__, dmat, ENOMEM);
  295                         return (ENOMEM);
  296                 }
  297 
  298                 /* Initialize the new map */
  299                 STAILQ_INIT(&((*mapp)->bpages));
  300 
  301                 /*
  302                  * Attempt to add pages to our pool on a per-instance
  303                  * basis up to a sane limit.
  304                  */
  305                 if (dmat->common.alignment > 1)
  306                         maxpages = MAX_BPAGES;
  307                 else
  308                         maxpages = MIN(MAX_BPAGES, Maxmem -
  309                             atop(dmat->common.lowaddr));
  310                 if ((dmat->bounce_flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
  311                     (bz->map_count > 0 && bz->total_bpages < maxpages)) {
  312                         pages = MAX(atop(dmat->common.maxsize), 1);
  313                         pages = MIN(maxpages - bz->total_bpages, pages);
  314                         pages = MAX(pages, 1);
  315                         if (alloc_bounce_pages(dmat, pages) < pages)
  316                                 error = ENOMEM;
  317                         if ((dmat->bounce_flags & BUS_DMA_MIN_ALLOC_COMP)
  318                             == 0) {
  319                                 if (error == 0) {
  320                                         dmat->bounce_flags |=
  321                                             BUS_DMA_MIN_ALLOC_COMP;
  322                                 }
  323                         } else
  324                                 error = 0;
  325                 }
  326                 bz->map_count++;
  327         } else {
  328                 *mapp = NULL;
  329         }
  330         if (error == 0)
  331                 dmat->map_count++;
  332         CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
  333             __func__, dmat, dmat->common.flags, error);
  334         return (error);
  335 }
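
/*
 * Editor's example (hypothetical, not part of the original file): the
 * matching driver-side call.  For a tag that may bounce, this is where
 * the per-map bounce pool is grown above; for a tag that cannot bounce,
 * *mapp comes back NULL and loads proceed without a bounce map.
 */
#if 0
        bus_dmamap_t map;
        int error;

        error = bus_dmamap_create(sc->dma_tag, 0, &map);
        if (error != 0)
                return (error);
#endif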
  336 
  337 /*
  338  * Destroy a handle for mapping from kva/uva/physical
  339  * address space into bus device space.
  340  */
  341 static int
  342 bounce_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
  343 {
  344 
  345         if (map != NULL && map != &nobounce_dmamap) {
  346                 if (STAILQ_FIRST(&map->bpages) != NULL) {
  347                         CTR3(KTR_BUSDMA, "%s: tag %p error %d",
  348                             __func__, dmat, EBUSY);
  349                         return (EBUSY);
  350                 }
  351                 if (dmat->bounce_zone)
  352                         dmat->bounce_zone->map_count--;
  353                 free(map, M_DEVBUF);
  354         }
  355         dmat->map_count--;
  356         CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
  357         return (0);
  358 }
  359 
  360 
  361 /*
  362  * Allocate a piece of memory that can be efficiently mapped into
   363  * bus device space based on the constraints listed in the dma tag.
   364  * A dmamap for use with dmamap_load is also allocated.
  365  */
  366 static int
  367 bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
  368     bus_dmamap_t *mapp)
  369 {
  370         vm_memattr_t attr;
  371         int mflags;
  372 
  373         if (flags & BUS_DMA_NOWAIT)
  374                 mflags = M_NOWAIT;
  375         else
  376                 mflags = M_WAITOK;
  377 
  378         /* If we succeed, no mapping/bouncing will be required */
  379         *mapp = NULL;
  380 
  381         if (dmat->segments == NULL) {
  382                 dmat->segments = (bus_dma_segment_t *)malloc(
  383                     sizeof(bus_dma_segment_t) * dmat->common.nsegments,
  384                     M_DEVBUF, mflags);
  385                 if (dmat->segments == NULL) {
  386                         CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
  387                             __func__, dmat, dmat->common.flags, ENOMEM);
  388                         return (ENOMEM);
  389                 }
  390         }
  391         if (flags & BUS_DMA_ZERO)
  392                 mflags |= M_ZERO;
  393         if (flags & BUS_DMA_NOCACHE)
  394                 attr = VM_MEMATTR_UNCACHEABLE;
  395         else
  396                 attr = VM_MEMATTR_DEFAULT;
  397 
  398         /*
  399          * Allocate the buffer from the malloc(9) allocator if...
  400          *  - It's small enough to fit into a single power of two sized bucket.
   401          *  - The alignment is less than or equal to the maximum size.
  402          *  - The low address requirement is fulfilled.
  403          * else allocate non-contiguous pages if...
   404          *  - The page count that could get allocated doesn't exceed
   405          *    nsegments, even when the maximum segment size is less
   406          *    than PAGE_SIZE.
  407          *  - The alignment constraint isn't larger than a page boundary.
  408          *  - There are no boundary-crossing constraints.
  409          * else allocate a block of contiguous pages because one or more of the
  410          * constraints is something that only the contig allocator can fulfill.
  411          *
   412          * NOTE: The (dmat->common.alignment <= dmat->common.maxsize) check
  413          * below is just a quick hack. The exact alignment guarantees
  414          * of malloc(9) need to be nailed down, and the code below
  415          * should be rewritten to take that into account.
  416          *
  417          * In the meantime warn the user if malloc gets it wrong.
  418          */
  419         if ((dmat->common.maxsize <= PAGE_SIZE) &&
  420            (dmat->common.alignment <= dmat->common.maxsize) &&
  421             dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
  422             attr == VM_MEMATTR_DEFAULT) {
  423                 *vaddr = malloc(dmat->common.maxsize, M_DEVBUF, mflags);
  424         } else if (dmat->common.nsegments >=
  425             howmany(dmat->common.maxsize, MIN(dmat->common.maxsegsz, PAGE_SIZE)) &&
  426             dmat->common.alignment <= PAGE_SIZE &&
  427             (dmat->common.boundary % PAGE_SIZE) == 0) {
  428                 /* Page-based multi-segment allocations allowed */
  429                 *vaddr = (void *)kmem_alloc_attr(kernel_arena,
  430                     dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr,
  431                     attr);
  432                 dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC;
  433         } else {
  434                 *vaddr = (void *)kmem_alloc_contig(kernel_arena,
  435                     dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr,
  436                     dmat->common.alignment != 0 ? dmat->common.alignment : 1ul,
  437                     dmat->common.boundary, attr);
  438                 dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC;
  439         }
  440         if (*vaddr == NULL) {
  441                 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
  442                     __func__, dmat, dmat->common.flags, ENOMEM);
  443                 return (ENOMEM);
  444         } else if (vtophys(*vaddr) & (dmat->common.alignment - 1)) {
  445                 printf("bus_dmamem_alloc failed to align memory properly.\n");
  446         }
  447         CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
  448             __func__, dmat, dmat->common.flags, 0);
  449         return (0);
  450 }
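
/*
 * Editor's sketch (not part of the original file): the three-way
 * allocator choice above restated as a standalone predicate.  The names
 * are hypothetical; the conditions mirror the if/else chain in
 * bounce_bus_dmamem_alloc().
 */
#if 0
enum alloc_path { PATH_MALLOC, PATH_KMEM_ATTR, PATH_KMEM_CONTIG };

static enum alloc_path
choose_alloc_path(bus_size_t maxsize, bus_size_t alignment,
    bus_addr_t lowaddr, bus_size_t maxsegsz, int nsegments,
    bus_addr_t boundary, vm_memattr_t attr)
{

        if (maxsize <= PAGE_SIZE && alignment <= maxsize &&
            lowaddr >= ptoa((vm_paddr_t)Maxmem) && attr == VM_MEMATTR_DEFAULT)
                return (PATH_MALLOC);           /* one malloc(9) bucket */
        if (nsegments >= howmany(maxsize, MIN(maxsegsz, PAGE_SIZE)) &&
            alignment <= PAGE_SIZE && (boundary % PAGE_SIZE) == 0)
                return (PATH_KMEM_ATTR);        /* discontiguous pages */
        return (PATH_KMEM_CONTIG);              /* contiguous physical run */
}
#endif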
  451 
  452 /*
   453  * Free a piece of memory and its associated dmamap that were allocated
   454  * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
  455  */
  456 static void
  457 bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
  458 {
  459         /*
  460          * dmamem does not need to be bounced, so the map should be
  461          * NULL and the BUS_DMA_KMEM_ALLOC flag cleared if malloc()
  462          * was used and set if kmem_alloc_contig() was used.
  463          */
  464         if (map != NULL)
  465                 panic("bus_dmamem_free: Invalid map freed\n");
  466         if ((dmat->bounce_flags & BUS_DMA_KMEM_ALLOC) == 0)
  467                 free(vaddr, M_DEVBUF);
  468         else
  469                 kmem_free(kernel_arena, (vm_offset_t)vaddr,
  470                     dmat->common.maxsize);
  471         CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,
  472             dmat->bounce_flags);
  473 }
  474 
  475 static void
  476 _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
  477     bus_size_t buflen, int flags)
  478 {
  479         bus_addr_t curaddr;
  480         bus_size_t sgsize;
  481 
  482         if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
  483                 /*
  484                  * Count the number of bounce pages
  485                  * needed in order to complete this transfer
  486                  */
  487                 curaddr = buf;
  488                 while (buflen != 0) {
  489                         sgsize = MIN(buflen, dmat->common.maxsegsz);
  490                         if (bus_dma_run_filter(&dmat->common, curaddr)) {
  491                                 sgsize = MIN(sgsize,
  492                                     PAGE_SIZE - (curaddr & PAGE_MASK));
  493                                 map->pagesneeded++;
  494                         }
  495                         curaddr += sgsize;
  496                         buflen -= sgsize;
  497                 }
  498                 CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
  499         }
  500 }
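
/*
 * Editor's worked example of the count above: with maxsegsz >= PAGE_SIZE
 * and the filter matching everywhere, a 0x1800-byte buffer starting at
 * page offset 0x800 is counted in two chunks, 0x800 bytes (up to the
 * first page boundary) and 0x1000 bytes (one full page), so
 * map->pagesneeded grows by 2.
 */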
  501 
  502 static void
  503 _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
  504     void *buf, bus_size_t buflen, int flags)
  505 {
  506         vm_offset_t vaddr;
  507         vm_offset_t vendaddr;
  508         bus_addr_t paddr;
  509         bus_size_t sg_len;
  510 
  511         if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
  512                 CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
  513                     "alignment= %d", dmat->common.lowaddr,
  514                     ptoa((vm_paddr_t)Maxmem),
  515                     dmat->common.boundary, dmat->common.alignment);
  516                 CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
  517                     map, &nobounce_dmamap, map->pagesneeded);
  518                 /*
  519                  * Count the number of bounce pages
  520                  * needed in order to complete this transfer
  521                  */
  522                 vaddr = (vm_offset_t)buf;
  523                 vendaddr = (vm_offset_t)buf + buflen;
  524 
  525                 while (vaddr < vendaddr) {
  526                         sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
  527                         if (pmap == kernel_pmap)
  528                                 paddr = pmap_kextract(vaddr);
  529                         else
  530                                 paddr = pmap_extract(pmap, vaddr);
  531                         if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
  532                                 sg_len = roundup2(sg_len,
  533                                     dmat->common.alignment);
  534                                 map->pagesneeded++;
  535                         }
  536                         vaddr += sg_len;
  537                 }
  538                 CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
  539         }
  540 }
  541 
  542 static void
  543 _bus_dmamap_count_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
  544     int ma_offs, bus_size_t buflen, int flags)
  545 {
  546         bus_size_t sg_len, max_sgsize;
  547         int page_index;
  548         vm_paddr_t paddr;
  549 
  550         if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
  551                 CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
  552                     "alignment= %d", dmat->common.lowaddr,
  553                     ptoa((vm_paddr_t)Maxmem),
  554                     dmat->common.boundary, dmat->common.alignment);
  555                 CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
  556                     map, &nobounce_dmamap, map->pagesneeded);
  557 
  558                 /*
  559                  * Count the number of bounce pages
  560                  * needed in order to complete this transfer
  561                  */
  562                 page_index = 0;
  563                 while (buflen > 0) {
  564                         paddr = VM_PAGE_TO_PHYS(ma[page_index]) + ma_offs;
  565                         sg_len = PAGE_SIZE - ma_offs;
  566                         max_sgsize = MIN(buflen, dmat->common.maxsegsz);
  567                         sg_len = MIN(sg_len, max_sgsize);
  568                         if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
  569                                 sg_len = roundup2(sg_len,
  570                                     dmat->common.alignment);
  571                                 sg_len = MIN(sg_len, max_sgsize);
  572                                 KASSERT((sg_len & (dmat->common.alignment - 1))
  573                                     == 0, ("Segment size is not aligned"));
  574                                 map->pagesneeded++;
  575                         }
  576                         if (((ma_offs + sg_len) & ~PAGE_MASK) != 0)
  577                                 page_index++;
  578                         ma_offs = (ma_offs + sg_len) & PAGE_MASK;
  579                         KASSERT(buflen >= sg_len,
  580                             ("Segment length overruns original buffer"));
  581                         buflen -= sg_len;
  582                 }
  583                 CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
  584         }
  585 }
  586 
  587 static int
  588 _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
  589 {
  590 
  591         /* Reserve Necessary Bounce Pages */
  592         mtx_lock(&bounce_lock);
  593         if (flags & BUS_DMA_NOWAIT) {
  594                 if (reserve_bounce_pages(dmat, map, 0) != 0) {
  595                         mtx_unlock(&bounce_lock);
  596                         return (ENOMEM);
  597                 }
  598         } else {
  599                 if (reserve_bounce_pages(dmat, map, 1) != 0) {
  600                         /* Queue us for resources */
  601                         STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
  602                         mtx_unlock(&bounce_lock);
  603                         return (EINPROGRESS);
  604                 }
  605         }
  606         mtx_unlock(&bounce_lock);
  607 
  608         return (0);
  609 }
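
/*
 * Editor's example (hypothetical driver code, not part of the original
 * file): the EINPROGRESS path above as seen by the caller.  Without
 * BUS_DMA_NOWAIT the map is queued on bounce_map_waitinglist and the
 * callback fires later from busdma_swi() once bounce pages are freed.
 */
#if 0
static void
example_load_done(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        /* Program the device with segs[0].ds_addr / segs[0].ds_len. */
}

        error = bus_dmamap_load(sc->dma_tag, map, buf, buflen,
            example_load_done, sc, BUS_DMA_WAITOK);
        if (error == EINPROGRESS) {
                /* Deferred: example_load_done() runs asynchronously. */
        }
#endif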
  610 
  611 /*
  612  * Add a single contiguous physical range to the segment list.
  613  */
  614 static int
  615 _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
  616     bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
  617 {
  618         bus_addr_t baddr, bmask;
  619         int seg;
  620 
  621         /*
  622          * Make sure we don't cross any boundaries.
  623          */
  624         bmask = ~(dmat->common.boundary - 1);
  625         if (dmat->common.boundary > 0) {
  626                 baddr = (curaddr + dmat->common.boundary) & bmask;
  627                 if (sgsize > (baddr - curaddr))
  628                         sgsize = (baddr - curaddr);
  629         }
  630 
  631         /*
  632          * Insert chunk into a segment, coalescing with
  633          * previous segment if possible.
  634          */
  635         seg = *segp;
  636         if (seg == -1) {
  637                 seg = 0;
  638                 segs[seg].ds_addr = curaddr;
  639                 segs[seg].ds_len = sgsize;
  640         } else {
  641                 if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
  642                     (segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
  643                     (dmat->common.boundary == 0 ||
  644                      (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
  645                         segs[seg].ds_len += sgsize;
  646                 else {
  647                         if (++seg >= dmat->common.nsegments)
  648                                 return (0);
  649                         segs[seg].ds_addr = curaddr;
  650                         segs[seg].ds_len = sgsize;
  651                 }
  652         }
  653         *segp = seg;
  654         return (sgsize);
  655 }
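
/*
 * Editor's worked example of the boundary clipping above: with
 * boundary = 0x1000 (so bmask = ~0xFFF), curaddr = 0x2F80 and
 * sgsize = 0x200, baddr = (0x2F80 + 0x1000) & ~0xFFF = 0x3000, and the
 * chunk is clipped to 0x3000 - 0x2F80 = 0x80 bytes so that no segment
 * ever crosses the 0x3000 boundary.
 */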
  656 
  657 /*
  658  * Utility function to load a physical buffer.  segp contains
   659  * the starting segment on entrance, and the ending segment on exit.
  660  */
  661 static int
  662 bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
  663     vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
  664     int *segp)
  665 {
  666         bus_size_t sgsize;
  667         bus_addr_t curaddr;
  668         int error;
  669 
  670         if (map == NULL)
  671                 map = &nobounce_dmamap;
  672 
  673         if (segs == NULL)
  674                 segs = dmat->segments;
  675 
  676         if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
  677                 _bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
  678                 if (map->pagesneeded != 0) {
  679                         error = _bus_dmamap_reserve_pages(dmat, map, flags);
  680                         if (error)
  681                                 return (error);
  682                 }
  683         }
  684 
  685         while (buflen > 0) {
  686                 curaddr = buf;
  687                 sgsize = MIN(buflen, dmat->common.maxsegsz);
  688                 if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
  689                     map->pagesneeded != 0 &&
  690                     bus_dma_run_filter(&dmat->common, curaddr)) {
  691                         sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
  692                         curaddr = add_bounce_page(dmat, map, 0, curaddr, 0,
  693                             sgsize);
  694                 }
  695                 sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
  696                     segp);
  697                 if (sgsize == 0)
  698                         break;
  699                 buf += sgsize;
  700                 buflen -= sgsize;
  701         }
  702 
  703         /*
  704          * Did we fit?
  705          */
  706         return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
  707 }
  708 
  709 /*
  710  * Utility function to load a linear buffer.  segp contains
   711  * the starting segment on entrance, and the ending segment on exit.
  712  */
  713 static int
  714 bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
  715     bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
  716     int *segp)
  717 {
  718         bus_size_t sgsize, max_sgsize;
  719         bus_addr_t curaddr;
  720         vm_offset_t kvaddr, vaddr;
  721         int error;
  722 
  723         if (map == NULL)
  724                 map = &nobounce_dmamap;
  725 
  726         if (segs == NULL)
  727                 segs = dmat->segments;
  728 
  729         if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
  730                 _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
  731                 if (map->pagesneeded != 0) {
  732                         error = _bus_dmamap_reserve_pages(dmat, map, flags);
  733                         if (error)
  734                                 return (error);
  735                 }
  736         }
  737 
  738         vaddr = (vm_offset_t)buf;
  739         while (buflen > 0) {
  740                 /*
  741                  * Get the physical address for this segment.
  742                  */
  743                 if (pmap == kernel_pmap) {
  744                         curaddr = pmap_kextract(vaddr);
  745                         kvaddr = vaddr;
  746                 } else {
  747                         curaddr = pmap_extract(pmap, vaddr);
  748                         kvaddr = 0;
  749                 }
  750 
  751                 /*
  752                  * Compute the segment size, and adjust counts.
  753                  */
  754                 max_sgsize = MIN(buflen, dmat->common.maxsegsz);
  755                 sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
  756                 if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
  757                     map->pagesneeded != 0 &&
  758                     bus_dma_run_filter(&dmat->common, curaddr)) {
  759                         sgsize = roundup2(sgsize, dmat->common.alignment);
  760                         sgsize = MIN(sgsize, max_sgsize);
  761                         curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, 0,
  762                             sgsize);
  763                 } else {
  764                         sgsize = MIN(sgsize, max_sgsize);
  765                 }
  766                 sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
  767                     segp);
  768                 if (sgsize == 0)
  769                         break;
  770                 vaddr += sgsize;
  771                 buflen -= sgsize;
  772         }
  773 
  774         /*
  775          * Did we fit?
  776          */
  777         return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
  778 }
  779 
  780 static int
  781 bounce_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
  782     struct vm_page **ma, bus_size_t buflen, int ma_offs, int flags,
  783     bus_dma_segment_t *segs, int *segp)
  784 {
  785         vm_paddr_t paddr, next_paddr;
  786         int error, page_index;
  787         bus_size_t sgsize, max_sgsize;
  788 
  789         if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
  790                 /*
  791                  * If we have to keep the offset of each page this function
  792                  * is not suitable, switch back to bus_dmamap_load_ma_triv
  793                  * which is going to do the right thing in this case.
  794                  */
  795                 error = bus_dmamap_load_ma_triv(dmat, map, ma, buflen, ma_offs,
  796                     flags, segs, segp);
  797                 return (error);
  798         }
  799 
  800         if (map == NULL)
  801                 map = &nobounce_dmamap;
  802 
  803         if (segs == NULL)
  804                 segs = dmat->segments;
  805 
  806         if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
  807                 _bus_dmamap_count_ma(dmat, map, ma, ma_offs, buflen, flags);
  808                 if (map->pagesneeded != 0) {
  809                         error = _bus_dmamap_reserve_pages(dmat, map, flags);
  810                         if (error)
  811                                 return (error);
  812                 }
  813         }
  814 
  815         page_index = 0;
  816         while (buflen > 0) {
  817                 /*
  818                  * Compute the segment size, and adjust counts.
  819                  */
  820                 paddr = VM_PAGE_TO_PHYS(ma[page_index]) + ma_offs;
  821                 max_sgsize = MIN(buflen, dmat->common.maxsegsz);
  822                 sgsize = PAGE_SIZE - ma_offs;
  823                 if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
  824                     map->pagesneeded != 0 &&
  825                     bus_dma_run_filter(&dmat->common, paddr)) {
  826                         sgsize = roundup2(sgsize, dmat->common.alignment);
  827                         sgsize = MIN(sgsize, max_sgsize);
  828                         KASSERT((sgsize & (dmat->common.alignment - 1)) == 0,
  829                             ("Segment size is not aligned"));
  830                         /*
  831                          * Check if two pages of the user provided buffer
  832                          * are used.
  833                          */
  834                         if ((ma_offs + sgsize) > PAGE_SIZE)
  835                                 next_paddr =
  836                                     VM_PAGE_TO_PHYS(ma[page_index + 1]);
  837                         else
  838                                 next_paddr = 0;
  839                         paddr = add_bounce_page(dmat, map, 0, paddr,
  840                             next_paddr, sgsize);
  841                 } else {
  842                         sgsize = MIN(sgsize, max_sgsize);
  843                 }
  844                 sgsize = _bus_dmamap_addseg(dmat, map, paddr, sgsize, segs,
  845                     segp);
  846                 if (sgsize == 0)
  847                         break;
  848                 KASSERT(buflen >= sgsize,
  849                     ("Segment length overruns original buffer"));
  850                 buflen -= sgsize;
  851                 if (((ma_offs + sgsize) & ~PAGE_MASK) != 0)
  852                         page_index++;
  853                 ma_offs = (ma_offs + sgsize) & PAGE_MASK;
  854         }
  855 
  856         /*
  857          * Did we fit?
  858          */
  859         return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
  860 }
  861 
  862 static void
  863 bounce_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
  864     struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
  865 {
  866 
  867         if (map == NULL)
  868                 return;
  869         map->mem = *mem;
  870         map->dmat = dmat;
  871         map->callback = callback;
  872         map->callback_arg = callback_arg;
  873 }
  874 
  875 static bus_dma_segment_t *
  876 bounce_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
  877     bus_dma_segment_t *segs, int nsegs, int error)
  878 {
  879 
  880         if (segs == NULL)
  881                 segs = dmat->segments;
  882         return (segs);
  883 }
  884 
  885 /*
  886  * Release the mapping held by map.
  887  */
  888 static void
  889 bounce_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
  890 {
  891         struct bounce_page *bpage;
  892 
  893         if (map == NULL)
  894                 return;
  895 
  896         while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
  897                 STAILQ_REMOVE_HEAD(&map->bpages, links);
  898                 free_bounce_page(dmat, bpage);
  899         }
  900 }
  901 
  902 static void
  903 bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
  904     bus_dmasync_op_t op)
  905 {
  906         struct bounce_page *bpage;
  907         vm_offset_t datavaddr, tempvaddr;
  908         bus_size_t datacount1, datacount2;
  909 
  910         if (map == NULL || (bpage = STAILQ_FIRST(&map->bpages)) == NULL)
  911                 return;
  912 
  913         /*
  914          * Handle data bouncing.  We might also want to add support for
  915          * invalidating the caches on broken hardware.
  916          */
  917         CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
  918             "performing bounce", __func__, dmat, dmat->common.flags, op);
  919 
  920         if ((op & BUS_DMASYNC_PREWRITE) != 0) {
  921                 while (bpage != NULL) {
  922                         tempvaddr = 0;
  923                         datavaddr = bpage->datavaddr;
  924                         datacount1 = bpage->datacount;
  925                         if (datavaddr == 0) {
  926                                 tempvaddr =
  927                                     pmap_quick_enter_page(bpage->datapage[0]);
  928                                 datavaddr = tempvaddr | bpage->dataoffs;
  929                                 datacount1 = min(PAGE_SIZE - bpage->dataoffs,
  930                                     datacount1);
  931                         }
  932 
  933                         bcopy((void *)datavaddr,
  934                             (void *)bpage->vaddr, datacount1);
  935 
  936                         if (tempvaddr != 0)
  937                                 pmap_quick_remove_page(tempvaddr);
  938 
  939                         if (bpage->datapage[1] == 0) {
  940                                 KASSERT(datacount1 == bpage->datacount,
  941                 ("Mismatch between data size and provided memory space"));
  942                                 goto next_w;
  943                         }
  944 
  945                         /*
   946                          * We are dealing with an unmapped buffer that
   947                          * spans two pages.
  948                          */
  949                         datavaddr = pmap_quick_enter_page(bpage->datapage[1]);
  950                         datacount2 = bpage->datacount - datacount1;
  951                         bcopy((void *)datavaddr,
  952                             (void *)(bpage->vaddr + datacount1), datacount2);
  953                         pmap_quick_remove_page(datavaddr);
  954 
  955 next_w:
  956                         bpage = STAILQ_NEXT(bpage, links);
  957                 }
  958                 dmat->bounce_zone->total_bounced++;
  959         }
  960 
  961         if ((op & BUS_DMASYNC_POSTREAD) != 0) {
  962                 while (bpage != NULL) {
  963                         tempvaddr = 0;
  964                         datavaddr = bpage->datavaddr;
  965                         datacount1 = bpage->datacount;
  966                         if (datavaddr == 0) {
  967                                 tempvaddr =
  968                                     pmap_quick_enter_page(bpage->datapage[0]);
  969                                 datavaddr = tempvaddr | bpage->dataoffs;
  970                                 datacount1 = min(PAGE_SIZE - bpage->dataoffs,
  971                                     datacount1);
  972                         }
  973 
  974                         bcopy((void *)bpage->vaddr, (void *)datavaddr,
  975                             datacount1);
  976 
  977                         if (tempvaddr != 0)
  978                                 pmap_quick_remove_page(tempvaddr);
  979 
  980                         if (bpage->datapage[1] == 0) {
  981                                 KASSERT(datacount1 == bpage->datacount,
  982                 ("Mismatch between data size and provided memory space"));
  983                                 goto next_r;
  984                         }
  985 
  986                         /*
   987                          * We are dealing with an unmapped buffer that
   988                          * spans two pages.
  989                          */
  990                         datavaddr = pmap_quick_enter_page(bpage->datapage[1]);
  991                         datacount2 = bpage->datacount - datacount1;
  992                         bcopy((void *)(bpage->vaddr + datacount1),
  993                             (void *)datavaddr, datacount2);
  994                         pmap_quick_remove_page(datavaddr);
  995 
  996 next_r:
  997                         bpage = STAILQ_NEXT(bpage, links);
  998                 }
  999                 dmat->bounce_zone->total_bounced++;
 1000         }
 1001 }
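
/*
 * Editor's example (hypothetical, not part of the original file): the
 * ordering a driver must follow for the two directions handled above.
 */
#if 0
        /* CPU filled the buffer: copy into the bounce pages first. */
        bus_dmamap_sync(sc->dma_tag, map, BUS_DMASYNC_PREWRITE);
        /* ... device performs DMA ... */
        /* Device filled the buffer: copy back from the bounce pages. */
        bus_dmamap_sync(sc->dma_tag, map, BUS_DMASYNC_POSTREAD);
#endif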
 1002 
 1003 static void
 1004 init_bounce_pages(void *dummy __unused)
 1005 {
 1006 
 1007         total_bpages = 0;
 1008         STAILQ_INIT(&bounce_zone_list);
 1009         STAILQ_INIT(&bounce_map_waitinglist);
 1010         STAILQ_INIT(&bounce_map_callbacklist);
 1011         mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
 1012 }
 1013 SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);
 1014 
 1015 static struct sysctl_ctx_list *
 1016 busdma_sysctl_tree(struct bounce_zone *bz)
 1017 {
 1018 
 1019         return (&bz->sysctl_tree);
 1020 }
 1021 
 1022 static struct sysctl_oid *
 1023 busdma_sysctl_tree_top(struct bounce_zone *bz)
 1024 {
 1025 
 1026         return (bz->sysctl_tree_top);
 1027 }
 1028 
 1029 static int
 1030 alloc_bounce_zone(bus_dma_tag_t dmat)
 1031 {
 1032         struct bounce_zone *bz;
 1033 
 1034         /* Check to see if we already have a suitable zone */
 1035         STAILQ_FOREACH(bz, &bounce_zone_list, links) {
 1036                 if ((dmat->common.alignment <= bz->alignment) &&
 1037                     (dmat->common.lowaddr >= bz->lowaddr)) {
 1038                         dmat->bounce_zone = bz;
 1039                         return (0);
 1040                 }
 1041         }
 1042 
 1043         if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
 1044             M_NOWAIT | M_ZERO)) == NULL)
 1045                 return (ENOMEM);
 1046 
 1047         STAILQ_INIT(&bz->bounce_page_list);
 1048         bz->free_bpages = 0;
 1049         bz->reserved_bpages = 0;
 1050         bz->active_bpages = 0;
 1051         bz->lowaddr = dmat->common.lowaddr;
 1052         bz->alignment = MAX(dmat->common.alignment, PAGE_SIZE);
 1053         bz->map_count = 0;
 1054         snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
 1055         busdma_zonecount++;
 1056         snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
 1057         STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
 1058         dmat->bounce_zone = bz;
 1059 
 1060         sysctl_ctx_init(&bz->sysctl_tree);
 1061         bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
 1062             SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
 1063             CTLFLAG_RD, 0, "");
 1064         if (bz->sysctl_tree_top == NULL) {
 1065                 sysctl_ctx_free(&bz->sysctl_tree);
 1066                 return (0);     /* XXX error code? */
 1067         }
 1068 
 1069         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
 1070             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
 1071             "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
 1072             "Total bounce pages");
 1073         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
 1074             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
 1075             "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
 1076             "Free bounce pages");
 1077         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
 1078             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
 1079             "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
 1080             "Reserved bounce pages");
 1081         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
 1082             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
 1083             "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
 1084             "Active bounce pages");
 1085         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
 1086             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
 1087             "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
 1088             "Total bounce requests");
 1089         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
 1090             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
 1091             "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
 1092             "Total bounce requests that were deferred");
 1093         SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
 1094             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
 1095             "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
 1096         SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
 1097             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
 1098             "alignment", CTLFLAG_RD, &bz->alignment, "");
 1099 
 1100         return (0);
 1101 }
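
/*
 * Editor's note on the reuse test above: a zone can be shared when its
 * pages already satisfy the new tag's constraints, i.e. they are
 * aligned at least as strictly and lie below an address the tag also
 * accepts.  For example, a zone created for lowaddr = 16 MiB can serve
 * a later tag with lowaddr = 4 GiB, since pages below 16 MiB are also
 * below 4 GiB.
 */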
 1102 
 1103 static int
 1104 alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
 1105 {
 1106         struct bounce_zone *bz;
 1107         int count;
 1108 
 1109         bz = dmat->bounce_zone;
 1110         count = 0;
 1111         while (numpages > 0) {
 1112                 struct bounce_page *bpage;
 1113 
 1114                 bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
 1115                                                      M_NOWAIT | M_ZERO);
 1116 
 1117                 if (bpage == NULL)
 1118                         break;
 1119                 bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
 1120                                                          M_NOWAIT, 0ul,
 1121                                                          bz->lowaddr,
 1122                                                          PAGE_SIZE,
 1123                                                          0);
 1124                 if (bpage->vaddr == 0) {
 1125                         free(bpage, M_DEVBUF);
 1126                         break;
 1127                 }
 1128                 bpage->busaddr = pmap_kextract(bpage->vaddr);
 1129                 mtx_lock(&bounce_lock);
 1130                 STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
 1131                 total_bpages++;
 1132                 bz->total_bpages++;
 1133                 bz->free_bpages++;
 1134                 mtx_unlock(&bounce_lock);
 1135                 count++;
 1136                 numpages--;
 1137         }
 1138         return (count);
 1139 }
 1140 
 1141 static int
 1142 reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
 1143 {
 1144         struct bounce_zone *bz;
 1145         int pages;
 1146 
 1147         mtx_assert(&bounce_lock, MA_OWNED);
 1148         bz = dmat->bounce_zone;
 1149         pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
 1150         if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
 1151                 return (map->pagesneeded - (map->pagesreserved + pages));
 1152         bz->free_bpages -= pages;
 1153         bz->reserved_bpages += pages;
 1154         map->pagesreserved += pages;
 1155         pages = map->pagesneeded - map->pagesreserved;
 1156 
 1157         return (pages);
 1158 }
 1159 
 1160 static bus_addr_t
 1161 add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
 1162                 bus_addr_t addr1, bus_addr_t addr2, bus_size_t size)
 1163 {
 1164         struct bounce_zone *bz;
 1165         struct bounce_page *bpage;
 1166 
 1167         KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
 1168         KASSERT(map != NULL && map != &nobounce_dmamap,
 1169             ("add_bounce_page: bad map %p", map));
 1170 
 1171         bz = dmat->bounce_zone;
 1172         if (map->pagesneeded == 0)
 1173                 panic("add_bounce_page: map doesn't need any pages");
 1174         map->pagesneeded--;
 1175 
 1176         if (map->pagesreserved == 0)
 1177                 panic("add_bounce_page: map doesn't need any pages");
 1178         map->pagesreserved--;
 1179 
 1180         mtx_lock(&bounce_lock);
 1181         bpage = STAILQ_FIRST(&bz->bounce_page_list);
 1182         if (bpage == NULL)
 1183                 panic("add_bounce_page: free page list is empty");
 1184 
 1185         STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
 1186         bz->reserved_bpages--;
 1187         bz->active_bpages++;
 1188         mtx_unlock(&bounce_lock);
 1189 
 1190         if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
 1191                 /* Page offset needs to be preserved. */
 1192                 bpage->vaddr |= addr1 & PAGE_MASK;
 1193                 bpage->busaddr |= addr1 & PAGE_MASK;
 1194                 KASSERT(addr2 == 0,
 1195         ("Trying to bounce multiple pages with BUS_DMA_KEEP_PG_OFFSET"));
 1196         }
 1197         bpage->datavaddr = vaddr;
 1198         bpage->datapage[0] = PHYS_TO_VM_PAGE(addr1);
 1199         KASSERT((addr2 & PAGE_MASK) == 0, ("Second page is not aligned"));
 1200         bpage->datapage[1] = PHYS_TO_VM_PAGE(addr2);
 1201         bpage->dataoffs = addr1 & PAGE_MASK;
 1202         bpage->datacount = size;
 1203         STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
 1204         return (bpage->busaddr);
 1205 }
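
/*
 * Editor's note on BUS_DMA_KEEP_PG_OFFSET above: some devices encode
 * state in the low bits of a buffer address, so the substituted bounce
 * address must keep the original page offset.  If addr1 ends in 0x234,
 * the returned busaddr also ends in 0x234, at the cost of restricting
 * such a bounce to a single page (addr2 must be 0, as asserted).
 */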
 1206 
 1207 static void
 1208 free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
 1209 {
 1210         struct bus_dmamap *map;
 1211         struct bounce_zone *bz;
 1212 
 1213         bz = dmat->bounce_zone;
 1214         bpage->datavaddr = 0;
 1215         bpage->datacount = 0;
 1216         if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
 1217                 /*
 1218                  * Reset the bounce page to start at offset 0.  Other uses
 1219                  * of this bounce page may need to store a full page of
 1220                  * data and/or assume it starts on a page boundary.
 1221                  */
 1222                 bpage->vaddr &= ~PAGE_MASK;
 1223                 bpage->busaddr &= ~PAGE_MASK;
 1224         }
 1225 
 1226         mtx_lock(&bounce_lock);
 1227         STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
 1228         bz->free_bpages++;
 1229         bz->active_bpages--;
 1230         if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
 1231                 if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
 1232                         STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
 1233                         STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
 1234                             map, links);
 1235                         busdma_swi_pending = 1;
 1236                         bz->total_deferred++;
 1237                         swi_sched(vm_ih, 0);
 1238                 }
 1239         }
 1240         mtx_unlock(&bounce_lock);
 1241 }
 1242 
 1243 void
 1244 busdma_swi(void)
 1245 {
 1246         bus_dma_tag_t dmat;
 1247         struct bus_dmamap *map;
 1248 
 1249         mtx_lock(&bounce_lock);
 1250         while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
 1251                 STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
 1252                 mtx_unlock(&bounce_lock);
 1253                 dmat = map->dmat;
 1254                 (dmat->common.lockfunc)(dmat->common.lockfuncarg, BUS_DMA_LOCK);
 1255                 bus_dmamap_load_mem(map->dmat, map, &map->mem,
 1256                     map->callback, map->callback_arg, BUS_DMA_WAITOK);
 1257                 (dmat->common.lockfunc)(dmat->common.lockfuncarg,
 1258                     BUS_DMA_UNLOCK);
 1259                 mtx_lock(&bounce_lock);
 1260         }
 1261         mtx_unlock(&bounce_lock);
 1262 }
 1263 
 1264 struct bus_dma_impl bus_dma_bounce_impl = {
 1265         .tag_create = bounce_bus_dma_tag_create,
 1266         .tag_destroy = bounce_bus_dma_tag_destroy,
 1267         .map_create = bounce_bus_dmamap_create,
 1268         .map_destroy = bounce_bus_dmamap_destroy,
 1269         .mem_alloc = bounce_bus_dmamem_alloc,
 1270         .mem_free = bounce_bus_dmamem_free,
 1271         .load_phys = bounce_bus_dmamap_load_phys,
 1272         .load_buffer = bounce_bus_dmamap_load_buffer,
 1273         .load_ma = bounce_bus_dmamap_load_ma,
 1274         .map_waitok = bounce_bus_dmamap_waitok,
 1275         .map_complete = bounce_bus_dmamap_complete,
 1276         .map_unload = bounce_bus_dmamap_unload,
 1277         .map_sync = bounce_bus_dmamap_sync
 1278 };
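
/*
 * Editor's sketch (assumed from the surrounding x86 busdma framework,
 * not part of this file): the machine-independent entry points dispatch
 * through the ops table above, along these lines for bus_dmamap_sync().
 */
#if 0
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
        struct bus_dma_tag_common *tc;

        tc = (struct bus_dma_tag_common *)dmat;
        tc->impl->map_sync(dmat, map, op);
}
#endif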
