FreeBSD/Linux Kernel Cross Reference
sys/mips/mips/busdma_machdep.c

    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (c) 2006 Oleksandr Tymoshenko
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions, and the following disclaimer,
   12  *    without modification, immediately at the beginning of the file.
   13  * 2. The name of the author may not be used to endorse or promote products
   14  *    derived from this software without specific prior written permission.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
   20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   26  * SUCH DAMAGE.
   27  *
   28  *  From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
   29  */
   30 
   31 #include <sys/cdefs.h>
   32 __FBSDID("$FreeBSD$");
   33 
   34 /*
   35  * MIPS bus dma support routines
   36  */
   37 
   38 #include <sys/param.h>
   39 #include <sys/systm.h>
   40 #include <sys/malloc.h>
   41 #include <sys/bus.h>
   42 #include <sys/busdma_bufalloc.h>
   43 #include <sys/interrupt.h>
   44 #include <sys/lock.h>
   45 #include <sys/proc.h>
   46 #include <sys/memdesc.h>
   47 #include <sys/mutex.h>
   48 #include <sys/ktr.h>
   49 #include <sys/kernel.h>
   50 #include <sys/sysctl.h>
   51 #include <sys/uio.h>
   52 
   53 #include <vm/uma.h>
   54 #include <vm/vm.h>
   55 #include <vm/vm_extern.h>
   56 #include <vm/vm_kern.h>
   57 #include <vm/vm_page.h>
   58 #include <vm/vm_map.h>
   59 
   60 #include <machine/atomic.h>
   61 #include <machine/bus.h>
   62 #include <machine/cache.h>
   63 #include <machine/cpufunc.h>
   64 #include <machine/cpuinfo.h>
   65 #include <machine/md_var.h>
   66 
   67 #define MAX_BPAGES 64
   68 #define BUS_DMA_COULD_BOUNCE    BUS_DMA_BUS3
   69 #define BUS_DMA_MIN_ALLOC_COMP  BUS_DMA_BUS4
   70 
   71 /*
   72  * On XBurst cores from Ingenic, cache-line writeback is local
   73  * only, unless accompanied by invalidation. An invalidation forces a
   74  * dirty line to be written out, and the invalidation request is
   75  * forwarded to other cores that hold the cache line dirty.
   76  */
   77 #if defined(SMP) && defined(CPU_XBURST)
   78 #define BUS_DMA_FORCE_WBINV
   79 #endif
   80 
   81 struct bounce_zone;
   82 
   83 struct bus_dma_tag {
   84         bus_dma_tag_t           parent;
   85         bus_size_t              alignment;
   86         bus_addr_t              boundary;
   87         bus_addr_t              lowaddr;
   88         bus_addr_t              highaddr;
   89         bus_dma_filter_t        *filter;
   90         void                    *filterarg;
   91         bus_size_t              maxsize;
   92         u_int                   nsegments;
   93         bus_size_t              maxsegsz;
   94         int                     flags;
   95         int                     ref_count;
   96         int                     map_count;
   97         bus_dma_lock_t          *lockfunc;
   98         void                    *lockfuncarg;
   99         bus_dma_segment_t       *segments;
  100         struct bounce_zone *bounce_zone;
  101 };
  102 
  103 struct bounce_page {
  104         vm_offset_t     vaddr;          /* kva of bounce buffer */
  105         vm_offset_t     vaddr_nocache;  /* kva of bounce buffer uncached */
  106         bus_addr_t      busaddr;        /* Physical address */
  107         vm_offset_t     datavaddr;      /* kva of client data */
  108         bus_addr_t      dataaddr;       /* client physical address */
  109         bus_size_t      datacount;      /* client data count */
  110         STAILQ_ENTRY(bounce_page) links;
  111 };
  112 
  113 struct sync_list {
  114         vm_offset_t     vaddr;          /* kva of client data */
  115         bus_addr_t      busaddr;        /* client physical address */
  116         bus_size_t      datacount;      /* client data count */
  117 };
  118 
  119 int busdma_swi_pending;
  120 
  121 struct bounce_zone {
  122         STAILQ_ENTRY(bounce_zone) links;
  123         STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
  124         int             total_bpages;
  125         int             free_bpages;
  126         int             reserved_bpages;
  127         int             active_bpages;
  128         int             total_bounced;
  129         int             total_deferred;
  130         int             map_count;
  131         bus_size_t      alignment;
  132         bus_addr_t      lowaddr;
  133         char            zoneid[8];
  134         char            lowaddrid[20];
  135         struct sysctl_ctx_list sysctl_tree;
  136         struct sysctl_oid *sysctl_tree_top;
  137 };
  138 
  139 static struct mtx bounce_lock;
  140 static int total_bpages;
  141 static int busdma_zonecount;
  142 static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
  143 
  144 static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
  145 SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
  146            "Total bounce pages");
  147 
  148 #define DMAMAP_UNCACHEABLE      0x08
  149 #define DMAMAP_CACHE_ALIGNED    0x10
  150 
  151 struct bus_dmamap {
  152         struct bp_list  bpages;
  153         int             pagesneeded;
  154         int             pagesreserved;
  155         bus_dma_tag_t   dmat;
  156         struct memdesc  mem;
  157         int             flags;
  158         TAILQ_ENTRY(bus_dmamap) freelist;
  159         STAILQ_ENTRY(bus_dmamap) links;
  160         bus_dmamap_callback_t *callback;
  161         void            *callback_arg;
  162         int             sync_count;
  163         struct sync_list *slist;
  164 };
  165 
  166 static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
  167 static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
  168 
  169 static void init_bounce_pages(void *dummy);
  170 static int alloc_bounce_zone(bus_dma_tag_t dmat);
  171 static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
  172 static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
  173                                 int commit);
  174 static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
  175                                   vm_offset_t vaddr, bus_addr_t addr,
  176                                   bus_size_t size);
  177 static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
  178 
  179 /* Default tag, as most drivers provide no parent tag. */
  180 bus_dma_tag_t mips_root_dma_tag;
  181 
  182 static uma_zone_t dmamap_zone;  /* Cache of struct bus_dmamap items */
  183 
  184 static busdma_bufalloc_t coherent_allocator;    /* Cache of coherent buffers */
  185 static busdma_bufalloc_t standard_allocator;    /* Cache of standard buffers */
  186 
  187 MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
  188 MALLOC_DEFINE(M_BOUNCE, "bounce", "busdma bounce pages");
  189 
  190 /*
  191  * This is the ctor function passed to uma_zcreate() for the pool of dma maps.
  192  * It'll need platform-specific changes if this code is copied.
  193  */
  194 static int
  195 dmamap_ctor(void *mem, int size, void *arg, int flags)
  196 {
  197         bus_dmamap_t map;
  198         bus_dma_tag_t dmat;
  199 
  200         map = (bus_dmamap_t)mem;
  201         dmat = (bus_dma_tag_t)arg;
  202 
  203         dmat->map_count++;
  204 
  205         bzero(map, sizeof(*map));
  206         map->dmat = dmat;
  207         STAILQ_INIT(&map->bpages);
  208 
  209         return (0);
  210 }
  211 
  212 /*
  213  * This is the dtor function passed to uma_zcreate() for the pool of dma maps.
  214  * It may need platform-specific changes if this code is copied.
  215  */
  216 static void
  217 dmamap_dtor(void *mem, int size, void *arg)
  218 {
  219         bus_dmamap_t map;
  220 
  221         map = (bus_dmamap_t)mem;
  222 
  223         map->dmat->map_count--;
  224 }
  225 
  226 static void
  227 busdma_init(void *dummy)
  228 {
  229 
  230         /* Create a cache of maps for bus_dmamap_create(). */
  231         dmamap_zone = uma_zcreate("dma maps", sizeof(struct bus_dmamap),
  232             dmamap_ctor, dmamap_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);
  233 
  234         /* Create a cache of buffers in standard (cacheable) memory. */
  235         standard_allocator = busdma_bufalloc_create("buffer",
  236             mips_dcache_max_linesize,   /* minimum_alignment */
  237             NULL,                       /* uma_alloc func */
  238             NULL,                       /* uma_free func */
  239             0);                         /* uma_zcreate_flags */
  240 
  241         /*
  242          * Create a cache of buffers in uncacheable memory, to implement the
  243          * BUS_DMA_COHERENT flag.
  244          */
  245         coherent_allocator = busdma_bufalloc_create("coherent",
  246             mips_dcache_max_linesize,   /* minimum_alignment */
  247             busdma_bufalloc_alloc_uncacheable,
  248             busdma_bufalloc_free_uncacheable,
  249             0);                         /* uma_zcreate_flags */
  250 }
  251 SYSINIT(busdma, SI_SUB_KMEM, SI_ORDER_FOURTH, busdma_init, NULL);
  252 
  253 /*
  254  * Return true if a match is made.
  255  *
  256  * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
  257  *
  258  * If paddr is within the bounds of the dma tag then call the filter callback
  259  * to check for a match; if there is no filter callback then assume a match.
  260  */
  261 static int
  262 run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
  263 {
  264         int retval;
  265 
  266         retval = 0;
  267 
  268         do {
  269                 if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
  270                  || ((paddr & (dmat->alignment - 1)) != 0))
  271                  && (dmat->filter == NULL
  272                   || (*dmat->filter)(dmat->filterarg, paddr) != 0))
  273                         retval = 1;
  274 
  275                 dmat = dmat->parent;
  276         } while (retval == 0 && dmat != NULL);
  277         return (retval);
  278 }
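
      /*
       * Worked example (editor's note, hypothetical values; assumes no
       * more-restrictive parent tag): for a tag with lowaddr = 0x0fffffff,
       * highaddr = BUS_SPACE_MAXADDR, alignment = 0x1000 and no filter
       * callback:
       *
       *   paddr = 0x10000000 lies in the exclusion window (paddr > lowaddr
       *       and paddr <= highaddr), so run_filter() returns 1 and the page
       *       is bounced.
       *   paddr = 0x00800200 is below lowaddr, but (paddr & (alignment - 1))
       *       is 0x200, so the alignment test fails and it is bounced as well.
       *   paddr = 0x00800000 is below lowaddr and 4 KB aligned, so
       *       run_filter() returns 0 and the page is used directly.
       */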
  279 
  280 /*
  281  * Check whether the exclusion window overlaps available physical memory.
  282  */
  283 
  284 static __inline int
  285 _bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
  286 {
  287         int i;
  288         for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
  289                 if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
  290                     || (lowaddr < phys_avail[i] &&
  291                     highaddr > phys_avail[i]))
  292                         return (1);
  293         }
  294         return (0);
  295 }
  296 
  297 /*
  298  * Convenience function for manipulating driver locks from busdma (during
  299  * busdma_swi, for example).  Drivers that don't provide their own locks
  300  * should pass &Giant as dmat->lockfuncarg.  Drivers that use their own
  301  * non-mutex locking scheme don't have to use this at all.
  302  */
  303 void
  304 busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
  305 {
  306         struct mtx *dmtx;
  307 
  308         dmtx = (struct mtx *)arg;
  309         switch (op) {
  310         case BUS_DMA_LOCK:
  311                 mtx_lock(dmtx);
  312                 break;
  313         case BUS_DMA_UNLOCK:
  314                 mtx_unlock(dmtx);
  315                 break;
  316         default:
  317                 panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
  318         }
  319 }
  320 
  321 /*
  322  * dflt_lock should never get called.  It gets put into the dma tag when
  323  * lockfunc == NULL, which is only valid if the maps that are associated
  324  * with the tag are never meant to be deferred.
  325  * XXX Should have a way to identify which driver is responsible here.
  326  */
  327 static void
  328 dflt_lock(void *arg, bus_dma_lock_op_t op)
  329 {
  330 #ifdef INVARIANTS
  331         panic("driver error: busdma dflt_lock called");
  332 #else
  333         printf("DRIVER_ERROR: busdma dflt_lock called\n");
  334 #endif
  335 }
  336 
  337 static __inline bus_dmamap_t
  338 _busdma_alloc_dmamap(bus_dma_tag_t dmat)
  339 {
  340         struct sync_list *slist;
  341         bus_dmamap_t map;
  342 
  343         slist = malloc(sizeof(*slist) * dmat->nsegments, M_BUSDMA, M_NOWAIT);
  344         if (slist == NULL)
  345                 return (NULL);
  346         map = uma_zalloc_arg(dmamap_zone, dmat, M_NOWAIT);
  347         if (map != NULL)
  348                 map->slist = slist;
  349         else
  350                 free(slist, M_BUSDMA);
  351         return (map);
  352 }
  353 
  354 static __inline void
  355 _busdma_free_dmamap(bus_dmamap_t map)
  356 {
  357 
  358         free(map->slist, M_BUSDMA);
  359         uma_zfree(dmamap_zone, map);
  360 }
  361 
  362 /*
  363  * Allocate a device specific dma_tag.
  364  */
  365 #define SEG_NB 1024
  366 
  367 int
  368 bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
  369     bus_addr_t boundary, bus_addr_t lowaddr,
  370     bus_addr_t highaddr, bus_dma_filter_t *filter,
  371     void *filterarg, bus_size_t maxsize, int nsegments,
  372     bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
  373     void *lockfuncarg, bus_dma_tag_t *dmat)
  374 {
  375         bus_dma_tag_t newtag;
  376         int error = 0;
  377         /* Return a NULL tag on failure */
  378         *dmat = NULL;
  379         if (!parent)
  380                 parent = mips_root_dma_tag;
  381 
  382         newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_BUSDMA, M_NOWAIT);
  383         if (newtag == NULL) {
  384                 CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
  385                     __func__, newtag, 0, error);
  386                 return (ENOMEM);
  387         }
  388 
  389         newtag->parent = parent;
  390         newtag->alignment = alignment;
  391         newtag->boundary = boundary;
  392         newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
  393         newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
  394         newtag->filter = filter;
  395         newtag->filterarg = filterarg;
  396         newtag->maxsize = maxsize;
  397         newtag->nsegments = nsegments;
  398         newtag->maxsegsz = maxsegsz;
  399         newtag->flags = flags;
  400         if (cpuinfo.cache_coherent_dma)
  401                 newtag->flags |= BUS_DMA_COHERENT;
  402         newtag->ref_count = 1; /* Count ourself */
  403         newtag->map_count = 0;
  404         if (lockfunc != NULL) {
  405                 newtag->lockfunc = lockfunc;
  406                 newtag->lockfuncarg = lockfuncarg;
  407         } else {
  408                 newtag->lockfunc = dflt_lock;
  409                 newtag->lockfuncarg = NULL;
  410         }
  411         newtag->segments = NULL;
  412 
  413         /*
  414          * Take into account any restrictions imposed by our parent tag
  415          */
  416         if (parent != NULL) {
  417                 newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
  418                 newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
  419                 if (newtag->boundary == 0)
  420                         newtag->boundary = parent->boundary;
  421                 else if (parent->boundary != 0)
  422                         newtag->boundary =
  423                             MIN(parent->boundary, newtag->boundary);
  424                 if ((newtag->filter != NULL) ||
  425                     ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
  426                         newtag->flags |= BUS_DMA_COULD_BOUNCE;
  427                 if (newtag->filter == NULL) {
  428                         /*
  429                          * Short circuit looking at our parent directly
  430                          * since we have encapsulated all of its information
  431                          */
  432                         newtag->filter = parent->filter;
  433                         newtag->filterarg = parent->filterarg;
  434                         newtag->parent = parent->parent;
  435                 }
  436                 if (newtag->parent != NULL)
  437                         atomic_add_int(&parent->ref_count, 1);
  438         }
  439         if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
  440          || newtag->alignment > 1)
  441                 newtag->flags |= BUS_DMA_COULD_BOUNCE;
  442 
  443         if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
  444             (flags & BUS_DMA_ALLOCNOW) != 0) {
  445                 struct bounce_zone *bz;
  446 
  447                 /* Must bounce */
  448 
  449                 if ((error = alloc_bounce_zone(newtag)) != 0) {
  450                         free(newtag, M_BUSDMA);
  451                         return (error);
  452                 }
  453                 bz = newtag->bounce_zone;
  454 
  455                 if (ptoa(bz->total_bpages) < maxsize) {
  456                         int pages;
  457 
  458                         pages = atop(maxsize) - bz->total_bpages;
  459 
  460                         /* Add pages to our bounce pool */
  461                         if (alloc_bounce_pages(newtag, pages) < pages)
  462                                 error = ENOMEM;
  463                 }
  464                 /* Performed initial allocation */
  465                 newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
  466         } else
  467                 newtag->bounce_zone = NULL;
  468         if (error != 0)
  469                 free(newtag, M_BUSDMA);
  470         else
  471                 *dmat = newtag;
  472         CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
  473             __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
  474 
  475         return (error);
  476 }
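
      /*
       * Example (editor's sketch, not part of the original source): a typical
       * driver creates its tag with busdma_lock_mutex and its own mutex so
       * that deferred load callbacks are serialized against the driver lock.
       * The softc fields and sizes below are hypothetical.
       *
       *     error = bus_dma_tag_create(
       *         bus_get_dma_tag(dev),          parent
       *         1, 0,                          alignment, boundary
       *         BUS_SPACE_MAXADDR_32BIT,       lowaddr
       *         BUS_SPACE_MAXADDR,             highaddr
       *         NULL, NULL,                    filter, filterarg
       *         MCLBYTES, 1, MCLBYTES,         maxsize, nsegments, maxsegsz
       *         0,                             flags
       *         busdma_lock_mutex,             lockfunc
       *         &sc->sc_mtx,                   lockfuncarg
       *         &sc->sc_dmat);                 dmat
       */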
  477 
  478 int
  479 bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
  480 {
  481 
  482         return (0);
  483 }
  484 
  485 int
  486 bus_dma_tag_destroy(bus_dma_tag_t dmat)
  487 {
  488 #ifdef KTR
  489         bus_dma_tag_t dmat_copy = dmat;
  490 #endif
  491 
  492         if (dmat != NULL) {
  493                 if (dmat->map_count != 0)
  494                         return (EBUSY);
  495 
  496                 while (dmat != NULL) {
  497                         bus_dma_tag_t parent;
  498 
  499                         parent = dmat->parent;
  500                         atomic_subtract_int(&dmat->ref_count, 1);
  501                         if (dmat->ref_count == 0) {
  502                                 if (dmat->segments != NULL)
  503                                         free(dmat->segments, M_BUSDMA);
  504                                 free(dmat, M_BUSDMA);
  505                                 /*
  506                                  * Last reference count, so
  507                                  * release our reference
  508                                  * count on our parent.
  509                                  */
  510                                 dmat = parent;
  511                         } else
  512                                 dmat = NULL;
  513                 }
  514         }
  515         CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);
  516 
  517         return (0);
  518 }
  519 
  520 #include <sys/kdb.h>
  521 /*
  522  * Allocate a handle for mapping from kva/uva/physical
  523  * address space into bus device space.
  524  */
  525 int
  526 bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
  527 {
  528         bus_dmamap_t newmap;
  529         int error = 0;
  530 
  531         if (dmat->segments == NULL) {
  532                 dmat->segments = (bus_dma_segment_t *)malloc(
  533                     sizeof(bus_dma_segment_t) * dmat->nsegments, M_BUSDMA,
  534                     M_NOWAIT);
  535                 if (dmat->segments == NULL) {
  536                         CTR3(KTR_BUSDMA, "%s: tag %p error %d",
  537                             __func__, dmat, ENOMEM);
  538                         return (ENOMEM);
  539                 }
  540         }
  541 
  542         newmap = _busdma_alloc_dmamap(dmat);
  543         if (newmap == NULL) {
  544                 CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
  545                 return (ENOMEM);
  546         }
  547         *mapp = newmap;
  548 
  549         /*
  550          * Bouncing might be required if the driver asks for an active
  551          * exclusion region, a data alignment that is stricter than 1, and/or
  552          * an active address boundary.
  553          */
  554         if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
  555 
  556                 /* Must bounce */
  557                 struct bounce_zone *bz;
  558                 int maxpages;
  559 
  560                 if (dmat->bounce_zone == NULL) {
  561                         if ((error = alloc_bounce_zone(dmat)) != 0) {
  562                                 _busdma_free_dmamap(newmap);
  563                                 *mapp = NULL;
  564                                 return (error);
  565                         }
  566                 }
  567                 bz = dmat->bounce_zone;
  568 
  569                 /* Initialize the new map */
  570                 STAILQ_INIT(&((*mapp)->bpages));
  571 
  572                 /*
  573                  * Attempt to add pages to our pool on a per-instance
  574                  * basis up to a sane limit.
  575                  */
  576                 maxpages = MAX_BPAGES;
  577                 if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
  578                  || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
  579                         int pages;
  580 
  581                         pages = MAX(atop(dmat->maxsize), 1);
  582                         pages = MIN(maxpages - bz->total_bpages, pages);
  583                         pages = MAX(pages, 1);
  584                         if (alloc_bounce_pages(dmat, pages) < pages)
  585                                 error = ENOMEM;
  586 
  587                         if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
  588                                 if (error == 0)
  589                                         dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
  590                         } else {
  591                                 error = 0;
  592                         }
  593                 }
  594                 bz->map_count++;
  595         }
  596 
  597         CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
  598             __func__, dmat, dmat->flags, error);
  599 
  600         return (0);
  601 }
  602 
  603 /*
  604  * Destroy a handle for mapping from kva/uva/physical
  605  * address space into bus device space.
  606  */
  607 int
  608 bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
  609 {
  610 
  611         if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
  612                 CTR3(KTR_BUSDMA, "%s: tag %p error %d",
  613                     __func__, dmat, EBUSY);
  614                 return (EBUSY);
  615         }
  616         if (dmat->bounce_zone)
  617                 dmat->bounce_zone->map_count--;
  618         _busdma_free_dmamap(map);
  619         CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
  620         return (0);
  621 }
  622 
  623 /*
  624  * Allocate a piece of memory that can be efficiently mapped into
  625  * bus device space based on the constraints listed in the dma tag.
  626  * A dmamap for use with bus_dmamap_load is also allocated.
  627  */
  628 int
  629 bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddrp, int flags,
  630     bus_dmamap_t *mapp)
  631 {
  632         bus_dmamap_t newmap = NULL;
  633         busdma_bufalloc_t ba;
  634         struct busdma_bufzone *bufzone;
  635         vm_memattr_t memattr;
  636         void *vaddr;
  637 
  638         int mflags;
  639 
  640         if (flags & BUS_DMA_NOWAIT)
  641                 mflags = M_NOWAIT;
  642         else
  643                 mflags = M_WAITOK;
  644         if (dmat->segments == NULL) {
  645                 dmat->segments = (bus_dma_segment_t *)malloc(
  646                     sizeof(bus_dma_segment_t) * dmat->nsegments, M_BUSDMA,
  647                     mflags);
  648                 if (dmat->segments == NULL) {
  649                         CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
  650                             __func__, dmat, dmat->flags, ENOMEM);
  651                         return (ENOMEM);
  652                 }
  653         }
  654 
  655         newmap = _busdma_alloc_dmamap(dmat);
  656         if (newmap == NULL) {
  657                 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
  658                     __func__, dmat, dmat->flags, ENOMEM);
  659                 return (ENOMEM);
  660         }
  661 
  662         /*
  663          * If all the memory is coherent with DMA then we don't need to
  664          * do anything special for a coherent mapping request.
  665          */
  666         if (dmat->flags & BUS_DMA_COHERENT)
  667             flags &= ~BUS_DMA_COHERENT;
  668 
  669         if (flags & BUS_DMA_COHERENT) {
  670                 memattr = VM_MEMATTR_UNCACHEABLE;
  671                 ba = coherent_allocator;
  672                 newmap->flags |= DMAMAP_UNCACHEABLE;
  673         } else {
  674                 memattr = VM_MEMATTR_DEFAULT;
  675                 ba = standard_allocator;
  676         }
  677         /* All buffers we allocate are cache-aligned. */
  678         newmap->flags |= DMAMAP_CACHE_ALIGNED;
  679 
  680         if (flags & BUS_DMA_ZERO)
  681                 mflags |= M_ZERO;
  682 
  683         /*
  684          * Try to find a bufzone in the allocator that holds a cache of buffers
  685          * of the right size for this request.  If the buffer is too big to be
  686          * held in the allocator cache, this returns NULL.
  687          */
  688         bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);
  689 
  690         /*
  691          * Allocate the buffer from the uma(9) allocator if...
  692          *  - It's small enough to be in the allocator (bufzone not NULL).
  693          *  - The alignment constraint isn't larger than the allocation size
  694          *    (the allocator aligns buffers to their size boundaries).
  695          *  - There's no need to handle lowaddr/highaddr exclusion zones.
  696          * else allocate non-contiguous pages if...
  697          *  - The page count that could get allocated doesn't exceed
  698  *    nsegments, even when the maximum segment size is less
  699          *    than PAGE_SIZE.
  700          *  - The alignment constraint isn't larger than a page boundary.
  701          *  - There are no boundary-crossing constraints.
  702          * else allocate a block of contiguous pages because one or more of the
  703          * constraints is something that only the contig allocator can fulfill.
  704          */
  705         if (bufzone != NULL && dmat->alignment <= bufzone->size &&
  706             !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
  707                 vaddr = uma_zalloc(bufzone->umazone, mflags);
  708         } else if (dmat->nsegments >=
  709             howmany(dmat->maxsize, MIN(dmat->maxsegsz, PAGE_SIZE)) &&
  710             dmat->alignment <= PAGE_SIZE &&
  711             (dmat->boundary % PAGE_SIZE) == 0) {
  712                 vaddr = (void *)kmem_alloc_attr(dmat->maxsize, mflags, 0,
  713                     dmat->lowaddr, memattr);
  714         } else {
  715                 vaddr = (void *)kmem_alloc_contig(dmat->maxsize, mflags, 0,
  716                     dmat->lowaddr, dmat->alignment, dmat->boundary, memattr);
  717         }
  718         if (vaddr == NULL) {
  719                 _busdma_free_dmamap(newmap);
  720                 newmap = NULL;
  721         } else {
  722                 newmap->sync_count = 0;
  723         }
  724         *vaddrp = vaddr;
  725         *mapp = newmap;
  726 
  727         return (vaddr == NULL ? ENOMEM : 0);
  728 }
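
      /*
       * Example (editor's sketch): allocating and loading a coherent
       * descriptor ring with a tag like the one created above.  The softc
       * fields, RING_SIZE and the callback are hypothetical driver code.
       *
       *     static void
       *     ring_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
       *     {
       *             if (error == 0)
       *                     *(bus_addr_t *)arg = segs[0].ds_addr;
       *     }
       *
       *     error = bus_dmamem_alloc(sc->sc_dmat, &sc->sc_ring,
       *         BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
       *         &sc->sc_ring_map);
       *     if (error == 0)
       *             error = bus_dmamap_load(sc->sc_dmat, sc->sc_ring_map,
       *                 sc->sc_ring, RING_SIZE, ring_cb,
       *                 &sc->sc_ring_busaddr, BUS_DMA_NOWAIT);
       *
       * On teardown the driver unloads and frees with the same tag and map:
       *
       *     bus_dmamap_unload(sc->sc_dmat, sc->sc_ring_map);
       *     bus_dmamem_free(sc->sc_dmat, sc->sc_ring, sc->sc_ring_map);
       */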
  729 
  730 /*
  731  * Free a piece of memory and its allocated dmamap that were allocated
  732  * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
  733  */
  734 void
  735 bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
  736 {
  737         struct busdma_bufzone *bufzone;
  738         busdma_bufalloc_t ba;
  739 
  740         if (map->flags & DMAMAP_UNCACHEABLE)
  741                 ba = coherent_allocator;
  742         else
  743                 ba = standard_allocator;
  744 
  745         free(map->slist, M_BUSDMA);
  746         uma_zfree(dmamap_zone, map);
  747 
  748         bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);
  749 
  750         if (bufzone != NULL && dmat->alignment <= bufzone->size &&
  751             !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
  752                 uma_zfree(bufzone->umazone, vaddr);
  753         else
  754                 kmem_free((vm_offset_t)vaddr, dmat->maxsize);
  755         CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
  756 }
  757 
  758 static void
  759 _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
  760     bus_size_t buflen, int flags)
  761 {
  762         bus_addr_t curaddr;
  763         bus_size_t sgsize;
  764 
  765         if (map->pagesneeded == 0) {
  766                 CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
  767                     dmat->lowaddr, dmat->boundary, dmat->alignment);
  768                 CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
  769                     map, map->pagesneeded);
  770                 /*
  771                  * Count the number of bounce pages
  772                  * needed in order to complete this transfer
  773                  */
  774                 curaddr = buf;
  775                 while (buflen != 0) {
  776                         sgsize = MIN(buflen, dmat->maxsegsz);
  777                         if (run_filter(dmat, curaddr) != 0) {
  778                                 sgsize = MIN(sgsize, PAGE_SIZE);
  779                                 map->pagesneeded++;
  780                         }
  781                         curaddr += sgsize;
  782                         buflen -= sgsize;
  783                 }
  784                 CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
  785         }
  786 }
  787 
  788 static void
  789 _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
  790     void *buf, bus_size_t buflen, int flags)
  791 {
  792         vm_offset_t vaddr;
  793         vm_offset_t vendaddr;
  794         bus_addr_t paddr;
  795 
  796         if (map->pagesneeded == 0) {
  797                 CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
  798                     dmat->lowaddr, dmat->boundary, dmat->alignment);
  799                 CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
  800                     map, map->pagesneeded);
  801                 /*
  802                  * Count the number of bounce pages
  803                  * needed in order to complete this transfer
  804                  */
  805                 vaddr = (vm_offset_t)buf;
  806                 vendaddr = (vm_offset_t)buf + buflen;
  807 
  808                 while (vaddr < vendaddr) {
  809                         bus_size_t sg_len;
  810 
  811                         KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
  812                         sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
  813                         paddr = pmap_kextract(vaddr);
  814                         if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
  815                             run_filter(dmat, paddr) != 0) {
  816                                 sg_len = roundup2(sg_len, dmat->alignment);
  817                                 map->pagesneeded++;
  818                         }
  819                         vaddr += sg_len;
  820                 }
  821                 CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
  822         }
  823 }
  824 
  825 static int
  826 _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
  827 {
  828 
  829         /* Reserve Necessary Bounce Pages */
  830         mtx_lock(&bounce_lock);
  831         if (flags & BUS_DMA_NOWAIT) {
  832                 if (reserve_bounce_pages(dmat, map, 0) != 0) {
  833                         mtx_unlock(&bounce_lock);
  834                         return (ENOMEM);
  835                 }
  836         } else {
  837                 if (reserve_bounce_pages(dmat, map, 1) != 0) {
  838                         /* Queue us for resources */
  839                         STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
  840                             map, links);
  841                         mtx_unlock(&bounce_lock);
  842                         return (EINPROGRESS);
  843                 }
  844         }
  845         mtx_unlock(&bounce_lock);
  846 
  847         return (0);
  848 }
  849 
  850 /*
  851  * Add a single contiguous physical range to the segment list.
  852  */
  853 static int
  854 _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
  855     bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
  856 {
  857         bus_addr_t baddr, bmask;
  858         int seg;
  859 
  860         /*
  861          * Make sure we don't cross any boundaries.
  862          */
  863         bmask = ~(dmat->boundary - 1);
  864         if (dmat->boundary > 0) {
  865                 baddr = (curaddr + dmat->boundary) & bmask;
  866                 if (sgsize > (baddr - curaddr))
  867                         sgsize = (baddr - curaddr);
  868         }
  869         /*
  870          * Insert chunk into a segment, coalescing with
  871          * the previous segment if possible.
  872          */
  873         seg = *segp;
  874         if (seg >= 0 &&
  875             curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
  876             (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
  877             (dmat->boundary == 0 ||
  878              (segs[seg].ds_addr & bmask) == (curaddr & bmask))) {
  879                 segs[seg].ds_len += sgsize;
  880         } else {
  881                 if (++seg >= dmat->nsegments)
  882                         return (0);
  883                 segs[seg].ds_addr = curaddr;
  884                 segs[seg].ds_len = sgsize;
  885         }
  886         *segp = seg;
  887         return (sgsize);
  888 }
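
      /*
       * Worked example (editor's note): with boundary = 0x10000 (64 KB),
       * bmask = ~0xffff.  For curaddr = 0x1fc00 and sgsize = 0x1000, the next
       * boundary is baddr = (0x1fc00 + 0x10000) & bmask = 0x20000, so sgsize
       * is clipped to baddr - curaddr = 0x400 and the remaining 0xc00 bytes
       * start a new segment on the next call.  A chunk is merged into the
       * previous segment only when it is physically contiguous with it, the
       * combined length stays within maxsegsz, and both ends lie in the same
       * boundary window.
       */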
  889 
  890 /*
  891  * Utility function to load a physical buffer.  segp contains
  892  * the starting segment on entrance, and the ending segment on exit.
  893  */
  894 int
  895 _bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
  896     vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
  897     int *segp)
  898 {
  899         bus_addr_t curaddr;
  900         bus_size_t sgsize;
  901         int error;
  902 
  903         if (segs == NULL)
  904                 segs = dmat->segments;
  905 
  906         if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
  907                 _bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
  908                 if (map->pagesneeded != 0) {
  909                         error = _bus_dmamap_reserve_pages(dmat, map, flags);
  910                         if (error)
  911                                 return (error);
  912                 }
  913         }
  914 
  915         while (buflen > 0) {
  916                 curaddr = buf;
  917                 sgsize = MIN(buflen, dmat->maxsegsz);
  918                 if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
  919                     map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
  920                         sgsize = MIN(sgsize, PAGE_SIZE);
  921                         curaddr = add_bounce_page(dmat, map, 0, curaddr,
  922                             sgsize);
  923                 }
  924                 sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
  925                     segp);
  926                 if (sgsize == 0)
  927                         break;
  928                 buf += sgsize;
  929                 buflen -= sgsize;
  930         }
  931 
  932         /*
  933          * Did we fit?
  934          */
  935         if (buflen != 0) {
  936                 bus_dmamap_unload(dmat, map);
  937                 return (EFBIG); /* XXX better return value here? */
  938         }
  939         return (0);
  940 }
  941 
  942 int
  943 _bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
  944     struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
  945     bus_dma_segment_t *segs, int *segp)
  946 {
  947 
  948         return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
  949             segs, segp));
  950 }
  951 
  952 /*
  953  * Utility function to load a linear buffer.  segp contains
  954  * the starting segment on entrance, and the ending segment on exit.
  955  * Only the kernel pmap is supported; user-space buffers are not handled.
  956  */
  957 int
  958 _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
  959     bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs,
  960     int *segp)
  961 {
  962         bus_size_t sgsize;
  963         bus_addr_t curaddr;
  964         struct sync_list *sl;
  965         vm_offset_t vaddr = (vm_offset_t)buf;
  966         int error = 0;
  967 
  968 
  969         if (segs == NULL)
  970                 segs = dmat->segments;
  971         if ((flags & BUS_DMA_LOAD_MBUF) != 0)
  972                 map->flags |= DMAMAP_CACHE_ALIGNED;
  973 
  974         if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
  975                 _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
  976                 if (map->pagesneeded != 0) {
  977                         error = _bus_dmamap_reserve_pages(dmat, map, flags);
  978                         if (error)
  979                                 return (error);
  980                 }
  981         }
  982         CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
  983             "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);
  984 
  985         while (buflen > 0) {
  986                 /*
  987                  * Get the physical address for this segment.
  988                  *
  989                  * XXX Don't support checking for coherent mappings
  990                  * XXX in user address space.
  991                  */
  992                 KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
  993                 curaddr = pmap_kextract(vaddr);
  994 
  995                 /*
  996                  * Compute the segment size, and adjust counts.
  997                  */
  998                 sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
  999                 if (sgsize > dmat->maxsegsz)
 1000                         sgsize = dmat->maxsegsz;
 1001                 if (buflen < sgsize)
 1002                         sgsize = buflen;
 1003 
 1004                 if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
 1005                     map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
 1006                         curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
 1007                             sgsize);
 1008                 } else {
 1009                         sl = &map->slist[map->sync_count - 1];
 1010                         if (map->sync_count == 0 ||
 1011                             vaddr != sl->vaddr + sl->datacount) {
 1012                                 if (++map->sync_count > dmat->nsegments)
 1013                                         goto cleanup;
 1014                                 sl++;
 1015                                 sl->vaddr = vaddr;
 1016                                 sl->datacount = sgsize;
 1017                                 sl->busaddr = curaddr;
 1018                         } else
 1019                                 sl->datacount += sgsize;
 1020                 }
 1021                 sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
 1022                     segp);
 1023                 if (sgsize == 0)
 1024                         break;
 1025                 vaddr += sgsize;
 1026                 buflen -= sgsize;
 1027         }
 1028 
 1029 cleanup:
 1030         /*
 1031          * Did we fit?
 1032          */
 1033         if (buflen != 0) {
 1034                 bus_dmamap_unload(dmat, map);
 1035                 error = EFBIG; /* XXX better return value here? */
 1036         }
 1037         return (error);
 1038 }
 1039 
 1040 void
 1041 _bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
 1042     struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
 1043 {
 1044 
 1045         KASSERT(dmat != NULL, ("dmatag is NULL"));
 1046         KASSERT(map != NULL, ("dmamap is NULL"));
 1047         map->mem = *mem;
 1048         map->callback = callback;
 1049         map->callback_arg = callback_arg;
 1050 }
 1051 
 1052 bus_dma_segment_t *
 1053 _bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
 1054     bus_dma_segment_t *segs, int nsegs, int error)
 1055 {
 1056 
 1057         if (segs == NULL)
 1058                 segs = dmat->segments;
 1059         return (segs);
 1060 }
 1061 
 1062 /*
 1063  * Release the mapping held by map.
 1064  */
 1065 void
 1066 bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
 1067 {
 1068         struct bounce_page *bpage;
 1069 
 1070         while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
 1071                 STAILQ_REMOVE_HEAD(&map->bpages, links);
 1072                 free_bounce_page(dmat, bpage);
 1073         }
 1074         map->sync_count = 0;
 1075         return;
 1076 }
 1077 
 1078 static void
 1079 bus_dmamap_sync_buf(vm_offset_t buf, int len, bus_dmasync_op_t op, int aligned)
 1080 {
 1081         char tmp_cl[mips_dcache_max_linesize], tmp_clend[mips_dcache_max_linesize];
 1082         vm_offset_t buf_cl, buf_clend;
 1083         vm_size_t size_cl, size_clend;
 1084         int cache_linesize_mask = mips_dcache_max_linesize - 1;
 1085 
 1086         /*
 1087          * dcache invalidation operates on cache line aligned addresses
 1088          * and could modify areas of memory that share the same cache line
 1089          * at the beginning and the ending of the buffer. In order to
 1090          * prevent a data loss we save these chunks in temporary buffer
 1091          * before invalidation and restore them afer it.
 1092          *
 1093          * If the aligned flag is set the buffer is either an mbuf or came from
 1094          * our allocator caches.  In both cases they are always sized and
 1095          * aligned to cacheline boundaries, so we can skip preserving nearby
 1096          * data if a transfer appears to overlap cachelines.  An mbuf in
 1097          * particular will usually appear to be overlapped because of offsetting
 1098          * within the buffer to align the L3 headers, but we know that the bytes
 1099  * preceding that offset are part of the same mbuf memory and are not
 1100          * unrelated adjacent data (and a rule of mbuf handling is that the cpu
 1101          * is not allowed to touch the mbuf while dma is in progress, including
 1102          * header fields).
 1103          */
 1104         if (aligned) {
 1105                 size_cl = 0;
 1106                 size_clend = 0;
 1107         } else {
 1108                 buf_cl = buf & ~cache_linesize_mask;
 1109                 size_cl = buf & cache_linesize_mask;
 1110                 buf_clend = buf + len;
 1111                 size_clend = (mips_dcache_max_linesize -
 1112                     (buf_clend & cache_linesize_mask)) & cache_linesize_mask;
 1113         }
 1114 
 1115         switch (op) {
 1116         case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
 1117         case BUS_DMASYNC_POSTREAD:
 1118 
 1119                 /*
 1120                  * Save buffers that might be modified by invalidation
 1121                  */
 1122                 if (size_cl)
 1123                         memcpy (tmp_cl, (void*)buf_cl, size_cl);
 1124                 if (size_clend)
 1125                         memcpy (tmp_clend, (void*)buf_clend, size_clend);
 1126                 mips_dcache_inv_range(buf, len);
 1127                 /*
 1128                  * Restore them
 1129                  */
 1130                 if (size_cl)
 1131                         memcpy ((void*)buf_cl, tmp_cl, size_cl);
 1132                 if (size_clend)
 1133                         memcpy ((void*)buf_clend, tmp_clend, size_clend);
 1134                 /*
 1135                  * Copies above have brought corresponding memory
 1136                  * cache lines back into dirty state. Write them back
 1137                  * out and invalidate affected cache lines again if
 1138                  * necessary.
 1139                  */
 1140                 if (size_cl)
 1141                         mips_dcache_wbinv_range(buf_cl, size_cl);
 1142                 if (size_clend && (size_cl == 0 ||
 1143                     buf_clend - buf_cl > mips_dcache_max_linesize))
 1144                         mips_dcache_wbinv_range(buf_clend, size_clend);
 1145                 break;
 1146 
 1147         case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
 1148                 mips_dcache_wbinv_range(buf, len);
 1149                 break;
 1150 
 1151         case BUS_DMASYNC_PREREAD:
 1152                 /*
 1153                  * Save buffers that might be modified by invalidation
 1154                  */
 1155                 if (size_cl)
 1156                         memcpy (tmp_cl, (void *)buf_cl, size_cl);
 1157                 if (size_clend)
 1158                         memcpy (tmp_clend, (void *)buf_clend, size_clend);
 1159                 mips_dcache_inv_range(buf, len);
 1160                 /*
 1161                  * Restore them
 1162                  */
 1163                 if (size_cl)
 1164                         memcpy ((void *)buf_cl, tmp_cl, size_cl);
 1165                 if (size_clend)
 1166                         memcpy ((void *)buf_clend, tmp_clend, size_clend);
 1167                 /*
 1168                  * Copies above have brought corresponding memory
 1169                  * cache lines back into dirty state. Write them back
 1170                  * out and invalidate affected cache lines again if
 1171                  * necessary.
 1172                  */
 1173                 if (size_cl)
 1174                         mips_dcache_wbinv_range(buf_cl, size_cl);
 1175                 if (size_clend && (size_cl == 0 ||
 1176                     buf_clend - buf_cl > mips_dcache_max_linesize))
 1177                         mips_dcache_wbinv_range(buf_clend, size_clend);
 1178                 break;
 1179 
 1180         case BUS_DMASYNC_PREWRITE:
 1181 #ifdef BUS_DMA_FORCE_WBINV
 1182                 mips_dcache_wbinv_range(buf, len);
 1183 #else
 1184                 mips_dcache_wb_range(buf, len);
 1185 #endif
 1186                 break;
 1187         }
 1188 }
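
      /*
       * Worked example (editor's note, assuming a 32-byte line size): for
       * buf = 0x80000405 and len = 0x100, buf_cl = 0x80000400 and
       * size_cl = 5, i.e. 5 unrelated bytes share the first cache line with
       * the buffer.  buf_clend = 0x80000505 and size_clend = (32 - 5) & 31
       * = 27 bytes share the last line.  Those 5 + 27 bytes are copied to
       * tmp_cl/tmp_clend before the invalidation and restored (and written
       * back) afterwards, so the invalidation cannot destroy neighbouring
       * data.
       */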
 1189 
 1190 static void
 1191 _bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
 1192 {
 1193         struct bounce_page *bpage;
 1194 
 1195         STAILQ_FOREACH(bpage, &map->bpages, links) {
 1196                 if (op & BUS_DMASYNC_PREWRITE) {
 1197                         if (bpage->datavaddr != 0)
 1198                                 bcopy((void *)bpage->datavaddr,
 1199                                     (void *)(bpage->vaddr_nocache != 0 ?
 1200                                              bpage->vaddr_nocache :
 1201                                              bpage->vaddr),
 1202                                     bpage->datacount);
 1203                         else
 1204                                 physcopyout(bpage->dataaddr,
 1205                                     (void *)(bpage->vaddr_nocache != 0 ?
 1206                                              bpage->vaddr_nocache :
 1207                                              bpage->vaddr),
 1208                                     bpage->datacount);
 1209                         if (bpage->vaddr_nocache == 0) {
 1210 #ifdef BUS_DMA_FORCE_WBINV
 1211                                 mips_dcache_wbinv_range(bpage->vaddr,
 1212                                     bpage->datacount);
 1213 #else
 1214                                 mips_dcache_wb_range(bpage->vaddr,
 1215                                     bpage->datacount);
 1216 #endif
 1217                         }
 1218                         dmat->bounce_zone->total_bounced++;
 1219                 }
 1220                 if (op & BUS_DMASYNC_POSTREAD) {
 1221                         if (bpage->vaddr_nocache == 0) {
 1222                                 mips_dcache_inv_range(bpage->vaddr,
 1223                                     bpage->datacount);
 1224                         }
 1225                         if (bpage->datavaddr != 0)
 1226                                 bcopy((void *)(bpage->vaddr_nocache != 0 ?
 1227                                     bpage->vaddr_nocache : bpage->vaddr),
 1228                                     (void *)bpage->datavaddr, bpage->datacount);
 1229                         else
 1230                                 physcopyin((void *)(bpage->vaddr_nocache != 0 ?
 1231                                     bpage->vaddr_nocache : bpage->vaddr),
 1232                                     bpage->dataaddr, bpage->datacount);
 1233                         dmat->bounce_zone->total_bounced++;
 1234                 }
 1235         }
 1236 }
 1237 
 1238 void
 1239 bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
 1240 {
 1241         struct sync_list *sl, *end;
 1242         int aligned;
 1243 
 1244         if (op == BUS_DMASYNC_POSTWRITE)
 1245                 return;
 1246         if (STAILQ_FIRST(&map->bpages))
 1247                 _bus_dmamap_sync_bp(dmat, map, op);
 1248 
 1249         if ((dmat->flags & BUS_DMA_COHERENT) ||
 1250             (map->flags & DMAMAP_UNCACHEABLE)) {
 1251                 if (op & BUS_DMASYNC_PREWRITE)
 1252                         mips_sync();
 1253                 return;
 1254         }
 1255 
 1256         aligned = (map->flags & DMAMAP_CACHE_ALIGNED) ? 1 : 0;
 1257 
 1258         CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
 1259         if (map->sync_count) {
 1260                 end = &map->slist[map->sync_count];
 1261                 for (sl = &map->slist[0]; sl != end; sl++)
 1262                         bus_dmamap_sync_buf(sl->vaddr, sl->datacount, op,
 1263                             aligned);
 1264         }
 1265 }
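
      /*
       * Example (editor's sketch): the usual ordering a driver follows around
       * a single transfer on a loaded map; "sc" and the device start and
       * completion steps are hypothetical.
       *
       *     bus_dmamap_sync(sc->sc_dmat, sc->sc_map,
       *         BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
       *     (program the device and start the transfer)
       *     (wait for the completion interrupt)
       *     bus_dmamap_sync(sc->sc_dmat, sc->sc_map,
       *         BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
       *     (the CPU may now safely read data the device wrote)
       */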
 1266 
 1267 static void
 1268 init_bounce_pages(void *dummy __unused)
 1269 {
 1270 
 1271         total_bpages = 0;
 1272         STAILQ_INIT(&bounce_zone_list);
 1273         STAILQ_INIT(&bounce_map_waitinglist);
 1274         STAILQ_INIT(&bounce_map_callbacklist);
 1275         mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
 1276 }
 1277 SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);
 1278 
 1279 static struct sysctl_ctx_list *
 1280 busdma_sysctl_tree(struct bounce_zone *bz)
 1281 {
 1282         return (&bz->sysctl_tree);
 1283 }
 1284 
 1285 static struct sysctl_oid *
 1286 busdma_sysctl_tree_top(struct bounce_zone *bz)
 1287 {
 1288         return (bz->sysctl_tree_top);
 1289 }
 1290 
 1291 static int
 1292 alloc_bounce_zone(bus_dma_tag_t dmat)
 1293 {
 1294         struct bounce_zone *bz;
 1295 
 1296         /* Check to see if we already have a suitable zone */
 1297         STAILQ_FOREACH(bz, &bounce_zone_list, links) {
 1298                 if ((dmat->alignment <= bz->alignment)
 1299                  && (dmat->lowaddr >= bz->lowaddr)) {
 1300                         dmat->bounce_zone = bz;
 1301                         return (0);
 1302                 }
 1303         }
 1304 
 1305         if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_BUSDMA,
 1306             M_NOWAIT | M_ZERO)) == NULL)
 1307                 return (ENOMEM);
 1308 
 1309         STAILQ_INIT(&bz->bounce_page_list);
 1310         bz->free_bpages = 0;
 1311         bz->reserved_bpages = 0;
 1312         bz->active_bpages = 0;
 1313         bz->lowaddr = dmat->lowaddr;
 1314         bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
 1315         bz->map_count = 0;
 1316         snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
 1317         busdma_zonecount++;
 1318         snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
 1319         STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
 1320         dmat->bounce_zone = bz;
 1321 
 1322         sysctl_ctx_init(&bz->sysctl_tree);
 1323         bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
 1324             SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
 1325             CTLFLAG_RD, 0, "");
 1326         if (bz->sysctl_tree_top == NULL) {
 1327                 sysctl_ctx_free(&bz->sysctl_tree);
 1328                 return (0);     /* XXX error code? */
 1329         }
 1330 
 1331         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
 1332             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
 1333             "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
 1334             "Total bounce pages");
 1335         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
 1336             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
 1337             "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
 1338             "Free bounce pages");
 1339         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
 1340             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
 1341             "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
 1342             "Reserved bounce pages");
 1343         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
 1344             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
 1345             "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
 1346             "Active bounce pages");
 1347         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
 1348             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
 1349             "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
 1350             "Total bounce requests");
 1351         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
 1352             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
 1353             "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
 1354             "Total bounce requests that were deferred");
 1355         SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
 1356             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
 1357             "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
 1358         SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
 1359             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
 1360             "alignment", CTLFLAG_RD, &bz->alignment, "");
 1361 
 1362         return (0);
 1363 }
 1364 
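      /*
       * Add up to 'numpages' bounce pages to the tag's bounce zone.  Each
       * page is physically contiguous, allocated below the zone's lowaddr
       * limit, and mapped uncached via pmap_mapdev() so bounced data is not
       * staged through the data cache.  Returns the number of pages added.
       */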
 1365 static int
 1366 alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
 1367 {
 1368         struct bounce_zone *bz;
 1369         int count;
 1370 
 1371         bz = dmat->bounce_zone;
 1372         count = 0;
 1373         while (numpages > 0) {
 1374                 struct bounce_page *bpage;
 1375 
 1376                 bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_BUSDMA,
 1377                                                      M_NOWAIT | M_ZERO);
 1378 
 1379                 if (bpage == NULL)
 1380                         break;
 1381                 bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_BOUNCE,
 1382                                                          M_NOWAIT, 0ul,
 1383                                                          bz->lowaddr,
 1384                                                          PAGE_SIZE,
 1385                                                          0);
 1386                 if (bpage->vaddr == 0) {
 1387                         free(bpage, M_BUSDMA);
 1388                         break;
 1389                 }
 1390                 bpage->busaddr = pmap_kextract(bpage->vaddr);
 1391                 bpage->vaddr_nocache =
 1392                     (vm_offset_t)pmap_mapdev(bpage->busaddr, PAGE_SIZE);
 1393                 mtx_lock(&bounce_lock);
 1394                 STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
 1395                 total_bpages++;
 1396                 bz->total_bpages++;
 1397                 bz->free_bpages++;
 1398                 mtx_unlock(&bounce_lock);
 1399                 count++;
 1400                 numpages--;
 1401         }
 1402         return (count);
 1403 }
 1404 
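      /*
       * Reserve free bounce pages from the tag's zone for 'map'; called with
       * bounce_lock held.  If 'commit' is zero and the request cannot be met
       * in full, nothing is reserved; otherwise as many pages as possible
       * move from the free to the reserved count.  Returns the number of
       * pages still outstanding (0 once the map is fully reserved).
       */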
 1405 static int
 1406 reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
 1407 {
 1408         struct bounce_zone *bz;
 1409         int pages;
 1410 
 1411         mtx_assert(&bounce_lock, MA_OWNED);
 1412         bz = dmat->bounce_zone;
 1413         pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
 1414         if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
 1415                 return (map->pagesneeded - (map->pagesreserved + pages));
 1416         bz->free_bpages -= pages;
 1417         bz->reserved_bpages += pages;
 1418         map->pagesreserved += pages;
 1419         pages = map->pagesneeded - map->pagesreserved;
 1420 
 1421         return (pages);
 1422 }
 1423 
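      /*
       * Take a reserved bounce page from the zone and assign it to 'map' to
       * stand in for 'size' bytes of the client buffer at 'vaddr'/'addr'.
       * With BUS_DMA_KEEP_PG_OFFSET the original buffer's offset within the
       * page is preserved in the bounce page's addresses.  Returns the bus
       * address the device should use for this segment.
       */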
 1424 static bus_addr_t
 1425 add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
 1426                 bus_addr_t addr, bus_size_t size)
 1427 {
 1428         struct bounce_zone *bz;
 1429         struct bounce_page *bpage;
 1430 
 1431         KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
 1432         KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));
 1433 
 1434         bz = dmat->bounce_zone;
 1435         if (map->pagesneeded == 0)
 1436                 panic("add_bounce_page: map doesn't need any pages");
 1437         map->pagesneeded--;
 1438 
 1439         if (map->pagesreserved == 0)
 1440                 panic("add_bounce_page: map doesn't have any reserved pages");
 1441         map->pagesreserved--;
 1442 
 1443         mtx_lock(&bounce_lock);
 1444         bpage = STAILQ_FIRST(&bz->bounce_page_list);
 1445         if (bpage == NULL)
 1446                 panic("add_bounce_page: free page list is empty");
 1447 
 1448         STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
 1449         bz->reserved_bpages--;
 1450         bz->active_bpages++;
 1451         mtx_unlock(&bounce_lock);
 1452 
 1453         if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
 1454                 /* Page offset needs to be preserved. */
 1455                 bpage->vaddr |= addr & PAGE_MASK;
 1456                 bpage->busaddr |= addr & PAGE_MASK;
 1457         }
 1458         bpage->datavaddr = vaddr;
 1459         bpage->dataaddr = addr;
 1460         bpage->datacount = size;
 1461         STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
 1462         return (bpage->busaddr);
 1463 }
 1464 
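      /*
       * Return a bounce page to its zone's free list.  If a map is waiting
       * for bounce pages and its reservation can now be completed, move it
       * to the callback list and schedule busdma_swi() to retry the
       * deferred load.
       */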
 1465 static void
 1466 free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
 1467 {
 1468         struct bus_dmamap *map;
 1469         struct bounce_zone *bz;
 1470 
 1471         bz = dmat->bounce_zone;
 1472         bpage->datavaddr = 0;
 1473         bpage->datacount = 0;
 1474         if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
 1475                 /*
 1476                  * Reset the bounce page to start at offset 0.  Other uses
 1477                  * of this bounce page may need to store a full page of
 1478                  * data and/or assume it starts on a page boundary.
 1479                  */
 1480                 bpage->vaddr &= ~PAGE_MASK;
 1481                 bpage->busaddr &= ~PAGE_MASK;
 1482         }
 1483 
 1484         mtx_lock(&bounce_lock);
 1485         STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
 1486         bz->free_bpages++;
 1487         bz->active_bpages--;
 1488         if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
 1489                 if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
 1490                         STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
 1491                         STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
 1492                                            map, links);
 1493                         busdma_swi_pending = 1;
 1494                         bz->total_deferred++;
 1495                         swi_sched(vm_ih, 0);
 1496                 }
 1497         }
 1498         mtx_unlock(&bounce_lock);
 1499 }
 1500 
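      /*
       * Software interrupt handler: replay the deferred bus_dmamap_load
       * requests queued on bounce_map_callbacklist, taking the tag's lock
       * function around each reload so the callback runs with the client's
       * usual locking.
       */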
 1501 void
 1502 busdma_swi(void)
 1503 {
 1504         bus_dma_tag_t dmat;
 1505         struct bus_dmamap *map;
 1506 
 1507         mtx_lock(&bounce_lock);
 1508         while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
 1509                 STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
 1510                 mtx_unlock(&bounce_lock);
 1511                 dmat = map->dmat;
 1512                 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
 1513                 bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
 1514                     map->callback_arg, BUS_DMA_WAITOK);
 1515                 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
 1516                 mtx_lock(&bounce_lock);
 1517         }
 1518         mtx_unlock(&bounce_lock);
 1519 }
