FreeBSD/Linux Kernel Cross Reference
sys/powerpc/powerpc/busdma_machdep.c


/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * From amd64/busdma_machdep.c, r204214
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>

#include "iommu_if.h"

#define MAX_BPAGES MIN(8192, physmem/40)

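/*
 * Illustrative arithmetic (not from the original source): physmem is
 * measured in pages, so on a machine with 4 GB of RAM and 4 KB pages
 * physmem is about 1048576 and physmem/40 is about 26214, and the MIN()
 * clamps the bounce pool at 8192 pages.  On a 256 MB machine, physmem/40
 * is about 1638, and that smaller value wins instead.
 */
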
struct bounce_page;
struct bounce_zone;

struct bus_dma_tag {
        bus_dma_tag_t     parent;
        bus_size_t        alignment;
        bus_addr_t        boundary;
        bus_addr_t        lowaddr;
        bus_addr_t        highaddr;
        bus_dma_filter_t *filter;
        void             *filterarg;
        bus_size_t        maxsize;
        bus_size_t        maxsegsz;
        u_int             nsegments;
        int               flags;
        int               ref_count;
        int               map_count;
        bus_dma_lock_t   *lockfunc;
        void             *lockfuncarg;
        struct bounce_zone *bounce_zone;
        device_t          iommu;
        void             *iommu_cookie;
};

static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Busdma parameters");

struct bus_dmamap {
        STAILQ_HEAD(, bounce_page) bpages;
        int                    pagesneeded;
        int                    pagesreserved;
        bus_dma_tag_t          dmat;
        struct memdesc         mem;
        bus_dma_segment_t     *segments;
        int                    nsegs;
        bus_dmamap_callback_t *callback;
        void                  *callback_arg;
        STAILQ_ENTRY(bus_dmamap) links;
        int                    contigalloc;
};

static MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");

static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

#define dmat_alignment(dmat)    ((dmat)->alignment)
#define dmat_flags(dmat)        ((dmat)->flags)
#define dmat_lowaddr(dmat)      ((dmat)->lowaddr)
#define dmat_lockfunc(dmat)     ((dmat)->lockfunc)
#define dmat_lockfuncarg(dmat)  ((dmat)->lockfuncarg)

#include "../../kern/subr_busdma_bounce.c"

/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, call the filter callback
 * to check for a match; if there is no filter callback, assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
        int retval;

        retval = 0;

        do {
                if (dmat->filter == NULL && dmat->iommu == NULL &&
                    paddr > dmat->lowaddr && paddr <= dmat->highaddr)
                        retval = 1;
                if (dmat->filter == NULL &&
                    !vm_addr_align_ok(paddr, dmat->alignment))
                        retval = 1;
                if (dmat->filter != NULL &&
                    (*dmat->filter)(dmat->filterarg, paddr) != 0)
                        retval = 1;

                dmat = dmat->parent;
        } while (retval == 0 && dmat != NULL);
        return (retval);
}

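/*
 * Worked example (hypothetical values): a tag created for a 32-bit DMA
 * engine typically has lowaddr = BUS_SPACE_MAXADDR_32BIT (0xFFFFFFFF),
 * highaddr = BUS_SPACE_MAXADDR, and no filter or IOMMU.  A physical
 * address such as 0x123456000 falls inside the (lowaddr, highaddr]
 * exclusion window, so run_filter() returns 1 and that page must be
 * bounced; an address below 4 GB that also passes the alignment test
 * yields 0 and can be mapped directly.
 */
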
#define BUS_DMA_COULD_BOUNCE    BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP  BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
                   bus_addr_t boundary, bus_addr_t lowaddr,
                   bus_addr_t highaddr, bus_dma_filter_t *filter,
                   void *filterarg, bus_size_t maxsize, int nsegments,
                   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
                   void *lockfuncarg, bus_dma_tag_t *dmat)
{
        bus_dma_tag_t newtag;
        int error = 0;

        /* Basic sanity checking */
        if (boundary != 0 && boundary < maxsegsz)
                maxsegsz = boundary;

        if (maxsegsz == 0) {
                return (EINVAL);
        }

        /* Return a NULL tag on failure */
        *dmat = NULL;

        newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
            M_ZERO | M_NOWAIT);
        if (newtag == NULL) {
                CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
                    __func__, newtag, 0, error);
                return (ENOMEM);
        }

        newtag->parent = parent;
        newtag->alignment = alignment;
        newtag->boundary = boundary;
        newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
        newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
        newtag->filter = filter;
        newtag->filterarg = filterarg;
        newtag->maxsize = maxsize;
        newtag->nsegments = nsegments;
        newtag->maxsegsz = maxsegsz;
        newtag->flags = flags;
        newtag->ref_count = 1; /* Count ourself */
        newtag->map_count = 0;
        if (lockfunc != NULL) {
                newtag->lockfunc = lockfunc;
                newtag->lockfuncarg = lockfuncarg;
        } else {
                newtag->lockfunc = _busdma_dflt_lock;
                newtag->lockfuncarg = NULL;
        }

        /* Take into account any restrictions imposed by our parent tag */
        if (parent != NULL) {
                newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
                newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
                if (newtag->boundary == 0)
                        newtag->boundary = parent->boundary;
                else if (parent->boundary != 0)
                        newtag->boundary = MIN(parent->boundary,
                                               newtag->boundary);
                if (newtag->filter == NULL) {
                        /*
                         * Short circuit looking at our parent directly
                         * since we have encapsulated all of its information
                         */
                        newtag->filter = parent->filter;
                        newtag->filterarg = parent->filterarg;
                        newtag->parent = parent->parent;
                }
                if (newtag->parent != NULL)
                        atomic_add_int(&parent->ref_count, 1);
                newtag->iommu = parent->iommu;
                newtag->iommu_cookie = parent->iommu_cookie;
        }

        if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) && newtag->iommu == NULL)
                newtag->flags |= BUS_DMA_COULD_BOUNCE;

        if (newtag->alignment > 1)
                newtag->flags |= BUS_DMA_COULD_BOUNCE;

        if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
            (flags & BUS_DMA_ALLOCNOW) != 0) {
                struct bounce_zone *bz;

                /* Must bounce */

                if ((error = alloc_bounce_zone(newtag)) != 0) {
                        free(newtag, M_DEVBUF);
                        return (error);
                }
                bz = newtag->bounce_zone;

                if (ptoa(bz->total_bpages) < maxsize) {
                        int pages;

                        pages = atop(maxsize) - bz->total_bpages;

                        /* Add pages to our bounce pool */
                        if (alloc_bounce_pages(newtag, pages) < pages)
                                error = ENOMEM;
                }
                /* Performed initial allocation */
                newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
        }

        if (error != 0) {
                free(newtag, M_DEVBUF);
        } else {
                *dmat = newtag;
        }
        CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
            __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
        return (error);
}

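/*
 * Usage sketch (hypothetical driver code, not part of this file): a driver
 * typically creates one tag at attach time describing its DMA engine's
 * constraints, inheriting bus restrictions through the parent tag.  The
 * names "dev" and "sc" are placeholders.
 *
 *	error = bus_dma_tag_create(
 *	    bus_get_dma_tag(dev),	(parent: inherit bus restrictions)
 *	    4,				(alignment)
 *	    0,				(boundary: none)
 *	    BUS_SPACE_MAXADDR_32BIT,	(lowaddr: device addresses < 4 GB)
 *	    BUS_SPACE_MAXADDR,		(highaddr)
 *	    NULL, NULL,			(no filter)
 *	    MAXBSIZE,			(maxsize of a mapping)
 *	    1,				(nsegments)
 *	    MAXBSIZE,			(maxsegsz)
 *	    0,				(flags)
 *	    NULL, NULL,			(default locking)
 *	    &sc->sc_dmat);
 */
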
void
bus_dma_template_clone(bus_dma_template_t *t, bus_dma_tag_t dmat)
{

        if (t == NULL || dmat == NULL)
                return;

        t->parent = dmat->parent;
        t->alignment = dmat->alignment;
        t->boundary = dmat->boundary;
        t->lowaddr = dmat->lowaddr;
        t->highaddr = dmat->highaddr;
        t->maxsize = dmat->maxsize;
        t->nsegments = dmat->nsegments;
        t->maxsegsize = dmat->maxsegsz;
        t->flags = dmat->flags;
        t->lockfunc = dmat->lockfunc;
        t->lockfuncarg = dmat->lockfuncarg;
}

int
bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
{

        return (0);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
        bus_dma_tag_t dmat_copy __unused;
        int error;

        error = 0;
        dmat_copy = dmat;

        if (dmat != NULL) {
                if (dmat->map_count != 0) {
                        error = EBUSY;
                        goto out;
                }

                while (dmat != NULL) {
                        bus_dma_tag_t parent;

                        parent = dmat->parent;
                        atomic_subtract_int(&dmat->ref_count, 1);
                        if (dmat->ref_count == 0) {
                                free(dmat, M_DEVBUF);
                                /*
                                 * Last reference count, so
                                 * release our reference
                                 * count on our parent.
                                 */
                                dmat = parent;
                        } else
                                dmat = NULL;
                }
        }
out:
        CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
        return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
        int error;

        error = 0;

        *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
                                     M_NOWAIT | M_ZERO);
        if (*mapp == NULL) {
                CTR3(KTR_BUSDMA, "%s: tag %p error %d",
                    __func__, dmat, ENOMEM);
                return (ENOMEM);
        }

        /*
         * Bouncing might be required if the driver asks for an active
         * exclusion region, a data alignment that is stricter than 1, and/or
         * an active address boundary.
         */
        if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
                /* Must bounce */
                struct bounce_zone *bz;
                int maxpages;

                if (dmat->bounce_zone == NULL) {
                        if ((error = alloc_bounce_zone(dmat)) != 0)
                                return (error);
                }
                bz = dmat->bounce_zone;

                /* Initialize the new map */
                STAILQ_INIT(&((*mapp)->bpages));

                /*
                 * Attempt to add pages to our pool on a per-instance
                 * basis up to a sane limit.
                 */
                if (dmat->alignment > 1)
                        maxpages = MAX_BPAGES;
                else
                        maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
                if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
                 || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
                        int pages;

                        pages = MAX(atop(dmat->maxsize), 1);
                        pages = MIN(maxpages - bz->total_bpages, pages);
                        pages = MAX(pages, 1);
                        if (alloc_bounce_pages(dmat, pages) < pages)
                                error = ENOMEM;

                        if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
                                if (error == 0)
                                        dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
                        } else {
                                error = 0;
                        }
                }
                bz->map_count++;
        }

        (*mapp)->nsegs = 0;
        (*mapp)->segments = (bus_dma_segment_t *)malloc(
            sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
            M_NOWAIT);
        if ((*mapp)->segments == NULL) {
                CTR3(KTR_BUSDMA, "%s: tag %p error %d",
                    __func__, dmat, ENOMEM);
                return (ENOMEM);
        }

        if (error == 0)
                dmat->map_count++;
        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
            __func__, dmat, dmat->flags, error);
        return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
                if (STAILQ_FIRST(&map->bpages) != NULL) {
                        CTR3(KTR_BUSDMA, "%s: tag %p error %d",
                            __func__, dmat, EBUSY);
                        return (EBUSY);
                }
                if (dmat->bounce_zone)
                        dmat->bounce_zone->map_count--;
        }
        free(map->segments, M_DEVBUF);
        free(map, M_DEVBUF);
        dmat->map_count--;
        CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
        return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
                 bus_dmamap_t *mapp)
{
        vm_memattr_t attr;
        int mflags;

        if (flags & BUS_DMA_NOWAIT)
                mflags = M_NOWAIT;
        else
                mflags = M_WAITOK;

        bus_dmamap_create(dmat, flags, mapp);

        if (flags & BUS_DMA_ZERO)
                mflags |= M_ZERO;
        if (flags & BUS_DMA_NOCACHE)
                attr = VM_MEMATTR_UNCACHEABLE;
        else
                attr = VM_MEMATTR_DEFAULT;

        /*
         * XXX:
         * (dmat->alignment <= dmat->maxsize) is just a quick hack; the exact
         * alignment guarantees of malloc need to be nailed down, and the
         * code below should be rewritten to take that into account.
         *
         * In the meantime, we'll warn the user if malloc gets it wrong.
         */
        if ((dmat->maxsize <= PAGE_SIZE) &&
           (dmat->alignment <= dmat->maxsize) &&
            dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
            attr == VM_MEMATTR_DEFAULT) {
                *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
        } else {
                /*
                 * XXX Use Contigmalloc until it is merged into this facility
                 *     and handles multi-seg allocations.  Nobody is doing
                 *     multi-seg allocations yet though.
                 * XXX Certain AGP hardware does.
                 */
                *vaddr = kmem_alloc_contig(dmat->maxsize, mflags, 0ul,
                    dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
                    dmat->boundary, attr);
                (*mapp)->contigalloc = 1;
        }
        if (*vaddr == NULL) {
                CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
                    __func__, dmat, dmat->flags, ENOMEM);
                return (ENOMEM);
        } else if (!vm_addr_align_ok(vtophys(*vaddr), dmat->alignment)) {
                printf("bus_dmamem_alloc failed to align memory properly.\n");
        }
        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
            __func__, dmat, dmat->flags, 0);
        return (0);
}

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{

        if (!map->contigalloc)
                free(vaddr, M_DEVBUF);
        else
                kmem_free(vaddr, dmat->maxsize);
        bus_dmamap_destroy(dmat, map);
        CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

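/*
 * Usage sketch (hypothetical driver code): a matched allocate/free pair
 * for a small descriptor ring.  "sc" and its fields are placeholders.
 *
 *	if (bus_dmamem_alloc(sc->sc_dmat, (void **)&sc->sc_ring,
 *	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->sc_ringmap) != 0)
 *		return (ENOMEM);
 *	...
 *	bus_dmamem_free(sc->sc_dmat, sc->sc_ring, sc->sc_ringmap);
 *
 * Note that bus_dmamem_free() above also destroys the map created by
 * bus_dmamem_alloc(), so the caller must not destroy it again.
 */
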
static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{
        bus_addr_t curaddr;
        bus_size_t sgsize;

        if (map->pagesneeded == 0) {
                CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
                    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
                    dmat->boundary, dmat->alignment);
                CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded);
                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                curaddr = buf;
                while (buflen != 0) {
                        sgsize = MIN(buflen, dmat->maxsegsz);
                        if (run_filter(dmat, curaddr) != 0) {
                                sgsize = MIN(sgsize,
                                    PAGE_SIZE - (curaddr & PAGE_MASK));
                                map->pagesneeded++;
                        }
                        curaddr += sgsize;
                        buflen -= sgsize;
                }
                CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
        }
}

static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
        vm_offset_t vaddr;
        vm_offset_t vendaddr;
        bus_addr_t paddr;

        if (map->pagesneeded == 0) {
                CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
                    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
                    dmat->boundary, dmat->alignment);
                CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded);
                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                vaddr = (vm_offset_t)buf;
                vendaddr = (vm_offset_t)buf + buflen;

                while (vaddr < vendaddr) {
                        bus_size_t sg_len;

                        sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
                        if (pmap == kernel_pmap)
                                paddr = pmap_kextract(vaddr);
                        else
                                paddr = pmap_extract(pmap, vaddr);
                        if (run_filter(dmat, paddr) != 0) {
                                sg_len = roundup2(sg_len, dmat->alignment);
                                map->pagesneeded++;
                        }
                        vaddr += sg_len;
                }
                CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
        }
}

/*
 * Add a single contiguous physical range to the segment list.
 */
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
                   bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
        int seg;

        /*
         * Make sure we don't cross any boundaries.
         */
        if (!vm_addr_bound_ok(curaddr, sgsize, dmat->boundary))
                sgsize = roundup2(curaddr, dmat->boundary) - curaddr;

        /*
         * Insert chunk into a segment, coalescing with
         * previous segment if possible.
         */
        seg = *segp;
        if (seg == -1) {
                seg = 0;
                segs[seg].ds_addr = curaddr;
                segs[seg].ds_len = sgsize;
        } else {
                if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
                    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
                    vm_addr_bound_ok(segs[seg].ds_addr,
                    segs[seg].ds_len + sgsize, dmat->boundary))
                        segs[seg].ds_len += sgsize;
                else {
                        if (++seg >= dmat->nsegments)
                                return (0);
                        segs[seg].ds_addr = curaddr;
                        segs[seg].ds_len = sgsize;
                }
        }
        *segp = seg;
        return (sgsize);
}

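/*
 * Worked example (illustrative numbers): with boundary = 0x1000, a chunk
 * at curaddr = 0x3ff0 with sgsize = 0x40 would cross the 0x4000 line, so
 * vm_addr_bound_ok() fails and the chunk is trimmed to
 * roundup2(0x3ff0, 0x1000) - 0x3ff0 = 0x10 bytes; the remaining 0x30
 * bytes are picked up as a new segment on the caller's next loop
 * iteration.
 */
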
/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat,
                      bus_dmamap_t map,
                      vm_paddr_t buf, bus_size_t buflen,
                      int flags,
                      bus_dma_segment_t *segs,
                      int *segp)
{
        bus_addr_t curaddr;
        bus_size_t sgsize;
        int error;

        if (segs == NULL)
                segs = map->segments;

        if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
                _bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
                if (map->pagesneeded != 0) {
                        error = _bus_dmamap_reserve_pages(dmat, map, flags);
                        if (error)
                                return (error);
                }
        }

        while (buflen > 0) {
                curaddr = buf;
                sgsize = MIN(buflen, dmat->maxsegsz);
                if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
                        sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
                        curaddr = add_bounce_page(dmat, map, 0, curaddr,
                            sgsize);
                }
                sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
                    segp);
                if (sgsize == 0)
                        break;
                buf += sgsize;
                buflen -= sgsize;
        }

        /*
         * Did we fit?
         */
        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

int
_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{

        return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
            segs, segp));
}

/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
                        bus_dmamap_t map,
                        void *buf, bus_size_t buflen,
                        pmap_t pmap,
                        int flags,
                        bus_dma_segment_t *segs,
                        int *segp)
{
        bus_size_t sgsize;
        bus_addr_t curaddr;
        vm_offset_t kvaddr, vaddr;
        int error;

        if (segs == NULL)
                segs = map->segments;

        if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
                _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
                if (map->pagesneeded != 0) {
                        error = _bus_dmamap_reserve_pages(dmat, map, flags);
                        if (error)
                                return (error);
                }
        }

        vaddr = (vm_offset_t)buf;

        while (buflen > 0) {
                bus_size_t max_sgsize;

                /*
                 * Get the physical address for this segment.
                 */
                if (pmap == kernel_pmap) {
                        curaddr = pmap_kextract(vaddr);
                        kvaddr = vaddr;
                } else {
                        curaddr = pmap_extract(pmap, vaddr);
                        kvaddr = 0;
                }

                /*
                 * Compute the segment size, and adjust counts.
                 */
                max_sgsize = MIN(buflen, dmat->maxsegsz);
                sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
                if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
                        sgsize = roundup2(sgsize, dmat->alignment);
                        sgsize = MIN(sgsize, max_sgsize);
                        curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
                            sgsize);
                } else {
                        sgsize = MIN(sgsize, max_sgsize);
                }

                sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
                    segp);
                if (sgsize == 0)
                        break;
                vaddr += sgsize;
                buflen -= sgsize;
        }

        /*
         * Did we fit?
         */
        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

void
_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
                    struct memdesc *mem, bus_dmamap_callback_t *callback,
                    void *callback_arg)
{

        if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
                map->dmat = dmat;
                map->mem = *mem;
                map->callback = callback;
                map->callback_arg = callback_arg;
        }
}

bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
                     bus_dma_segment_t *segs, int nsegs, int error)
{

        map->nsegs = nsegs;
        if (segs != NULL)
                memcpy(map->segments, segs, map->nsegs*sizeof(segs[0]));
        if (dmat->iommu != NULL)
                IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs,
                    dmat->lowaddr, dmat->highaddr, dmat->alignment,
                    dmat->boundary, dmat->iommu_cookie);

        if (segs != NULL)
                memcpy(segs, map->segments, map->nsegs*sizeof(segs[0]));
        else
                segs = map->segments;

        return (segs);
}

/*
 * Release the mapping held by map.
 */
void
bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        if (dmat->iommu) {
                IOMMU_UNMAP(dmat->iommu, map->segments, map->nsegs, dmat->iommu_cookie);
                map->nsegs = 0;
        }

        free_bounce_pages(dmat, map);
}

void
bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
        struct bounce_page *bpage;
        vm_offset_t datavaddr, tempvaddr;

        if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                /*
                 * Handle data bouncing.  We might also
                 * want to add support for invalidating
                 * the caches on broken hardware.
                 */
                CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
                    "performing bounce", __func__, dmat, dmat->flags, op);

                if (op & BUS_DMASYNC_PREWRITE) {
                        while (bpage != NULL) {
                                tempvaddr = 0;
                                datavaddr = bpage->datavaddr;
                                if (datavaddr == 0) {
                                        tempvaddr = pmap_quick_enter_page(
                                            bpage->datapage);
                                        datavaddr = tempvaddr |
                                            bpage->dataoffs;
                                }

                                bcopy((void *)datavaddr,
                                    (void *)bpage->vaddr, bpage->datacount);

                                if (tempvaddr != 0)
                                        pmap_quick_remove_page(tempvaddr);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                        dmat->bounce_zone->total_bounced++;
                }

                if (op & BUS_DMASYNC_POSTREAD) {
                        while (bpage != NULL) {
                                tempvaddr = 0;
                                datavaddr = bpage->datavaddr;
                                if (datavaddr == 0) {
                                        tempvaddr = pmap_quick_enter_page(
                                            bpage->datapage);
                                        datavaddr = tempvaddr |
                                            bpage->dataoffs;
                                }

                                bcopy((void *)bpage->vaddr,
                                    (void *)datavaddr, bpage->datacount);

                                if (tempvaddr != 0)
                                        pmap_quick_remove_page(tempvaddr);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                        dmat->bounce_zone->total_bounced++;
                }
        }

        powerpc_sync();
}

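/*
 * Usage sketch (hypothetical driver code): the bounce copies above are why
 * the standard sync discipline matters on this platform.  Before starting
 * a transfer in which the device reads the buffer, and after a transfer in
 * which the device wrote it:
 *
 *	bus_dmamap_sync(sc->sc_dmat, map, BUS_DMASYNC_PREWRITE);
 *	(start the transfer; the device reads the bounced copy)
 *
 *	bus_dmamap_sync(sc->sc_dmat, map, BUS_DMASYNC_POSTREAD);
 *	(the CPU may now read the data the device wrote)
 *	bus_dmamap_unload(sc->sc_dmat, map);
 */
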
int
bus_dma_tag_set_iommu(bus_dma_tag_t tag, device_t iommu, void *cookie)
{
        tag->iommu = iommu;
        tag->iommu_cookie = cookie;

        return (0);
}
