FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/busdma_machdep.c


/*-
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *   From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.0/sys/arm/arm/busdma_machdep.c 147591 2005-06-24 23:57:27Z cognet $");

/*
 * ARM bus dma support routines
 */

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/ktr.h>
#include <sys/kernel.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>

struct bus_dma_tag {
        bus_dma_tag_t           parent;
        bus_size_t              alignment;
        bus_size_t              boundary;
        bus_addr_t              lowaddr;
        bus_addr_t              highaddr;
        bus_dma_filter_t        *filter;
        void                    *filterarg;
        bus_size_t              maxsize;
        u_int                   nsegments;
        bus_size_t              maxsegsz;
        int                     flags;
        int                     ref_count;
        int                     map_count;
        bus_dma_lock_t          *lockfunc;
        void                    *lockfuncarg;
        /*
         * DMA range for this tag.  If the page doesn't fall within
         * one of these ranges, an error is returned.  The caller
         * may then decide what to do with the transfer.  If the
         * range pointer is NULL, it is ignored.
         */
        struct arm32_dma_range  *ranges;
        int                     _nranges;
};

#define DMAMAP_LINEAR           0x1
#define DMAMAP_MBUF             0x2
#define DMAMAP_UIO              0x4
#define DMAMAP_COHERENT         0x8
#define DMAMAP_ALLOCATED        0x10
#define DMAMAP_TYPE_MASK        (DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)

struct bus_dmamap {
        bus_dma_tag_t   dmat;
        int             flags;
        void            *buffer;
        TAILQ_ENTRY(bus_dmamap) freelist;
        int             len;
};

static TAILQ_HEAD(,bus_dmamap) dmamap_freelist =
        TAILQ_HEAD_INITIALIZER(dmamap_freelist);

#define BUSDMA_STATIC_MAPS      500
static struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS];

static struct mtx busdma_mtx;

MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);

static void
arm_dmamap_freelist_init(void *dummy)
{
        int i;

        for (i = 0; i < BUSDMA_STATIC_MAPS; i++)
                TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist);
}

SYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, arm_dmamap_freelist_init, NULL);

static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp);

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
static __inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
        struct arm32_dma_range *dr;
        int i;

        for (i = 0, dr = ranges; i < nranges; i++, dr++) {
                if (curaddr >= dr->dr_sysbase &&
                    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
                        return (dr);
        }

        return (NULL);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should pass &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
        struct mtx *dmtx;

        dmtx = (struct mtx *)arg;
        switch (op) {
        case BUS_DMA_LOCK:
                mtx_lock(dmtx);
                break;
        case BUS_DMA_UNLOCK:
                mtx_unlock(dmtx);
                break;
        default:
                panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
        }
}
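
/*
 * Example (editor's sketch, not part of the original file): a driver
 * that allows deferred loads and serializes callbacks with its own
 * mutex would hand this helper and that mutex to bus_dma_tag_create();
 * the softc names below are hypothetical.
 *
 *      error = bus_dma_tag_create(... other constraints ...,
 *          busdma_lock_mutex, &sc->sc_mtx, &sc->sc_dmat);
 */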

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
        panic("driver error: busdma dflt_lock called");
#else
        printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}

static __inline bus_dmamap_t
_busdma_alloc_dmamap(void)
{
        bus_dmamap_t map;

        mtx_lock(&busdma_mtx);
        map = TAILQ_FIRST(&dmamap_freelist);
        if (map)
                TAILQ_REMOVE(&dmamap_freelist, map, freelist);
        mtx_unlock(&busdma_mtx);
        if (!map) {
                map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT);
                if (map)
                        map->flags = DMAMAP_ALLOCATED;
        } else
                map->flags = 0;
        return (map);
}

static __inline void
_busdma_free_dmamap(bus_dmamap_t map)
{
        if (map->flags & DMAMAP_ALLOCATED)
                free(map, M_DEVBUF);
        else {
                mtx_lock(&busdma_mtx);
                TAILQ_INSERT_HEAD(&dmamap_freelist, map, freelist);
                mtx_unlock(&busdma_mtx);
        }
}

/*
 * Allocate a device specific dma_tag.
 */
#define SEG_NB 1024

int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
                   bus_size_t boundary, bus_addr_t lowaddr,
                   bus_addr_t highaddr, bus_dma_filter_t *filter,
                   void *filterarg, bus_size_t maxsize, int nsegments,
                   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
                   void *lockfuncarg, bus_dma_tag_t *dmat)
{
        bus_dma_tag_t newtag;
        int error = 0;
        /* Return a NULL tag on failure */
        *dmat = NULL;

        newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
        if (newtag == NULL) {
                CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
                    __func__, newtag, 0, error);
                return (ENOMEM);
        }

        newtag->parent = parent;
        newtag->alignment = alignment;
        newtag->boundary = boundary;
        newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
        newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
        newtag->filter = filter;
        newtag->filterarg = filterarg;
        newtag->maxsize = maxsize;
        newtag->nsegments = nsegments;
        newtag->maxsegsz = maxsegsz;
        newtag->flags = flags;
        newtag->ref_count = 1; /* Count ourself */
        newtag->map_count = 0;
        newtag->ranges = bus_dma_get_range();
        newtag->_nranges = bus_dma_get_range_nb();
        if (lockfunc != NULL) {
                newtag->lockfunc = lockfunc;
                newtag->lockfuncarg = lockfuncarg;
        } else {
                newtag->lockfunc = dflt_lock;
                newtag->lockfuncarg = NULL;
        }
        /*
         * Take into account any restrictions imposed by our parent tag
         */
        if (parent != NULL) {
                newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
                newtag->highaddr = max(parent->highaddr, newtag->highaddr);
                if (newtag->boundary == 0)
                        newtag->boundary = parent->boundary;
                else if (parent->boundary != 0)
                        newtag->boundary = min(parent->boundary,
                                               newtag->boundary);
                if (newtag->filter == NULL) {
                        /*
                         * Short circuit looking at our parent directly
                         * since we have encapsulated all of its information
                         */
                        newtag->filter = parent->filter;
                        newtag->filterarg = parent->filterarg;
                        newtag->parent = parent->parent;
                }
                if (newtag->parent != NULL)
                        atomic_add_int(&parent->ref_count, 1);
        }

        *dmat = newtag;
        CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
            __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);

        return (error);
}
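
/*
 * Example (editor's sketch, not from the original source): a typical
 * tag for a device that can DMA anywhere in 32-bit space, using one
 * segment of up to MCLBYTES.  The softc field is hypothetical.
 * Passing NULL/NULL for lockfunc/lockfuncarg installs dflt_lock()
 * above, which is only legitimate when loads are never deferred (the
 * driver always uses BUS_DMA_NOWAIT); otherwise pass busdma_lock_mutex
 * and a mutex.
 *
 *      error = bus_dma_tag_create(NULL,                parent
 *          1, 0,                                       alignment, boundary
 *          BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, low/highaddr
 *          NULL, NULL,                                 filter, filterarg
 *          MCLBYTES, 1, MCLBYTES,                      maxsize/nseg/maxsegsz
 *          0, NULL, NULL,                              flags, lockfunc+arg
 *          &sc->sc_dmat);
 */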

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
#ifdef KTR
        bus_dma_tag_t dmat_copy = dmat;
#endif

        if (dmat != NULL) {
                if (dmat->map_count != 0)
                        return (EBUSY);

                while (dmat != NULL) {
                        bus_dma_tag_t parent;

                        parent = dmat->parent;
                        atomic_subtract_int(&dmat->ref_count, 1);
                        if (dmat->ref_count == 0) {
                                free(dmat, M_DEVBUF);
                                /*
                                 * Last reference count, so
                                 * release our reference
                                 * count on our parent.
                                 */
                                dmat = parent;
                        } else
                                dmat = NULL;
                }
        }
        CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);

        return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
        bus_dmamap_t newmap;
#ifdef KTR
        int error = 0;
#endif

        newmap = _busdma_alloc_dmamap();
        if (newmap == NULL) {
                CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
                return (ENOMEM);
        }
        *mapp = newmap;
        newmap->dmat = dmat;
        dmat->map_count++;

        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
            __func__, dmat, dmat->flags, error);

        return (0);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

        _busdma_free_dmamap(map);
        dmat->map_count--;
        CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
        return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
                 bus_dmamap_t *mapp)
{
        bus_dmamap_t newmap = NULL;
        int mflags;

        if (flags & BUS_DMA_NOWAIT)
                mflags = M_NOWAIT;
        else
                mflags = M_WAITOK;
        if (flags & BUS_DMA_ZERO)
                mflags |= M_ZERO;

        newmap = _busdma_alloc_dmamap();
        if (newmap == NULL) {
                CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
                    __func__, dmat, dmat->flags, ENOMEM);
                return (ENOMEM);
        }
        dmat->map_count++;
        *mapp = newmap;
        newmap->dmat = dmat;

        if (dmat->maxsize <= PAGE_SIZE) {
                *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
        } else {
                /*
                 * XXX Use Contigmalloc until it is merged into this facility
                 *     and handles multi-seg allocations.  Nobody is doing
                 *     multi-seg allocations yet though.
                 */
                *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
                    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
                    dmat->boundary);
        }
        if (*vaddr == NULL) {
                _busdma_free_dmamap(newmap);
                dmat->map_count--;
                *mapp = NULL;
                return (ENOMEM);
        }
        return (0);
}
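
/*
 * Example (editor's sketch): allocating a zeroed descriptor ring that
 * honors the constraints recorded in the tag; the softc fields are
 * hypothetical.  Sizes above PAGE_SIZE take the contigmalloc() path.
 *
 *      error = bus_dmamem_alloc(sc->sc_dmat, (void **)&sc->sc_ring,
 *          BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->sc_ringmap);
 */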

/*
 * Free a piece of memory and its associated dmamap, both of which were
 * allocated via bus_dmamem_alloc.  The free/contigfree choice mirrors
 * the malloc/contigmalloc choice made there.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
        if (dmat->maxsize <= PAGE_SIZE)
                free(vaddr, M_DEVBUF);
        else
                contigfree(vaddr, dmat->maxsize, M_DEVBUF);
        dmat->map_count--;
        _busdma_free_dmamap(map);
        CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entry and the ending segment on exit;
 * callers initialize *segp to -1 for the first invocation.
 */
static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp)
{
        bus_size_t sgsize;
        bus_addr_t curaddr, lastaddr, baddr, bmask;
        vm_offset_t vaddr = (vm_offset_t)buf;
        int seg;
        int error = 0;
        pd_entry_t *pde;
        pt_entry_t pte;
        pt_entry_t *ptep;

        lastaddr = *lastaddrp;
        bmask = ~(dmat->boundary - 1);

        CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
            "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);

        for (seg = *segp; buflen > 0; ) {
                /*
                 * Get the physical address for this segment.
                 *
                 * XXX Don't support checking for coherent mappings
                 * XXX in user address space.
                 */
                if (__predict_true(pmap == pmap_kernel())) {
                        (void) pmap_get_pde_pte(pmap, vaddr, &pde, &ptep);
                        if (__predict_false(pmap_pde_section(pde))) {
                                curaddr = (*pde & L1_S_FRAME) |
                                    (vaddr & L1_S_OFFSET);
                                if (*pde & L1_S_CACHE_MASK) {
                                        map->flags &=
                                            ~DMAMAP_COHERENT;
                                }
                        } else {
                                pte = *ptep;
                                KASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV,
                                    ("INV type"));
                                if (__predict_false((pte & L2_TYPE_MASK)
                                                    == L2_TYPE_L)) {
                                        curaddr = (pte & L2_L_FRAME) |
                                            (vaddr & L2_L_OFFSET);
                                        if (pte & L2_L_CACHE_MASK) {
                                                map->flags &=
                                                    ~DMAMAP_COHERENT;
                                        }
                                } else {
                                        curaddr = (pte & L2_S_FRAME) |
                                            (vaddr & L2_S_OFFSET);
                                        if (pte & L2_S_CACHE_MASK) {
                                                map->flags &=
                                                    ~DMAMAP_COHERENT;
                                        }
                                }
                        }
                } else {
                        curaddr = pmap_extract(pmap, vaddr);
                        map->flags &= ~DMAMAP_COHERENT;
                }

                if (dmat->ranges) {
                        struct arm32_dma_range *dr;

                        dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
                            curaddr);
                        if (dr == NULL)
                                return (EINVAL);
                        /*
                         * In a valid DMA range.  Translate the physical
                         * memory address to an address in the DMA window.
                         */
                        curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
                }
                /*
                 * Compute the segment size, and adjust counts.
                 */
                sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
                if (buflen < sgsize)
                        sgsize = buflen;

                /*
                 * Make sure we don't cross any boundaries.
                 */
                if (dmat->boundary > 0) {
                        baddr = (curaddr + dmat->boundary) & bmask;
                        if (sgsize > (baddr - curaddr))
                                sgsize = (baddr - curaddr);
                }

                /*
                 * Insert chunk into a segment, coalescing with
                 * the previous segment if possible.
                 */
                if (seg >= 0 && curaddr == lastaddr &&
                    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
                    (dmat->boundary == 0 ||
                     (segs[seg].ds_addr & bmask) ==
                     (curaddr & bmask))) {
                        segs[seg].ds_len += sgsize;
                        goto segdone;
                } else {
                        if (++seg >= dmat->nsegments)
                                break;
                        segs[seg].ds_addr = curaddr;
                        segs[seg].ds_len = sgsize;
                }
                if (error)
                        break;
segdone:
                lastaddr = curaddr + sgsize;
                vaddr += sgsize;
                buflen -= sgsize;
        }

        *segp = seg;
        *lastaddrp = lastaddr;

        /*
         * Did we fit?
         */
        if (buflen != 0)
                error = EFBIG; /* XXX better return value here? */
        return (error);
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
        vm_offset_t     lastaddr = 0;
        int             error, nsegs = -1;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
        bus_dma_segment_t dm_segments[dmat->nsegments];
#else
        bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif

        KASSERT(dmat != NULL, ("dmatag is NULL"));
        KASSERT(map != NULL, ("dmamap is NULL"));
        map->flags &= ~DMAMAP_TYPE_MASK;
        map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT;
        map->buffer = buf;
        map->len = buflen;
        error = bus_dmamap_load_buffer(dmat,
            dm_segments, map, buf, buflen, kernel_pmap,
            flags, &lastaddr, &nsegs);
        if (error)
                (*callback)(callback_arg, NULL, 0, error);
        else
                (*callback)(callback_arg, dm_segments, nsegs + 1, error);

        CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
            __func__, dmat, dmat->flags, error, nsegs + 1);

        return (0);
}
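
/*
 * Example (editor's sketch): loading a buffer such as the ring
 * allocated above.  This implementation never defers, so the callback
 * runs before bus_dmamap_load() returns; it records the bus address of
 * the single segment.  All "mydev" names are hypothetical.
 *
 *      static void
 *      mydev_ring_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *      {
 *              if (error == 0)
 *                      *(bus_addr_t *)arg = segs[0].ds_addr;
 *      }
 *
 *      error = bus_dmamap_load(sc->sc_dmat, sc->sc_ringmap, sc->sc_ring,
 *          MYDEV_RING_SIZE, mydev_ring_cb, &sc->sc_ring_busaddr,
 *          BUS_DMA_NOWAIT);
 */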

/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
                     bus_dmamap_callback2_t *callback, void *callback_arg,
                     int flags)
{
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
        bus_dma_segment_t dm_segments[dmat->nsegments];
#else
        bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
        int nsegs = -1, error = 0;

        M_ASSERTPKTHDR(m0);

        map->flags &= ~DMAMAP_TYPE_MASK;
        map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
        map->buffer = m0;
        map->len = 0;
        if (m0->m_pkthdr.len <= dmat->maxsize) {
                vm_offset_t lastaddr = 0;
                struct mbuf *m;

                for (m = m0; m != NULL && error == 0; m = m->m_next) {
                        if (m->m_len > 0) {
                                error = bus_dmamap_load_buffer(dmat,
                                    dm_segments, map, m->m_data, m->m_len,
                                    pmap_kernel(), flags, &lastaddr, &nsegs);
                                map->len += m->m_len;
                        }
                }
        } else {
                error = EINVAL;
        }

        if (error) {
                /*
                 * force "no valid mappings" on error in callback.
                 */
                (*callback)(callback_arg, dm_segments, 0, 0, error);
        } else {
                (*callback)(callback_arg, dm_segments, nsegs + 1,
                    m0->m_pkthdr.len, error);
        }
        CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
            __func__, dmat, dmat->flags, error, nsegs + 1);

        return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
                        struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
                        int flags)
{
        int error = 0;

        M_ASSERTPKTHDR(m0);

        flags |= BUS_DMA_NOWAIT;
        *nsegs = -1;
        map->flags &= ~DMAMAP_TYPE_MASK;
        map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
        map->buffer = m0;
        map->len = 0;
        if (m0->m_pkthdr.len <= dmat->maxsize) {
                vm_offset_t lastaddr = 0;
                struct mbuf *m;

                for (m = m0; m != NULL && error == 0; m = m->m_next) {
                        if (m->m_len > 0) {
                                error = bus_dmamap_load_buffer(dmat, segs, map,
                                                m->m_data, m->m_len,
                                                pmap_kernel(), flags, &lastaddr,
                                                nsegs);
                                map->len += m->m_len;
                        }
                }
        } else {
                error = EINVAL;
        }

        /* XXX FIXME: Having to increment nsegs is really annoying */
        ++*nsegs;
        CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
            __func__, dmat, dmat->flags, error, *nsegs);
        return (error);
}
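
/*
 * Example (editor's sketch): a transmit path would gather a packet
 * chain straight into a caller-provided segment array, then write one
 * hardware descriptor per segment.  MYDEV_NTXSEGS and txmap are
 * hypothetical.
 *
 *      bus_dma_segment_t segs[MYDEV_NTXSEGS];
 *      int nsegs, error;
 *
 *      error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, txmap, m0,
 *          segs, &nsegs, 0);
 *      if (error == 0) {
 *              ... hand segs[0 .. nsegs - 1] to the hardware ...
 *      }
 */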

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
        vm_offset_t lastaddr = 0;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
        bus_dma_segment_t dm_segments[dmat->nsegments];
#else
        bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
        int nsegs, i, error;
        bus_size_t resid;
        struct iovec *iov;
        struct pmap *pmap;

        resid = uio->uio_resid;
        iov = uio->uio_iov;
        map->flags &= ~DMAMAP_TYPE_MASK;
        map->flags |= DMAMAP_UIO|DMAMAP_COHERENT;
        map->buffer = uio;
        map->len = 0;

        if (uio->uio_segflg == UIO_USERSPACE) {
                KASSERT(uio->uio_td != NULL,
                    ("bus_dmamap_load_uio: USERSPACE but no proc"));
                pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
        } else
                pmap = kernel_pmap;

        error = 0;
        nsegs = -1;
        for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
                /*
                 * Now at the first iovec to load.  Load each iovec
                 * until we have exhausted the residual count.
                 */
                bus_size_t minlen =
                    resid < iov[i].iov_len ? resid : iov[i].iov_len;
                caddr_t addr = (caddr_t) iov[i].iov_base;

                if (minlen > 0) {
                        error = bus_dmamap_load_buffer(dmat, dm_segments, map,
                            addr, minlen, pmap, flags, &lastaddr, &nsegs);

                        map->len += minlen;
                        resid -= minlen;
                }
        }

        if (error) {
                /*
                 * force "no valid mappings" on error in callback.
                 */
                (*callback)(callback_arg, dm_segments, 0, 0, error);
        } else {
                (*callback)(callback_arg, dm_segments, nsegs + 1,
                    uio->uio_resid, error);
        }

        CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
            __func__, dmat, dmat->flags, error, nsegs + 1);
        return (error);
}
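
/*
 * Example (editor's sketch): loading a user I/O request from a
 * character device's read/write path; mydev_uio_cb is a hypothetical
 * bus_dmamap_callback2_t.  Because the uio may name user addresses,
 * the routine above selects the requesting thread's pmap.
 *
 *      error = bus_dmamap_load_uio(sc->sc_dmat, map, uio,
 *          mydev_uio_cb, sc, BUS_DMA_NOWAIT);
 */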

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{

        map->flags &= ~DMAMAP_TYPE_MASK;
}

static __inline void
bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
{

        if (op & BUS_DMASYNC_PREWRITE)
                cpu_dcache_wb_range((vm_offset_t)buf, len);
        if (op & BUS_DMASYNC_POSTREAD) {
                if ((((vm_offset_t)buf | len) & arm_dcache_align_mask) == 0)
                        cpu_dcache_inv_range((vm_offset_t)buf, len);
                else
                        cpu_dcache_wbinv_range((vm_offset_t)buf, len);
        }
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
        struct mbuf *m;
        struct uio *uio;
        int resid;
        struct iovec *iov;

        if (!(op & (BUS_DMASYNC_PREWRITE | BUS_DMASYNC_POSTREAD)))
                return;
        if (map->flags & DMAMAP_COHERENT)
                return;
        if ((op & BUS_DMASYNC_POSTREAD) && (map->len > PAGE_SIZE)) {
                cpu_dcache_wbinv_all();
                return;
        }
        CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
        switch (map->flags & DMAMAP_TYPE_MASK) {
        case DMAMAP_LINEAR:
                bus_dmamap_sync_buf(map->buffer, map->len, op);
                break;
        case DMAMAP_MBUF:
                m = map->buffer;
                while (m) {
                        if (m->m_len > 0)
                                bus_dmamap_sync_buf(m->m_data, m->m_len, op);
                        m = m->m_next;
                }
                break;
        case DMAMAP_UIO:
                uio = map->buffer;
                iov = uio->uio_iov;
                resid = uio->uio_resid;
                for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
                        bus_size_t minlen = resid < iov[i].iov_len ? resid :
                            iov[i].iov_len;
                        if (minlen > 0) {
                                bus_dmamap_sync_buf(iov[i].iov_base, minlen,
                                    op);
                                resid -= minlen;
                        }
                }
                break;
        default:
                break;
        }
        cpu_drain_writebuf();
}
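
/*
 * Example (editor's sketch): the canonical sync ordering around one
 * DMA transaction on this write-back cached ARM.  PREWRITE flushes
 * CPU-written data to memory before the device reads it; POSTREAD
 * invalidates stale cache lines before the CPU reads what the device
 * wrote.  The names are hypothetical.
 *
 *      bus_dmamap_sync(sc->sc_dmat, sc->sc_ringmap, BUS_DMASYNC_PREWRITE);
 *      ... start the device, wait for the DMA to complete ...
 *      bus_dmamap_sync(sc->sc_dmat, sc->sc_ringmap, BUS_DMASYNC_POSTREAD);
 */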
