FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/busdma_machdep.c


/*-
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *   From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.1/sys/arm/arm/busdma_machdep.c 156052 2006-02-27 01:11:43Z cognet $");

/*
 * ARM bus dma support routines
 */

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/ktr.h>
#include <sys/kernel.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>

struct bus_dma_tag {
        bus_dma_tag_t           parent;
        bus_size_t              alignment;
        bus_size_t              boundary;
        bus_addr_t              lowaddr;
        bus_addr_t              highaddr;
        bus_dma_filter_t        *filter;
        void                    *filterarg;
        bus_size_t              maxsize;
        u_int                   nsegments;
        bus_size_t              maxsegsz;
        int                     flags;
        int                     ref_count;
        int                     map_count;
        bus_dma_lock_t          *lockfunc;
        void                    *lockfuncarg;
        /*
         * DMA range for this tag.  If the page doesn't fall within
         * one of these ranges, an error is returned.  The caller
         * may then decide what to do with the transfer.  If the
         * range pointer is NULL, it is ignored.
         */
        struct arm32_dma_range  *ranges;
        int                     _nranges;
};

#define DMAMAP_LINEAR           0x1
#define DMAMAP_MBUF             0x2
#define DMAMAP_UIO              0x4
#define DMAMAP_COHERENT         0x8
#define DMAMAP_ALLOCATED        0x10
#define DMAMAP_TYPE_MASK        (DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)

struct bus_dmamap {
        bus_dma_tag_t   dmat;
        int             flags;
        void            *buffer;
        TAILQ_ENTRY(bus_dmamap) freelist;
        int             len;
};

static TAILQ_HEAD(,bus_dmamap) dmamap_freelist =
        TAILQ_HEAD_INITIALIZER(dmamap_freelist);

#define BUSDMA_STATIC_MAPS      500
static struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS];

static struct mtx busdma_mtx;

MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);

static void
arm_dmamap_freelist_init(void *dummy)
{
        int i;

        for (i = 0; i < BUSDMA_STATIC_MAPS; i++)
                TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist);
}

SYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, arm_dmamap_freelist_init, NULL);

static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp);

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
static __inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
        struct arm32_dma_range *dr;
        int i;

        for (i = 0, dr = ranges; i < nranges; i++, dr++) {
                if (curaddr >= dr->dr_sysbase &&
                    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
                        return (dr);
        }

        return (NULL);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
        struct mtx *dmtx;

        dmtx = (struct mtx *)arg;
        switch (op) {
        case BUS_DMA_LOCK:
                mtx_lock(dmtx);
                break;
        case BUS_DMA_UNLOCK:
                mtx_unlock(dmtx);
                break;
        default:
                panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
        }
}
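
/*
 * A hypothetical usage sketch (not part of this file; "sc", "foo_softc",
 * "sc_mtx", and "sc_dmat" are assumed names): a driver whose map loads may
 * be deferred can pass busdma_lock_mutex together with its own mutex (or
 * &Giant) when creating a tag, so busdma_swi can take the driver lock
 * around the deferred callback.
 */
#if 0
static int
foo_dma_init(struct foo_softc *sc)
{
        /* Serialize deferred-load callbacks against the driver lock. */
        return (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR,
            BUS_SPACE_MAXADDR, NULL, NULL, MAXBSIZE, 1, MAXBSIZE, 0,
            busdma_lock_mutex, &sc->sc_mtx, &sc->sc_dmat));
}
#endif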

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
        panic("driver error: busdma dflt_lock called");
#else
        printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}

static __inline bus_dmamap_t
_busdma_alloc_dmamap(void)
{
        bus_dmamap_t map;

        mtx_lock(&busdma_mtx);
        map = TAILQ_FIRST(&dmamap_freelist);
        if (map)
                TAILQ_REMOVE(&dmamap_freelist, map, freelist);
        mtx_unlock(&busdma_mtx);
        if (!map) {
                map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT);
                if (map)
                        map->flags = DMAMAP_ALLOCATED;
        } else
                map->flags = 0;
        return (map);
}

static __inline void
_busdma_free_dmamap(bus_dmamap_t map)
{
        if (map->flags & DMAMAP_ALLOCATED)
                free(map, M_DEVBUF);
        else {
                mtx_lock(&busdma_mtx);
                TAILQ_INSERT_HEAD(&dmamap_freelist, map, freelist);
                mtx_unlock(&busdma_mtx);
        }
}

/*
 * Allocate a device specific dma_tag.
 */
#define SEG_NB 1024

int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
                   bus_size_t boundary, bus_addr_t lowaddr,
                   bus_addr_t highaddr, bus_dma_filter_t *filter,
                   void *filterarg, bus_size_t maxsize, int nsegments,
                   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
                   void *lockfuncarg, bus_dma_tag_t *dmat)
{
        bus_dma_tag_t newtag;
        int error = 0;

        /* Return a NULL tag on failure */
        *dmat = NULL;

        newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
        if (newtag == NULL) {
                CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
                    __func__, newtag, 0, error);
                return (ENOMEM);
        }

        newtag->parent = parent;
        newtag->alignment = alignment;
        newtag->boundary = boundary;
        newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
        newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
        newtag->filter = filter;
        newtag->filterarg = filterarg;
        newtag->maxsize = maxsize;
        newtag->nsegments = nsegments;
        newtag->maxsegsz = maxsegsz;
        newtag->flags = flags;
        newtag->ref_count = 1; /* Count ourself */
        newtag->map_count = 0;
        newtag->ranges = bus_dma_get_range();
        newtag->_nranges = bus_dma_get_range_nb();
        if (lockfunc != NULL) {
                newtag->lockfunc = lockfunc;
                newtag->lockfuncarg = lockfuncarg;
        } else {
                newtag->lockfunc = dflt_lock;
                newtag->lockfuncarg = NULL;
        }
        /*
         * Take into account any restrictions imposed by our parent tag
         */
        if (parent != NULL) {
                newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
                newtag->highaddr = max(parent->highaddr, newtag->highaddr);
                if (newtag->boundary == 0)
                        newtag->boundary = parent->boundary;
                else if (parent->boundary != 0)
                        newtag->boundary = min(parent->boundary,
                                               newtag->boundary);
                if (newtag->filter == NULL) {
                        /*
                         * Short circuit looking at our parent directly
                         * since we have encapsulated all of its information
                         */
                        newtag->filter = parent->filter;
                        newtag->filterarg = parent->filterarg;
                        newtag->parent = parent->parent;
                }
                if (newtag->parent != NULL)
                        atomic_add_int(&parent->ref_count, 1);
        }

        *dmat = newtag;
        CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
            __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);

        return (error);
}
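
/*
 * A hypothetical usage sketch (assumed names "sc" and "ring_dmat"): a
 * driver with a single contiguous page-sized ring, able to address the
 * low 32-bit space, might create its tag as below.  A NULL lockfunc means
 * dflt_lock, which is only valid when loads are never deferred.
 */
#if 0
        error = bus_dma_tag_create(NULL,        /* parent */
            PAGE_SIZE, 0,                       /* alignment, boundary */
            BUS_SPACE_MAXADDR_32BIT,            /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filter, filterarg */
            PAGE_SIZE, 1, PAGE_SIZE,            /* maxsize, nsegments, maxsegsz */
            0, NULL, NULL,                      /* flags, lockfunc, lockfuncarg */
            &sc->ring_dmat);
#endif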

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
#ifdef KTR
        bus_dma_tag_t dmat_copy = dmat;
#endif

        if (dmat != NULL) {

                if (dmat->map_count != 0)
                        return (EBUSY);

                while (dmat != NULL) {
                        bus_dma_tag_t parent;

                        parent = dmat->parent;
                        atomic_subtract_int(&dmat->ref_count, 1);
                        if (dmat->ref_count == 0) {
                                free(dmat, M_DEVBUF);
                                /*
                                 * Last reference count, so
                                 * release our reference
                                 * count on our parent.
                                 */
                                dmat = parent;
                        } else
                                dmat = NULL;
                }
        }
        CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);

        return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
        bus_dmamap_t newmap;
#ifdef KTR
        int error = 0;
#endif

        newmap = _busdma_alloc_dmamap();
        if (newmap == NULL) {
                CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
                return (ENOMEM);
        }
        *mapp = newmap;
        newmap->dmat = dmat;
        dmat->map_count++;

        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
            __func__, dmat, dmat->flags, error);

        return (0);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

        _busdma_free_dmamap(map);
        dmat->map_count--;
        CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
        return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
                 bus_dmamap_t *mapp)
{
        bus_dmamap_t newmap = NULL;
        int mflags;

        if (flags & BUS_DMA_NOWAIT)
                mflags = M_NOWAIT;
        else
                mflags = M_WAITOK;
        if (flags & BUS_DMA_ZERO)
                mflags |= M_ZERO;

        newmap = _busdma_alloc_dmamap();
        if (newmap == NULL) {
                CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
                    __func__, dmat, dmat->flags, ENOMEM);
                return (ENOMEM);
        }
        dmat->map_count++;
        *mapp = newmap;
        newmap->dmat = dmat;

        if (dmat->maxsize <= PAGE_SIZE) {
                *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
        } else {
                /*
                 * XXX Use Contigmalloc until it is merged into this facility
                 *     and handles multi-seg allocations.  Nobody is doing
                 *     multi-seg allocations yet though.
                 */
                *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
                    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
                    dmat->boundary);
        }
        if (*vaddr == NULL) {
                _busdma_free_dmamap(newmap);
                dmat->map_count--;
                *mapp = NULL;
                return (ENOMEM);
        }
        return (0);
}

/*
 * Free a piece of memory and its allocated dmamap, which were allocated
 * via bus_dmamem_alloc.  Mirror the malloc/contigmalloc choice made at
 * allocation time when choosing between free and contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
        if (dmat->maxsize <= PAGE_SIZE)
                free(vaddr, M_DEVBUF);
        else {
                contigfree(vaddr, dmat->maxsize, M_DEVBUF);
        }
        dmat->map_count--;
        _busdma_free_dmamap(map);
        CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}
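
/*
 * A hypothetical usage sketch (assumed names): allocate a zeroed DMA-safe
 * buffer against a tag, then release both the memory and its map.
 */
#if 0
        void *ring;
        bus_dmamap_t ring_map;

        if (bus_dmamem_alloc(sc->ring_dmat, &ring,
            BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring_map) != 0)
                return (ENOMEM);
        /* ... load the map and run DMA ... */
        bus_dmamem_free(sc->ring_dmat, ring, ring_map);
#endif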

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp)
{
        bus_size_t sgsize;
        bus_addr_t curaddr, lastaddr, baddr, bmask;
        vm_offset_t vaddr = (vm_offset_t)buf;
        int seg;
        int error = 0;
        pd_entry_t *pde;
        pt_entry_t pte;
        pt_entry_t *ptep;

        lastaddr = *lastaddrp;
        bmask = ~(dmat->boundary - 1);

        CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
            "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);

        for (seg = *segp; buflen > 0; ) {
                /*
                 * Get the physical address for this segment.
                 *
                 * XXX Don't support checking for coherent mappings
                 * XXX in user address space.
                 */
                if (__predict_true(pmap == pmap_kernel())) {
                        (void) pmap_get_pde_pte(pmap, vaddr, &pde, &ptep);
                        if (__predict_false(pmap_pde_section(pde))) {
                                curaddr = (*pde & L1_S_FRAME) |
                                    (vaddr & L1_S_OFFSET);
                                if (*pde & L1_S_CACHE_MASK) {
                                        map->flags &=
                                            ~DMAMAP_COHERENT;
                                }
                        } else {
                                pte = *ptep;
                                KASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV,
                                    ("INV type"));
                                if (__predict_false((pte & L2_TYPE_MASK)
                                                    == L2_TYPE_L)) {
                                        curaddr = (pte & L2_L_FRAME) |
                                            (vaddr & L2_L_OFFSET);
                                        if (pte & L2_L_CACHE_MASK) {
                                                map->flags &=
                                                    ~DMAMAP_COHERENT;
                                        }
                                } else {
                                        curaddr = (pte & L2_S_FRAME) |
                                            (vaddr & L2_S_OFFSET);
                                        if (pte & L2_S_CACHE_MASK) {
                                                map->flags &=
                                                    ~DMAMAP_COHERENT;
                                        }
                                }
                        }
                } else {
                        curaddr = pmap_extract(pmap, vaddr);
                        map->flags &= ~DMAMAP_COHERENT;
                }

                if (dmat->ranges) {
                        struct arm32_dma_range *dr;

                        dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
                            curaddr);
                        if (dr == NULL)
                                return (EINVAL);
                        /*
                         * In a valid DMA range.  Translate the physical
                         * memory address to an address in the DMA window.
                         */
                        curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
                }
                /*
                 * Compute the segment size, and adjust counts.
                 */
                sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
                if (buflen < sgsize)
                        sgsize = buflen;

                /*
                 * Make sure we don't cross any boundaries.
                 */
                if (dmat->boundary > 0) {
                        baddr = (curaddr + dmat->boundary) & bmask;
                        if (sgsize > (baddr - curaddr))
                                sgsize = (baddr - curaddr);
                }

                /*
                 * Insert chunk into a segment, coalescing with
                 * the previous segment if possible.
                 */
                if (seg >= 0 && curaddr == lastaddr &&
                    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
                    (dmat->boundary == 0 ||
                     (segs[seg].ds_addr & bmask) ==
                     (curaddr & bmask))) {
                        segs[seg].ds_len += sgsize;
                        goto segdone;
                } else {
                        if (++seg >= dmat->nsegments)
                                break;
                        segs[seg].ds_addr = curaddr;
                        segs[seg].ds_len = sgsize;
                }
                if (error)
                        break;
segdone:
                lastaddr = curaddr + sgsize;
                vaddr += sgsize;
                buflen -= sgsize;
        }

        *segp = seg;
        *lastaddrp = lastaddr;

        /*
         * Did we fit?
         */
        if (buflen != 0)
                error = EFBIG; /* XXX better return value here? */
        return (error);
}
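
/*
 * Worked example of the boundary clipping above (illustrative numbers,
 * assuming PAGE_SIZE 0x1000 and dmat->boundary = 0x400, so bmask = ~0x3ff):
 * for curaddr = 0x20380 and a large buflen, the page clip first yields
 * sgsize = 0x1000 - 0x380 = 0xc80; then baddr = (0x20380 + 0x400) & ~0x3ff
 * = 0x20400, and sgsize is clipped to baddr - curaddr = 0x80, so no single
 * segment ever crosses a 0x400-byte boundary.
 */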

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
        vm_offset_t     lastaddr = 0;
        int             error, nsegs = -1;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
        bus_dma_segment_t dm_segments[dmat->nsegments];
#else
        bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif

        KASSERT(dmat != NULL, ("dmatag is NULL"));
        KASSERT(map != NULL, ("dmamap is NULL"));
        map->flags &= ~DMAMAP_TYPE_MASK;
        map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT;
        map->buffer = buf;
        map->len = buflen;
        error = bus_dmamap_load_buffer(dmat,
            dm_segments, map, buf, buflen, kernel_pmap,
            flags, &lastaddr, &nsegs);
        if (error)
                (*callback)(callback_arg, NULL, 0, error);
        else
                (*callback)(callback_arg, dm_segments, nsegs + 1, error);

        CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
            __func__, dmat, dmat->flags, nsegs + 1, error);

        return (0);
}
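
/*
 * A hypothetical usage sketch (assumed names): the segment list passed to
 * the callback lives on bus_dmamap_load()'s stack, so a driver records the
 * addresses it needs inside the callback.
 */
#if 0
static void
foo_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        bus_addr_t *paddr = arg;

        if (error == 0 && nseg == 1)
                *paddr = segs[0].ds_addr;       /* remember the bus address */
}

        /* ... in the driver, after bus_dmamem_alloc() ... */
        bus_addr_t ring_busaddr;

        error = bus_dmamap_load(sc->ring_dmat, ring_map, ring, PAGE_SIZE,
            foo_load_cb, &ring_busaddr, BUS_DMA_NOWAIT);
#endif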

/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
                     bus_dmamap_callback2_t *callback, void *callback_arg,
                     int flags)
{
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
        bus_dma_segment_t dm_segments[dmat->nsegments];
#else
        bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
        int nsegs = -1, error = 0;

        M_ASSERTPKTHDR(m0);

        map->flags &= ~DMAMAP_TYPE_MASK;
        map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
        map->buffer = m0;
        map->len = 0;
        if (m0->m_pkthdr.len <= dmat->maxsize) {
                vm_offset_t lastaddr = 0;
                struct mbuf *m;

                for (m = m0; m != NULL && error == 0; m = m->m_next) {
                        if (m->m_len > 0) {
                                error = bus_dmamap_load_buffer(dmat,
                                    dm_segments, map, m->m_data, m->m_len,
                                    pmap_kernel(), flags, &lastaddr, &nsegs);
                                map->len += m->m_len;
                        }
                }
        } else {
                error = EINVAL;
        }

        if (error) {
                /*
                 * Force "no valid mappings" on error in callback.
                 */
                (*callback)(callback_arg, dm_segments, 0, 0, error);
        } else {
                (*callback)(callback_arg, dm_segments, nsegs + 1,
                    m0->m_pkthdr.len, error);
        }
        CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
            __func__, dmat, dmat->flags, error, nsegs + 1);

        return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
                        struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
                        int flags)
{
        int error = 0;

        M_ASSERTPKTHDR(m0);

        flags |= BUS_DMA_NOWAIT;
        *nsegs = -1;
        map->flags &= ~DMAMAP_TYPE_MASK;
        map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
        map->buffer = m0;
        map->len = 0;
        if (m0->m_pkthdr.len <= dmat->maxsize) {
                vm_offset_t lastaddr = 0;
                struct mbuf *m;

                for (m = m0; m != NULL && error == 0; m = m->m_next) {
                        if (m->m_len > 0) {
                                error = bus_dmamap_load_buffer(dmat, segs, map,
                                                m->m_data, m->m_len,
                                                pmap_kernel(), flags, &lastaddr,
                                                nsegs);
                                map->len += m->m_len;
                        }
                }
        } else {
                error = EINVAL;
        }

        /* XXX FIXME: Having to increment nsegs is really annoying */
        ++*nsegs;
        CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
            __func__, dmat, dmat->flags, error, *nsegs);
        return (error);
}
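
/*
 * A hypothetical usage sketch (assumed names; FOO_MAXSEGS is an assumed
 * constant): network drivers typically use this synchronous, callback-free
 * variant to map a transmit mbuf chain into descriptor segments.
 */
#if 0
        bus_dma_segment_t segs[FOO_MAXSEGS];
        int nsegs, error;

        error = bus_dmamap_load_mbuf_sg(sc->tx_dmat, txmap, m0,
            segs, &nsegs, BUS_DMA_NOWAIT);
        if (error == 0) {
                /* Program nsegs hardware descriptors from segs[]. */
        }
#endif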

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
        vm_offset_t lastaddr = 0;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
        bus_dma_segment_t dm_segments[dmat->nsegments];
#else
        bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
        int nsegs, i, error;
        bus_size_t resid;
        struct iovec *iov;
        struct pmap *pmap;

        resid = uio->uio_resid;
        iov = uio->uio_iov;
        map->flags &= ~DMAMAP_TYPE_MASK;
        map->flags |= DMAMAP_UIO|DMAMAP_COHERENT;
        map->buffer = uio;
        map->len = 0;

        if (uio->uio_segflg == UIO_USERSPACE) {
                KASSERT(uio->uio_td != NULL,
                    ("bus_dmamap_load_uio: USERSPACE but no proc"));
                pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
        } else
                pmap = kernel_pmap;

        error = 0;
        nsegs = -1;
        for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
                /*
                 * Now at the first iovec to load.  Load each iovec
                 * until we have exhausted the residual count.
                 */
                bus_size_t minlen =
                    resid < iov[i].iov_len ? resid : iov[i].iov_len;
                caddr_t addr = (caddr_t) iov[i].iov_base;

                if (minlen > 0) {
                        error = bus_dmamap_load_buffer(dmat, dm_segments, map,
                            addr, minlen, pmap, flags, &lastaddr, &nsegs);

                        map->len += minlen;
                        resid -= minlen;
                }
        }

        if (error) {
                /*
                 * Force "no valid mappings" on error in callback.
                 */
                (*callback)(callback_arg, dm_segments, 0, 0, error);
        } else {
                (*callback)(callback_arg, dm_segments, nsegs + 1,
                    uio->uio_resid, error);
        }

        CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
            __func__, dmat, dmat->flags, error, nsegs + 1);
        return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{

        map->flags &= ~DMAMAP_TYPE_MASK;
}

static __inline void
bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
{

        if (op & BUS_DMASYNC_PREWRITE)
                cpu_dcache_wb_range((vm_offset_t)buf, len);
        if (op & BUS_DMASYNC_POSTREAD) {
                if ((((vm_offset_t)buf | len) & arm_dcache_align_mask) == 0)
                        cpu_dcache_inv_range((vm_offset_t)buf, len);
                else
                        cpu_dcache_wbinv_range((vm_offset_t)buf, len);
        }
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
        struct mbuf *m;
        struct uio *uio;
        int resid;
        struct iovec *iov;

        if (!(op & (BUS_DMASYNC_PREWRITE | BUS_DMASYNC_POSTREAD)))
                return;
        if (map->flags & DMAMAP_COHERENT)
                return;
        if ((op & BUS_DMASYNC_POSTREAD) && (map->len > PAGE_SIZE)) {
                cpu_dcache_wbinv_all();
                return;
        }
        CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
        switch(map->flags & DMAMAP_TYPE_MASK) {
        case DMAMAP_LINEAR:
                bus_dmamap_sync_buf(map->buffer, map->len, op);
                break;
        case DMAMAP_MBUF:
                m = map->buffer;
                while (m) {
                        if (m->m_len > 0)
                                bus_dmamap_sync_buf(m->m_data, m->m_len, op);
                        m = m->m_next;
                }
                break;
        case DMAMAP_UIO:
                uio = map->buffer;
                iov = uio->uio_iov;
                resid = uio->uio_resid;
                for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
                        bus_size_t minlen = resid < iov[i].iov_len ? resid :
                            iov[i].iov_len;
                        if (minlen > 0) {
                                bus_dmamap_sync_buf(iov[i].iov_base, minlen,
                                    op);
                                resid -= minlen;
                        }
                }
                break;
        default:
                break;
        }
        cpu_drain_writebuf();
}
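
/*
 * A hypothetical usage sketch (assumed names): on a non-coherent ARM cache,
 * a driver writes back dirty lines before the device reads a buffer and
 * invalidates stale lines after the device has written one.
 */
#if 0
        /* CPU -> device: flush the buffer before starting the transfer. */
        bus_dmamap_sync(sc->tx_dmat, txmap, BUS_DMASYNC_PREWRITE);
        foo_start_tx(sc);                       /* assumed device kick */

        /* Device -> CPU: discard stale cache lines before reading. */
        bus_dmamap_sync(sc->rx_dmat, rxmap, BUS_DMASYNC_POSTREAD);
#endif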
