The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/powerpc/powerpc/busdma_machdep.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 2002 Peter Grehan
    3  * Copyright (c) 1997, 1998 Justin T. Gibbs.
    4  * All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions, and the following disclaimer,
   11  *    without modification, immediately at the beginning of the file.
   12  * 2. The name of the author may not be used to endorse or promote products
   13  *    derived from this software without specific prior written permission.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
   19  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   25  * SUCH DAMAGE.
   26  *
   27  *   From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
   28  */
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD: releng/6.4/sys/powerpc/powerpc/busdma_machdep.c 147851 2005-07-09 06:53:52Z grehan $");
   32 
   33 /*
   34  * MacPPC bus dma support routines
   35  */
   36 
   37 #include <sys/param.h>
   38 #include <sys/systm.h>
   39 #include <sys/malloc.h>
   40 #include <sys/bus.h>
   41 #include <sys/interrupt.h>
   42 #include <sys/lock.h>
   43 #include <sys/proc.h>
   44 #include <sys/mutex.h>
   45 #include <sys/mbuf.h>
   46 #include <sys/uio.h>
   47 
   48 #include <vm/vm.h>
   49 #include <vm/vm_page.h>
   50 #include <vm/vm_map.h>
   51 
   52 #include <machine/atomic.h>
   53 #include <machine/bus.h>
   54 #include <machine/cpufunc.h>
   55 
/*
 * DMA tag: a bundle of restrictions (address range, alignment, boundary,
 * segment limits) that constrain every map created from it.  Tags form a
 * chain through `parent`, and a child inherits/merges the parent's
 * restrictions at creation time (see bus_dma_tag_create()).
 */
struct bus_dma_tag {
        bus_dma_tag_t     parent;       /* chained tag we inherit limits from */
        bus_size_t        alignment;    /* alignment passed to contigmalloc() */
        bus_size_t        boundary;     /* no segment may cross this (0 = none) */
        bus_addr_t        lowaddr;      /* upper limit for contigmalloc() */
        bus_addr_t        highaddr;     /* merged from parent, not otherwise used here */
        bus_dma_filter_t *filter;       /* inherited but never invoked in this file */
        void             *filterarg;    /* argument for filter */
        bus_size_t        maxsize;      /* max mapping size; picks malloc vs contigmalloc */
        u_int             nsegments;    /* max number of S/G segments */
        bus_size_t        maxsegsz;     /* max size of any single segment */
        int               flags;        /* BUS_DMA_* flags from creation */
        int               ref_count;    /* self + children referencing us */
        int               map_count;    /* outstanding maps; blocks tag destroy */
        bus_dma_lock_t   *lockfunc;     /* driver lock for deferred callbacks */
        void             *lockfuncarg;  /* argument for lockfunc */
};
   73 
/*
 * DMA map.  NOTE(review): on this platform bus_dmamap_create() always
 * returns a NULL map and never allocates this structure, so these fields
 * are not populated anywhere in this file; the layout presumably exists
 * for deferred-load support that was never needed here.
 */
struct bus_dmamap {
        bus_dma_tag_t          dmat;             /* tag the map was created from */
        void                  *buf;              /* unmapped buffer pointer */
        bus_size_t             buflen;           /* unmapped buffer length */
        bus_dmamap_callback_t *callback;         /* deferred-load callback */
        void                  *callback_arg;     /* argument for callback */
};
   81 
   82 /*
   83  * Convenience function for manipulating driver locks from busdma (during
   84  * busdma_swi, for example).  Drivers that don't provide their own locks
   85  * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
   86  * non-mutex locking scheme don't have to use this at all.
   87  */
   88 void
   89 busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
   90 {
   91         struct mtx *dmtx;
   92 
   93         dmtx = (struct mtx *)arg;
   94         switch (op) {
   95         case BUS_DMA_LOCK:
   96                 mtx_lock(dmtx);
   97                 break;
   98         case BUS_DMA_UNLOCK:
   99                 mtx_unlock(dmtx);
  100                 break;
  101         default:
  102                 panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
  103         }
  104 }
  105 
  106 /*
  107  * dflt_lock should never get called.  It gets put into the dma tag when
  108  * lockfunc == NULL, which is only valid if the maps that are associated
  109  * with the tag are meant to never be deferred.
  110  * XXX Should have a way to identify which driver is responsible here.
  111  */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	/*
	 * Reaching this function means a driver created its tag with
	 * lockfunc == NULL but still ended up needing a deferred
	 * callback.  Both parameters are intentionally unused.
	 */
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}
  121 
  122 /*
  123  * Allocate a device specific dma_tag.
  124  */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
                   bus_size_t boundary, bus_addr_t lowaddr,
                   bus_addr_t highaddr, bus_dma_filter_t *filter,
                   void *filterarg, bus_size_t maxsize, int nsegments,
                   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
                   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	/*
	 * Round each address limit up to the last byte of its page;
	 * the limits are only honored with page granularity.
	 */
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	/* lockfunc == NULL is only valid if callbacks are never deferred. */
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}

	/*
	 * Take into account any restrictions imposed by our parent tag
	 */
	if (parent != NULL) {
		newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = max(parent->highaddr, newtag->highaddr);
		/* Keep the most restrictive (smallest) non-zero boundary. */
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		/* Keep whichever tag we now chain to alive. */
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	*dmat = newtag;
	return (error);
}
  191 
  192 int
  193 bus_dma_tag_destroy(bus_dma_tag_t dmat)
  194 {
  195         if (dmat != NULL) {
  196                 
  197                 if (dmat->map_count != 0)
  198                         return (EBUSY);
  199                 
  200                 while (dmat != NULL) {
  201                         bus_dma_tag_t parent;
  202                         
  203                         parent = dmat->parent;
  204                         atomic_subtract_int(&dmat->ref_count, 1);
  205                         if (dmat->ref_count == 0) {
  206                                 free(dmat, M_DEVBUF);
  207                                 /*
  208                                  * Last reference count, so
  209                                  * release our reference
  210                                  * count on our parent.
  211                                  */
  212                                 dmat = parent;
  213                         } else
  214                                 dmat = NULL;
  215                 }
  216         }
  217         return (0);
  218 }
  219 
  220 /*
  221  * Allocate a handle for mapping from kva/uva/physical
  222  * address space into bus device space.
  223  */
  224 int
  225 bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
  226 {
  227         *mapp = NULL;
  228         dmat->map_count++;
  229 
  230         return (0);
  231 }
  232 
  233 /*
  234  * Destroy a handle for mapping from kva/uva/physical
  235  * address space into bus device space.
  236  */
  237 int
  238 bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
  239 {
  240         if (map != NULL) {
  241                 panic("dmamap_destroy: NULL?\n");
  242         }
  243         dmat->map_count--;
  244         return (0);
  245 }
  246 
  247 /*
  248  * Allocate a piece of memory that can be efficiently mapped into
  249  * bus device space based on the constraints listed in the dma tag.
  250  * A dmamap for use with dmamap_load is also allocated.
  251  */
  252 int
  253 bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
  254                  bus_dmamap_t *mapp)
  255 {
  256         int mflags;
  257 
  258         if (flags & BUS_DMA_NOWAIT)
  259                 mflags = M_NOWAIT;
  260         else
  261                 mflags = M_WAITOK;
  262         if (flags & BUS_DMA_ZERO)
  263                 mflags |= M_ZERO;
  264 
  265         *mapp = NULL;
  266         
  267         if (dmat->maxsize <= PAGE_SIZE) {
  268                 *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
  269         } else {
  270                 /*
  271                  * XXX Use Contigmalloc until it is merged into this facility
  272                  *     and handles multi-seg allocations.  Nobody is doing
  273                  *     multi-seg allocations yet though.
  274                  */
  275                 *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
  276                     0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
  277                     dmat->boundary);
  278         }
  279 
  280         if (*vaddr == NULL)
  281                 return (ENOMEM);
  282 
  283         return (0);
  284 }
  285 
  286 /*
  287  * Free a piece of memory and its allocated dmamap, that was allocated
  288  * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
  289  */
  290 void
  291 bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
  292 {
  293         if (map != NULL)
  294                 panic("bus_dmamem_free: Invalid map freed\n");
  295         if (dmat->maxsize <= PAGE_SIZE)
  296                 free(vaddr, M_DEVBUF);
  297         else {
  298                 contigfree(vaddr, dmat->maxsize, M_DEVBUF);
  299         }
  300 }
  301 
  302 /*
  303  * Map the buffer buf into bus space using the dmamap map.
  304  */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
        vm_offset_t             vaddr;
        vm_offset_t             paddr;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
        bus_dma_segment_t       dm_segments[dmat->nsegments];
#else
        bus_dma_segment_t       dm_segments[BUS_DMAMAP_NSEGS];
#endif
        bus_dma_segment_t      *sg;
        int                     seg;
        int                     error = 0;
        vm_offset_t             nextpaddr;

	/* Maps are always NULL on this platform (see bus_dmamap_create()). */
        if (map != NULL)
                panic("bus_dmamap_load: Invalid map\n");

        vaddr = (vm_offset_t)buf;
        sg = &dm_segments[0];
        seg = 1;		/* seg is a 1-based segment count */
        sg->ds_len = 0;
        nextpaddr = 0;

        do {
                bus_size_t      size;

		/* Translate (kernel virtual) buf one page at a time. */
                paddr = pmap_kextract(vaddr);
                size = PAGE_SIZE - (paddr & PAGE_MASK);
                if (size > buflen)
                        size = buflen;

                if (sg->ds_len == 0) {
			/* First chunk: start the initial segment. */
                        sg->ds_addr = paddr;
                        sg->ds_len = size;
                } else if (paddr == nextpaddr) {
			/* Physically contiguous: extend current segment. */
                        sg->ds_len += size;
                } else {
                        /* Go to the next segment */
                        sg++;
                        seg++;
                        if (seg > dmat->nsegments)
                                break;
                        sg->ds_addr = paddr;
                        sg->ds_len = size;
                }
                vaddr += size;
                nextpaddr = paddr + size;
                buflen -= size;

        } while (buflen > 0);

	/* Leftover length means we broke out after exhausting segments. */
        if (buflen != 0) {
                printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
                    (u_long)buflen);
                error = EFBIG;
        }

	/*
	 * The outcome (including EFBIG) is delivered via the callback;
	 * this function itself always returns 0.
	 */
        (*callback)(callback_arg, dm_segments, seg, error);

        return (0);
}
  369 
  370 /*
  371  * Utility function to load a linear buffer.  lastaddrp holds state
  372  * between invocations (for multiple-buffer loads).  segp contains
  373  * the starting segment on entrance, and the ending segment on exit.
  374  * first indicates if this is the first invocation of this function.
  375  */
static int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
    void *buf, bus_size_t buflen, struct thread *td,
    int flags, vm_offset_t *lastaddrp, int *segp,
    int first)
{
        bus_size_t sgsize;
        bus_addr_t curaddr, lastaddr, baddr, bmask;
        vm_offset_t vaddr = (vm_offset_t)buf;
        int seg;
        pmap_t pmap;

	/* A thread is supplied only when buf is a user-space address. */
        if (td != NULL)
                pmap = vmspace_pmap(td->td_proc->p_vmspace);
        else
                pmap = NULL;

        lastaddr = *lastaddrp;
	/* Only meaningful when dmat->boundary != 0 (assumed power of two). */
        bmask = ~(dmat->boundary - 1);

        for (seg = *segp; buflen > 0 ; ) {
                /*
                 * Get the physical address for this segment.
                 */
                if (pmap)
                        curaddr = pmap_extract(pmap, vaddr);
                else
                        curaddr = pmap_kextract(vaddr);

                /*
                 * Compute the segment size, and adjust counts.
                 */
                sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
                if (buflen < sgsize)
                        sgsize = buflen;

                /*
                 * Make sure we don't cross any boundaries.
                 */
                if (dmat->boundary > 0) {
                        baddr = (curaddr + dmat->boundary) & bmask;
                        if (sgsize > (baddr - curaddr))
                                sgsize = (baddr - curaddr);
                }

                /*
                 * Insert chunk into a segment, coalescing with
                 * the previous segment if possible.
                 */
                if (first) {
                        segs[seg].ds_addr = curaddr;
                        segs[seg].ds_len = sgsize;
                        first = 0;
                } else {
			/*
			 * Coalesce only if physically contiguous, still
			 * within maxsegsz, and not straddling a boundary.
			 */
                        if (curaddr == lastaddr &&
                            (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
                            (dmat->boundary == 0 ||
                             (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
                                segs[seg].ds_len += sgsize;
                        else {
                                if (++seg >= dmat->nsegments)
                                        break;
                                segs[seg].ds_addr = curaddr;
                                segs[seg].ds_len = sgsize;
                        }
                }

                lastaddr = curaddr + sgsize;
                vaddr += sgsize;
                buflen -= sgsize;
        }

	/* Hand the running state back for the next invocation. */
        *segp = seg;
        *lastaddrp = lastaddr;

        /*
         * Did we fit?
         */
        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
  456 
  457 /*
  458  * Like bus_dmamap_load(), but for mbufs.
  459  */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
                     bus_dmamap_callback2_t *callback, void *callback_arg,
                     int flags)
{
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
        bus_dma_segment_t dm_segments[dmat->nsegments];
#else
        bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
        int nsegs = 0, error = 0;

        M_ASSERTPKTHDR(m0);

        if (m0->m_pkthdr.len <= dmat->maxsize) {
                int first = 1;
		/* lastaddr/nsegs carry mapping state across the chain. */
                vm_offset_t lastaddr = 0;
                struct mbuf *m;

                for (m = m0; m != NULL && error == 0; m = m->m_next) {
                        if (m->m_len > 0) {
				/* Mbuf data is kernel memory: td == NULL. */
                                error = bus_dmamap_load_buffer(dmat,
                                    dm_segments, m->m_data, m->m_len, NULL,
                                    flags, &lastaddr, &nsegs, first);
                                first = 0;
                        }
                }
        } else {
                error = EINVAL;
        }

        if (error) {
                /* 
                 * force "no valid mappings" on error in callback.
                 */
                (*callback)(callback_arg, dm_segments, 0, 0, error);
        } else {
		/* nsegs is the last segment's index; report a count. */
                (*callback)(callback_arg, dm_segments, nsegs+1,
                    m0->m_pkthdr.len, error);
        }
        return (error);
}
  502 
/*
 * Like bus_dmamap_load_mbuf(), but fills a caller-provided segment
 * array instead of invoking a callback.  On success *nsegs holds the
 * number of segments used.
 */
int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
                        bus_dma_segment_t *segs, int *nsegs, int flags)
{
        int error = 0;

        M_ASSERTPKTHDR(m0);

        *nsegs = 0;

        if (m0->m_pkthdr.len <= dmat->maxsize) {
                int first = 1;
		/* lastaddr/*nsegs carry mapping state across the chain. */
                vm_offset_t lastaddr = 0;
                struct mbuf *m;

                for (m = m0; m != NULL && error == 0; m = m->m_next) {
                        if (m->m_len > 0) {
				/* Mbuf data is kernel memory: td == NULL. */
                                error = bus_dmamap_load_buffer(dmat,
                                    segs, m->m_data, m->m_len, NULL,
                                    flags, &lastaddr, nsegs, first);
                                first = 0;
                        }
                }
		/* Convert the last segment's index into a count. */
                ++*nsegs;
        } else {
                error = EINVAL;
        }

        return (error);
}
  533 
  534 /*
  535  * Like bus_dmamap_load(), but for uios.
  536  */
  537 int
  538 bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
  539     bus_dmamap_callback2_t *callback, void *callback_arg,
  540     int flags)
  541 {
  542         vm_offset_t lastaddr;
  543 #ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
  544         bus_dma_segment_t dm_segments[dmat->nsegments];
  545 #else
  546         bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
  547 #endif
  548         int nsegs, i, error, first;
  549         bus_size_t resid;
  550         struct iovec *iov;
  551         struct thread *td = NULL;
  552 
  553         resid = uio->uio_resid;
  554         iov = uio->uio_iov;
  555 
  556         if (uio->uio_segflg == UIO_USERSPACE) {
  557                 td = uio->uio_td;
  558                 KASSERT(td != NULL,
  559                     ("bus_dmamap_load_uio: USERSPACE but no proc"));
  560         }
  561 
  562         first = 1;
  563         nsegs = error = 0;
  564         for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
  565                 /*
  566                  * Now at the first iovec to load.  Load each iovec
  567                  * until we have exhausted the residual count.
  568                  */
  569                 bus_size_t minlen =
  570                     resid < iov[i].iov_len ? resid : iov[i].iov_len;
  571                 caddr_t addr = (caddr_t) iov[i].iov_base;
  572 
  573                 if (minlen > 0) {
  574                         error = bus_dmamap_load_buffer(dmat, dm_segments, addr,
  575                             minlen, td, flags, &lastaddr, &nsegs, first);
  576 
  577                         first = 0;
  578 
  579                         resid -= minlen;
  580                 }
  581         }
  582 
  583         if (error) {
  584                 /* 
  585                  * force "no valid mappings" on error in callback.
  586                  */
  587                 (*callback)(callback_arg, dm_segments, 0, 0, error);
  588         } else {
  589                 (*callback)(callback_arg, dm_segments, nsegs+1,
  590                     uio->uio_resid, error);
  591         }
  592 
  593         return (error);
  594 }
  595 
  596 /*
  597  * Release the mapping held by map. A no-op on PowerPC.
  598  */
  599 void
  600 _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
  601 {
  602 
  603         return;
  604 }
  605 
  606 void
  607 _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
  608 {
  609 
  610         return;
  611 }

Cache object: b4ac8032b3cc59513461a397292a2cd3


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.