The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/sparc64/sparc64/bus_machdep.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
    3  * All rights reserved.
    4  *
    5  * This code is derived from software contributed to The NetBSD Foundation
    6  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
    7  * NASA Ames Research Center.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 3. All advertising materials mentioning features or use of this software
   18  *    must display the following acknowledgement:
   19  *      This product includes software developed by the NetBSD
   20  *      Foundation, Inc. and its contributors.
   21  * 4. Neither the name of The NetBSD Foundation nor the names of its
   22  *    contributors may be used to endorse or promote products derived
   23  *    from this software without specific prior written permission.
   24  *
   25  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   35  * POSSIBILITY OF SUCH DAMAGE.
   36  */
   37 /*-
   38  * Copyright (c) 1992, 1993
   39  *      The Regents of the University of California.  All rights reserved.
   40  *
   41  * This software was developed by the Computer Systems Engineering group
   42  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
   43  * contributed to Berkeley.
   44  *
   45  * Redistribution and use in source and binary forms, with or without
   46  * modification, are permitted provided that the following conditions
   47  * are met:
   48  * 1. Redistributions of source code must retain the above copyright
   49  *    notice, this list of conditions and the following disclaimer.
   50  * 2. Redistributions in binary form must reproduce the above copyright
   51  *    notice, this list of conditions and the following disclaimer in the
   52  *    documentation and/or other materials provided with the distribution.
   53  * 4. Neither the name of the University nor the names of its contributors
   54  *    may be used to endorse or promote products derived from this software
   55  *    without specific prior written permission.
   56  *
   57  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   58  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   59  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   60  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   61  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   62  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   63  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   64  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   65  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   66  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   67  * SUCH DAMAGE.
   68  */
   69 /*-
   70  * Copyright (c) 1997, 1998 Justin T. Gibbs.
   71  * All rights reserved.
   72  * Copyright 2001 by Thomas Moestl <tmm@FreeBSD.org>.  All rights reserved.
   73  *
   74  * Redistribution and use in source and binary forms, with or without
   75  * modification, are permitted provided that the following conditions
   76  * are met:
   77  * 1. Redistributions of source code must retain the above copyright
   78  *    notice, this list of conditions, and the following disclaimer,
   79  *    without modification, immediately at the beginning of the file.
   80  * 2. The name of the author may not be used to endorse or promote products
   81  *    derived from this software without specific prior written permission.
   82  *
   83  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   84  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   85  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   86  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
   87  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   88  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   89  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   90  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   91  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   92  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   93  * SUCH DAMAGE.
   94  *
   95  *      from: @(#)machdep.c     8.6 (Berkeley) 1/14/94
   96  *      from: NetBSD: machdep.c,v 1.111 2001/09/15 07:13:40 eeh Exp
   97  *      and
   98  *      from: FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.24 2001/08/15
   99  */
  100 
  101 #include <sys/cdefs.h>
  102 __FBSDID("$FreeBSD$");
  103 
  104 #include <sys/param.h>
  105 #include <sys/bus.h>
  106 #include <sys/lock.h>
  107 #include <sys/malloc.h>
  108 #include <sys/mbuf.h>
  109 #include <sys/mutex.h>
  110 #include <sys/proc.h>
  111 #include <sys/smp.h>
  112 #include <sys/systm.h>
  113 #include <sys/uio.h>
  114 
  115 #include <vm/vm.h>
  116 #include <vm/vm_extern.h>
  117 #include <vm/vm_kern.h>
  118 #include <vm/vm_page.h>
  119 #include <vm/vm_param.h>
  120 #include <vm/vm_map.h>
  121 
  122 #include <machine/asi.h>
  123 #include <machine/atomic.h>
  124 #include <machine/bus.h>
  125 #include <machine/bus_private.h>
  126 #include <machine/cache.h>
  127 #include <machine/smp.h>
  128 #include <machine/tlb.h>
  129 
  130 static void nexus_bus_barrier(bus_space_tag_t, bus_space_handle_t,
  131     bus_size_t, bus_size_t, int);
  132 
/*
 * ASIs for regular bus access, indexed by bus space type.  The PCI
 * spaces use the _L variant (presumably the little-endian form of the
 * bypass ASI — confirm against <machine/asi.h>).
 */
int bus_type_asi[] = {
        ASI_PHYS_BYPASS_EC_WITH_EBIT,           /* nexus */
        ASI_PHYS_BYPASS_EC_WITH_EBIT,           /* SBus */
        ASI_PHYS_BYPASS_EC_WITH_EBIT_L,         /* PCI configuration space */
        ASI_PHYS_BYPASS_EC_WITH_EBIT_L,         /* PCI memory space */
        ASI_PHYS_BYPASS_EC_WITH_EBIT_L,         /* PCI I/O space */
        0
};
  142 
/*
 * ASIs for stream bus access; every bus type uses the same ASI here,
 * unlike bus_type_asi[] above.
 */
int bus_stream_asi[] = {
        ASI_PHYS_BYPASS_EC_WITH_EBIT,           /* nexus */
        ASI_PHYS_BYPASS_EC_WITH_EBIT,           /* SBus */
        ASI_PHYS_BYPASS_EC_WITH_EBIT,           /* PCI configuration space */
        ASI_PHYS_BYPASS_EC_WITH_EBIT,           /* PCI memory space */
        ASI_PHYS_BYPASS_EC_WITH_EBIT,           /* PCI I/O space */
        0
};
  151 
  152 /*
  153  * Convenience function for manipulating driver locks from busdma (during
  154  * busdma_swi, for example).  Drivers that don't provide their own locks
  155  * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
  156  * non-mutex locking scheme don't have to use this at all.
  157  */
  158 void
  159 busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
  160 {
  161         struct mtx *dmtx;
  162 
  163         dmtx = (struct mtx *)arg;
  164         switch (op) {
  165         case BUS_DMA_LOCK:
  166                 mtx_lock(dmtx);
  167                 break;
  168         case BUS_DMA_UNLOCK:
  169                 mtx_unlock(dmtx);
  170                 break;
  171         default:
  172                 panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
  173         }
  174 }
  175 
/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
        /* Debug kernels make this driver error fatal. */
        panic("driver error: busdma dflt_lock called");
#else
        /* Production kernels merely complain and continue. */
        printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}
  191 
/*
 * Allocate a device specific dma_tag.
 *
 * The new tag inherits the parent's method table and cookie, and its
 * address/boundary/segment restrictions are clipped against the
 * parent's.  Returns 0 with *dmat set, or ENOMEM (with *dmat NULL).
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
        bus_dma_tag_t newtag;

        /* Return a NULL tag on failure */
        *dmat = NULL;

        /* Enforce the usage of BUS_GET_DMA_TAG(). */
        if (parent == NULL)
                panic("%s: parent DMA tag NULL", __func__);

        newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
        if (newtag == NULL)
                return (ENOMEM);

        /*
         * The method table pointer and the cookie need to be taken over from
         * the parent.
         */
        newtag->dt_cookie = parent->dt_cookie;
        newtag->dt_mt = parent->dt_mt;

        newtag->dt_parent = parent;
        newtag->dt_alignment = alignment;
        newtag->dt_boundary = boundary;
        /* Extend the address limits to the last byte of their pages. */
        newtag->dt_lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
        newtag->dt_highaddr = trunc_page((vm_offset_t)highaddr) +
            (PAGE_SIZE - 1);
        newtag->dt_filter = filter;
        newtag->dt_filterarg = filterarg;
        newtag->dt_maxsize = maxsize;
        newtag->dt_nsegments = nsegments;
        newtag->dt_maxsegsz = maxsegsz;
        newtag->dt_flags = flags;
        newtag->dt_ref_count = 1; /* Count ourselves */
        newtag->dt_map_count = 0;

        /* Fall back to dflt_lock(), which must never be called. */
        if (lockfunc != NULL) {
                newtag->dt_lockfunc = lockfunc;
                newtag->dt_lockfuncarg = lockfuncarg;
        } else {
                newtag->dt_lockfunc = dflt_lock;
                newtag->dt_lockfuncarg = NULL;
        }

        /* Allocated lazily in sparc64_dma_alloc_map(). */
        newtag->dt_segments = NULL;

        /* Take into account any restrictions imposed by our parent tag */
        newtag->dt_lowaddr = ulmin(parent->dt_lowaddr, newtag->dt_lowaddr);
        newtag->dt_highaddr = ulmax(parent->dt_highaddr, newtag->dt_highaddr);
        if (newtag->dt_boundary == 0)
                newtag->dt_boundary = parent->dt_boundary;
        else if (parent->dt_boundary != 0)
                newtag->dt_boundary = ulmin(parent->dt_boundary,
                    newtag->dt_boundary);
        atomic_add_int(&parent->dt_ref_count, 1);

        /* A boundary also caps the maximum segment size. */
        if (newtag->dt_boundary > 0)
                newtag->dt_maxsegsz = ulmin(newtag->dt_maxsegsz,
                    newtag->dt_boundary);

        *dmat = newtag;
        return (0);
}
  264 
/*
 * Release a reference on a DMA tag, walking up the parent chain and
 * freeing every tag whose last reference goes away.  Returns EBUSY if
 * the tag still has maps outstanding, 0 otherwise (including NULL).
 */
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
        bus_dma_tag_t parent;

        if (dmat != NULL) {
                /* Tags with outstanding maps cannot be destroyed. */
                if (dmat->dt_map_count != 0)
                        return (EBUSY);
                while (dmat != NULL) {
                        parent = dmat->dt_parent;
                        /*
                         * NOTE(review): the drop is atomic but the re-read
                         * of dt_ref_count below is not; presumably tag
                         * destruction is serialized by callers — confirm.
                         */
                        atomic_subtract_int(&dmat->dt_ref_count, 1);
                        if (dmat->dt_ref_count == 0) {
                                if (dmat->dt_segments != NULL)
                                        free(dmat->dt_segments, M_DEVBUF);
                                free(dmat, M_DEVBUF);
                                /*
                                 * Last reference count, so
                                 * release our reference
                                 * count on our parent.
                                 */
                                dmat = parent;
                        } else
                                dmat = NULL;
                }
        }
        return (0);
}
  292 
/* Allocate/free a map, and do the necessary management work. */
int
sparc64_dma_alloc_map(bus_dma_tag_t dmat, bus_dmamap_t *mapp)
{

        /*
         * The segment array is allocated lazily on first map creation
         * and shared by all maps of the tag; it is released in
         * bus_dma_tag_destroy().
         */
        if (dmat->dt_segments == NULL) {
                dmat->dt_segments = (bus_dma_segment_t *)malloc(
                    sizeof(bus_dma_segment_t) * dmat->dt_nsegments, M_DEVBUF,
                    M_NOWAIT);
                if (dmat->dt_segments == NULL)
                        return (ENOMEM);
        }
        *mapp = malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO);
        if (*mapp == NULL)
                return (ENOMEM);

        SLIST_INIT(&(*mapp)->dm_reslist);
        dmat->dt_map_count++;
        return (0);
}
  313 
  314 void
  315 sparc64_dma_free_map(bus_dma_tag_t dmat, bus_dmamap_t map)
  316 {
  317 
  318         free(map, M_DEVBUF);
  319         dmat->dt_map_count--;
  320 }
  321 
  322 static int
  323 nexus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
  324 {
  325 
  326         return (sparc64_dma_alloc_map(dmat, mapp));
  327 }
  328 
  329 static int
  330 nexus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
  331 {
  332 
  333         sparc64_dma_free_map(dmat, map);
  334         return (0);
  335 }
  336 
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 * Returns 0, or EFBIG if the buffer did not fit into dt_nsegments
 * segments.
 */
static int
_nexus_dmamap_load_buffer(bus_dma_tag_t dmat, void *buf, bus_size_t buflen,
    struct thread *td, int flags, bus_addr_t *lastaddrp,
    bus_dma_segment_t *segs, int *segp, int first)
{
        bus_size_t sgsize;
        bus_addr_t curaddr, lastaddr, baddr, bmask;
        vm_offset_t vaddr = (vm_offset_t)buf;
        int seg;
        pmap_t pmap;

        /* A thread argument selects that thread's user pmap. */
        if (td != NULL)
                pmap = vmspace_pmap(td->td_proc->p_vmspace);
        else
                pmap = NULL;

        lastaddr = *lastaddrp;
        /* Only meaningful when dt_boundary is a power of two (or 0). */
        bmask  = ~(dmat->dt_boundary - 1);

        for (seg = *segp; buflen > 0 ; ) {
                /*
                 * Get the physical address for this segment.
                 */
                if (pmap)
                        curaddr = pmap_extract(pmap, vaddr);
                else
                        curaddr = pmap_kextract(vaddr);

                /*
                 * Compute the segment size, and adjust counts.  A chunk
                 * never extends beyond its physical page, dt_maxsegsz,
                 * or the remaining buffer length.
                 */
                sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
                if (sgsize > dmat->dt_maxsegsz)
                        sgsize = dmat->dt_maxsegsz;
                if (buflen < sgsize)
                        sgsize = buflen;

                /*
                 * Make sure we don't cross any boundaries.
                 */
                if (dmat->dt_boundary > 0) {
                        baddr = (curaddr + dmat->dt_boundary) & bmask;
                        if (sgsize > (baddr - curaddr))
                                sgsize = (baddr - curaddr);
                }

                /*
                 * Insert chunk into a segment, coalescing with
                 * previous segment if possible.
                 */
                if (first) {
                        segs[seg].ds_addr = curaddr;
                        segs[seg].ds_len = sgsize;
                        first = 0;
                } else {
                        /*
                         * Coalesce only physically contiguous chunks
                         * that keep the segment within maxsegsz and on
                         * the same boundary window.
                         */
                        if (curaddr == lastaddr &&
                            (segs[seg].ds_len + sgsize) <= dmat->dt_maxsegsz &&
                            (dmat->dt_boundary == 0 ||
                            (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
                                segs[seg].ds_len += sgsize;
                        else {
                                /* Out of segments: report EFBIG below. */
                                if (++seg >= dmat->dt_nsegments)
                                        break;
                                segs[seg].ds_addr = curaddr;
                                segs[seg].ds_len = sgsize;
                        }
                }

                lastaddr = curaddr + sgsize;
                vaddr += sgsize;
                buflen -= sgsize;
        }

        *segp = seg;
        *lastaddrp = lastaddr;

        /*
         * Did we fit?
         */
        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
  424 
  425 /*
  426  * Common function for loading a DMA map with a linear buffer.  May
  427  * be called by bus-specific DMA map load functions.
  428  *
  429  * Most SPARCs have IOMMUs in the bus controllers.  In those cases
  430  * they only need one segment and will use virtual addresses for DVMA.
  431  * Those bus controllers should intercept these vectors and should
  432  * *NEVER* call nexus_dmamap_load() which is used only by devices that
  433  * bypass DVMA.
  434  */
  435 static int
  436 nexus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
  437     bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg,
  438     int flags)
  439 {
  440         bus_addr_t lastaddr;
  441         int error, nsegs;
  442 
  443         error = _nexus_dmamap_load_buffer(dmat, buf, buflen, NULL, flags,
  444             &lastaddr, dmat->dt_segments, &nsegs, 1);
  445 
  446         if (error == 0) {
  447                 (*callback)(callback_arg, dmat->dt_segments, nsegs + 1, 0);
  448                 map->dm_flags |= DMF_LOADED;
  449         } else
  450                 (*callback)(callback_arg, NULL, 0, error);
  451 
  452         return (0);
  453 }
  454 
  455 /*
  456  * Like nexus_dmamap_load(), but for mbufs.
  457  */
  458 static int
  459 nexus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
  460     bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
  461 {
  462         int nsegs, error;
  463 
  464         M_ASSERTPKTHDR(m0);
  465 
  466         nsegs = 0;
  467         error = 0;
  468         if (m0->m_pkthdr.len <= dmat->dt_maxsize) {
  469                 int first = 1;
  470                 bus_addr_t lastaddr = 0;
  471                 struct mbuf *m;
  472 
  473                 for (m = m0; m != NULL && error == 0; m = m->m_next) {
  474                         if (m->m_len > 0) {
  475                                 error = _nexus_dmamap_load_buffer(dmat,
  476                                     m->m_data, m->m_len,NULL, flags, &lastaddr,
  477                                     dmat->dt_segments, &nsegs, first);
  478                                 first = 0;
  479                         }
  480                 }
  481         } else {
  482                 error = EINVAL;
  483         }
  484 
  485         if (error) {
  486                 /* force "no valid mappings" in callback */
  487                 (*callback)(callback_arg, dmat->dt_segments, 0, 0, error);
  488         } else {
  489                 map->dm_flags |= DMF_LOADED;
  490                 (*callback)(callback_arg, dmat->dt_segments, nsegs + 1,
  491                     m0->m_pkthdr.len, error);
  492         }
  493         return (error);
  494 }
  495 
  496 static int
  497 nexus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
  498     bus_dma_segment_t *segs, int *nsegs, int flags)
  499 {
  500         int error;
  501 
  502         M_ASSERTPKTHDR(m0);
  503 
  504         *nsegs = 0;
  505         error = 0;
  506         if (m0->m_pkthdr.len <= dmat->dt_maxsize) {
  507                 int first = 1;
  508                 bus_addr_t lastaddr = 0;
  509                 struct mbuf *m;
  510 
  511                 for (m = m0; m != NULL && error == 0; m = m->m_next) {
  512                         if (m->m_len > 0) {
  513                                 error = _nexus_dmamap_load_buffer(dmat,
  514                                     m->m_data, m->m_len,NULL, flags, &lastaddr,
  515                                     segs, nsegs, first);
  516                                 first = 0;
  517                         }
  518                 }
  519         } else {
  520                 error = EINVAL;
  521         }
  522 
  523         ++*nsegs;
  524         return (error);
  525 }
  526 
  527 /*
  528  * Like nexus_dmamap_load(), but for uios.
  529  */
  530 static int
  531 nexus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
  532     bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
  533 {
  534         bus_addr_t lastaddr;
  535         int nsegs, error, first, i;
  536         bus_size_t resid;
  537         struct iovec *iov;
  538         struct thread *td = NULL;
  539 
  540         resid = uio->uio_resid;
  541         iov = uio->uio_iov;
  542 
  543         if (uio->uio_segflg == UIO_USERSPACE) {
  544                 td = uio->uio_td;
  545                 KASSERT(td != NULL,
  546                         ("nexus_dmamap_load_uio: USERSPACE but no proc"));
  547         }
  548 
  549         nsegs = 0;
  550         error = 0;
  551         first = 1;
  552         for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
  553                 /*
  554                  * Now at the first iovec to load.  Load each iovec
  555                  * until we have exhausted the residual count.
  556                  */
  557                 bus_size_t minlen =
  558                         resid < iov[i].iov_len ? resid : iov[i].iov_len;
  559                 caddr_t addr = (caddr_t) iov[i].iov_base;
  560 
  561                 if (minlen > 0) {
  562                         error = _nexus_dmamap_load_buffer(dmat, addr, minlen,
  563                             td, flags, &lastaddr, dmat->dt_segments, &nsegs,
  564                             first);
  565                         first = 0;
  566 
  567                         resid -= minlen;
  568                 }
  569         }
  570 
  571         if (error) {
  572                 /* force "no valid mappings" in callback */
  573                 (*callback)(callback_arg, dmat->dt_segments, 0, 0, error);
  574         } else {
  575                 map->dm_flags |= DMF_LOADED;
  576                 (*callback)(callback_arg, dmat->dt_segments, nsegs + 1,
  577                     uio->uio_resid, error);
  578         }
  579         return (error);
  580 }
  581 
/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
static void
nexus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{

        /* Nothing was allocated at load time; just clear the flag. */
        map->dm_flags &= ~DMF_LOADED;
}
  592 
/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 *
 * NOTE(review): the POSTREAD cache flush below is disabled; the
 * surrounding comments assume the bus controller handles coherency
 * past the CPU's membar — confirm for new bus attachments.
 */
static void
nexus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{

        /*
         * We sync out our caches, but the bus must do the same.
         *
         * Actually a #Sync is expensive.  We should optimize.
         */
        if ((op & BUS_DMASYNC_PREREAD) || (op & BUS_DMASYNC_PREWRITE)) {
                /*
                 * Don't really need to do anything, but flush any pending
                 * writes anyway.
                 */
                membar(Sync);
        }
#if 0
        /* Should not be needed. */
        if (op & BUS_DMASYNC_POSTREAD) {
                ecache_flush((vm_offset_t)map->buf,
                    (vm_offset_t)map->buf + map->buflen - 1);
        }
#endif
        if (op & BUS_DMASYNC_POSTWRITE) {
                /* Nothing to do.  Handled by the bus controller. */
        }
}
  624 
  625 /*
  626  * Common function for DMA-safe memory allocation.  May be called
  627  * by bus-specific DMA memory allocation functions.
  628  */
  629 static int
  630 nexus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
  631     bus_dmamap_t *mapp)
  632 {
  633         int mflags;
  634 
  635         if (flags & BUS_DMA_NOWAIT)
  636                 mflags = M_NOWAIT;
  637         else
  638                 mflags = M_WAITOK;
  639         if (flags & BUS_DMA_ZERO)
  640                 mflags |= M_ZERO;
  641 
  642         if ((dmat->dt_maxsize <= PAGE_SIZE)) {
  643                 *vaddr = malloc(dmat->dt_maxsize, M_DEVBUF, mflags);
  644         } else {
  645                 /*
  646                  * XXX: Use contigmalloc until it is merged into this facility
  647                  * and handles multi-seg allocations.  Nobody is doing multi-seg
  648                  * allocations yet though.
  649                  */
  650                 *vaddr = contigmalloc(dmat->dt_maxsize, M_DEVBUF, mflags,
  651                     0ul, dmat->dt_lowaddr,
  652                     dmat->dt_alignment ? dmat->dt_alignment : 1UL,
  653                     dmat->dt_boundary);
  654         }
  655         if (*vaddr == NULL)
  656                 return (ENOMEM);
  657         return (0);
  658 }
  659 
  660 /*
  661  * Common function for freeing DMA-safe memory.  May be called by
  662  * bus-specific DMA memory free functions.
  663  */
  664 static void
  665 nexus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
  666 {
  667 
  668         if ((dmat->dt_maxsize <= PAGE_SIZE))
  669                 free(vaddr, M_DEVBUF);
  670         else {
  671                 contigfree(vaddr, dmat->dt_maxsize, M_DEVBUF);
  672         }
  673 }
  674 
/*
 * DMA method table for devices hanging directly off the nexus (i.e.
 * devices that bypass DVMA); positional initializers, in the order of
 * struct bus_dma_methods in <machine/bus_private.h>.
 */
struct bus_dma_methods nexus_dma_methods = {
        nexus_dmamap_create,
        nexus_dmamap_destroy,
        nexus_dmamap_load,
        nexus_dmamap_load_mbuf,
        nexus_dmamap_load_mbuf_sg,
        nexus_dmamap_load_uio,
        nexus_dmamap_unload,
        nexus_dmamap_sync,
        nexus_dmamem_alloc,
        nexus_dmamem_free,
};
  687 
/*
 * Presumably the root DMA tag of the hierarchy (bus_dma_tag_create()
 * refuses a NULL parent, so some root tag like this must exist).
 * Positional initializers; the field order must match struct
 * bus_dma_tag in <machine/bus_private.h> — verify before editing.
 * Most limits are "no restriction" (~0).
 */
struct bus_dma_tag nexus_dmatag = {
        NULL,
        NULL,
        1,
        0,
        ~0,
        ~0,
        NULL,           /* XXX */
        NULL,
        ~0,
        ~0,
        ~0,
        0,
        0,
        0,
        NULL,
        NULL,
        NULL,
        &nexus_dma_methods,
};
  708 
/*
 * Helpers to map/unmap bus memory
 *
 * Maps the physical range described by handle/size into kernel virtual
 * space (either at the caller-supplied vaddr or freshly allocated KVA)
 * and returns the resulting virtual handle through *hp.
 */
int
sparc64_bus_mem_map(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t size, int flags, vm_offset_t vaddr, void **hp)
{
        vm_offset_t addr;
        vm_offset_t sva;
        vm_offset_t va;
        vm_paddr_t pa;
        vm_size_t vsz;
        u_long pm_flags;

        /* The handle carries the physical address to be mapped. */
        addr = (vm_offset_t)handle;
        size = round_page(size);
        if (size == 0) {
                printf("%s: zero size\n", __func__);
                return (EINVAL);
        }
        switch (tag->bst_type) {
        case PCI_CONFIG_BUS_SPACE:
        case PCI_IO_BUS_SPACE:
        case PCI_MEMORY_BUS_SPACE:
                /*
                 * NOTE(review): TD_IE is set only for PCI spaces —
                 * presumably the invert-endianness TTE bit; confirm in
                 * <machine/tlb.h>.
                 */
                pm_flags = TD_IE;
                break;
        default:
                pm_flags = 0;
                break;
        }

        /* Non-cacheable mappings additionally get TD_E. */
        if (!(flags & BUS_SPACE_MAP_CACHEABLE))
                pm_flags |= TD_E;

        /* Use the caller's VA when given, otherwise allocate KVA. */
        if (vaddr != 0L)
                sva = trunc_page(vaddr);
        else {
                if ((sva = kmem_alloc_nofault(kernel_map, size)) == 0)
                        panic("%s: cannot allocate virtual memory", __func__);
        }

        /* Preserve page offset. */
        *hp = (void *)(sva | ((u_long)addr & PAGE_MASK));

        pa = trunc_page(addr);
        if ((flags & BUS_SPACE_MAP_READONLY) == 0)
                pm_flags |= TD_W;

        /* Enter a mapping for every page, then flush stale TLB entries. */
        va = sva;
        vsz = size;
        do {
                pmap_kenter_flags(va, pa, pm_flags);
                va += PAGE_SIZE;
                pa += PAGE_SIZE;
        } while ((vsz -= PAGE_SIZE) > 0);
        tlb_range_demap(kernel_pmap, sva, sva + size - 1);
        return (0);
}
  767 
/*
 * Undo sparc64_bus_mem_map(): tear down the page mappings, flush the
 * TLB and release the KVA.
 */
int
sparc64_bus_mem_unmap(void *bh, bus_size_t size)
{
        vm_offset_t sva;
        vm_offset_t va;
        vm_offset_t endva;

        sva = trunc_page((vm_offset_t)bh);
        endva = sva + round_page(size);
        for (va = sva; va < endva; va += PAGE_SIZE)
                pmap_kremove_flags(va);
        tlb_range_demap(kernel_pmap, sva, sva + size - 1);
        /*
         * NOTE(review): the KVA is freed unconditionally, even for
         * mappings created with a caller-supplied vaddr (which was not
         * allocated from kernel_map) — confirm callers never unmap
         * such handles here.
         */
        kmem_free(kernel_map, sva, size);
        return (0);
}
  783 
  784 /*
  785  * Fake up a bus tag, for use by console drivers in early boot when the regular
  786  * means to allocate resources are not yet available.
  787  * Addr is the physical address of the desired start of the handle.
  788  */
  789 bus_space_handle_t
  790 sparc64_fake_bustag(int space, bus_addr_t addr, struct bus_space_tag *ptag)
  791 {
  792 
  793         ptag->bst_cookie = NULL;
  794         ptag->bst_parent = NULL;
  795         ptag->bst_type = space;
  796         ptag->bst_bus_barrier = nexus_bus_barrier;
  797         return (addr);
  798 }
  799 
  800 /*
  801  * Base bus space handlers.
  802  */
  803 
  804 static void
  805 nexus_bus_barrier(bus_space_tag_t t, bus_space_handle_t h, bus_size_t offset,
  806     bus_size_t size, int flags)
  807 {
  808 
  809         /*
  810          * We have lots of alternatives depending on whether we're
  811          * synchronizing loads with loads, loads with stores, stores
  812          * with loads, or stores with stores.  The only ones that seem
  813          * generic are #Sync and #MemIssue.  I'll use #Sync for safety.
  814          */
  815         switch(flags) {
  816         case BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE:
  817         case BUS_SPACE_BARRIER_READ:
  818         case BUS_SPACE_BARRIER_WRITE:
  819                 membar(Sync);
  820                 break;
  821         default:
  822                 panic("%s: unknown flags", __func__);
  823         }
  824         return;
  825 }
  826 
/* Bus space tag for the nexus; uses only the generic #Sync barrier. */
struct bus_space_tag nexus_bustag = {
        NULL,                           /* cookie */
        NULL,                           /* parent bus tag */
        NEXUS_BUS_SPACE,                /* type */
        nexus_bus_barrier,              /* bus_space_barrier */
};

Cache object: e53f66e3762dd261561bf27a74f72992


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.