FreeBSD/Linux Kernel Cross Reference
sys/sparc64/sparc64/bus_machdep.c

    1 /*-
    2  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
    3  * All rights reserved.
    4  *
    5  * This code is derived from software contributed to The NetBSD Foundation
    6  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
    7  * NASA Ames Research Center.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 3. All advertising materials mentioning features or use of this software
   18  *    must display the following acknowledgement:
   19  *      This product includes software developed by the NetBSD
   20  *      Foundation, Inc. and its contributors.
   21  * 4. Neither the name of The NetBSD Foundation nor the names of its
   22  *    contributors may be used to endorse or promote products derived
   23  *    from this software without specific prior written permission.
   24  *
   25  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   35  * POSSIBILITY OF SUCH DAMAGE.
   36  */
   37 /*-
   38  * Copyright (c) 1992, 1993
   39  *      The Regents of the University of California.  All rights reserved.
   40  *
   41  * This software was developed by the Computer Systems Engineering group
   42  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
   43  * contributed to Berkeley.
   44  *
   45  * Redistribution and use in source and binary forms, with or without
   46  * modification, are permitted provided that the following conditions
   47  * are met:
   48  * 1. Redistributions of source code must retain the above copyright
   49  *    notice, this list of conditions and the following disclaimer.
   50  * 2. Redistributions in binary form must reproduce the above copyright
   51  *    notice, this list of conditions and the following disclaimer in the
   52  *    documentation and/or other materials provided with the distribution.
   53  * 4. Neither the name of the University nor the names of its contributors
   54  *    may be used to endorse or promote products derived from this software
   55  *    without specific prior written permission.
   56  *
   57  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   58  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   59  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   60  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   61  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   62  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   63  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   64  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   65  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   66  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   67  * SUCH DAMAGE.
   68  */
   69 /*-
   70  * Copyright (c) 1997, 1998 Justin T. Gibbs.
   71  * All rights reserved.
   72  * Copyright 2001 by Thomas Moestl <tmm@FreeBSD.org>.  All rights reserved.
   73  *
   74  * Redistribution and use in source and binary forms, with or without
   75  * modification, are permitted provided that the following conditions
   76  * are met:
   77  * 1. Redistributions of source code must retain the above copyright
   78  *    notice, this list of conditions, and the following disclaimer,
   79  *    without modification, immediately at the beginning of the file.
   80  * 2. The name of the author may not be used to endorse or promote products
   81  *    derived from this software without specific prior written permission.
   82  *
   83  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   84  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   85  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   86  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
   87  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   88  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   89  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   90  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   91  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   92  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   93  * SUCH DAMAGE.
   94  *
   95  *      from: @(#)machdep.c     8.6 (Berkeley) 1/14/94
   96  *      from: NetBSD: machdep.c,v 1.111 2001/09/15 07:13:40 eeh Exp
   97  *      and
   98  *      from: FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.24 2001/08/15
   99  *
  100  * $FreeBSD: releng/6.0/sys/sparc64/sparc64/bus_machdep.c 140281 2005-01-15 09:20:47Z scottl $
  101  */
  102 
  103 #include <sys/param.h>
  104 #include <sys/bus.h>
  105 #include <sys/lock.h>
  106 #include <sys/malloc.h>
  107 #include <sys/mbuf.h>
  108 #include <sys/mutex.h>
  109 #include <sys/proc.h>
  110 #include <sys/smp.h>
  111 #include <sys/systm.h>
  112 #include <sys/uio.h>
  113 
  114 #include <vm/vm.h>
  115 #include <vm/vm_extern.h>
  116 #include <vm/vm_kern.h>
  117 #include <vm/vm_page.h>
  118 #include <vm/vm_param.h>
  119 #include <vm/vm_map.h>
  120 
  121 #include <machine/asi.h>
  122 #include <machine/atomic.h>
  123 #include <machine/bus.h>
  124 #include <machine/bus_private.h>
  125 #include <machine/cache.h>
  126 #include <machine/smp.h>
  127 #include <machine/tlb.h>
  128 
  129 static void nexus_bus_barrier(bus_space_tag_t, bus_space_handle_t,
  130     bus_size_t, bus_size_t, int);
  131 
   132 /* ASIs for bus access. */
  133 int bus_type_asi[] = {
  134         ASI_PHYS_BYPASS_EC_WITH_EBIT,           /* UPA */
  135         ASI_PHYS_BYPASS_EC_WITH_EBIT,           /* SBUS */
  136         ASI_PHYS_BYPASS_EC_WITH_EBIT_L,         /* PCI configuration space */
  137         ASI_PHYS_BYPASS_EC_WITH_EBIT_L,         /* PCI memory space */
  138         ASI_PHYS_BYPASS_EC_WITH_EBIT_L,         /* PCI I/O space */
  139         0
  140 };
  141 
  142 int bus_stream_asi[] = {
  143         ASI_PHYS_BYPASS_EC_WITH_EBIT,           /* UPA */
  144         ASI_PHYS_BYPASS_EC_WITH_EBIT,           /* SBUS */
  145         ASI_PHYS_BYPASS_EC_WITH_EBIT,           /* PCI configuration space */
  146         ASI_PHYS_BYPASS_EC_WITH_EBIT,           /* PCI memory space */
  147         ASI_PHYS_BYPASS_EC_WITH_EBIT,           /* PCI I/O space */
  148         0
  149 };
  150 
  151 /*
  152  * Convenience function for manipulating driver locks from busdma (during
  153  * busdma_swi, for example).  Drivers that don't provide their own locks
   154  * should pass &Giant as dmat->lockfuncarg.  Drivers that use their own
  155  * non-mutex locking scheme don't have to use this at all.
  156  */
  157 void
  158 busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
  159 {
  160         struct mtx *dmtx;
  161 
  162         dmtx = (struct mtx *)arg;
  163         switch (op) {
  164         case BUS_DMA_LOCK:
  165                 mtx_lock(dmtx);
  166                 break;
  167         case BUS_DMA_UNLOCK:
  168                 mtx_unlock(dmtx);
  169                 break;
  170         default:
  171                 panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
  172         }
  173 }
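/*
 * Illustrative sketch, not part of the original file: a driver that relies on
 * busdma to defer loads would typically hand busdma_lock_mutex and its own
 * mutex (or &Giant) to bus_dma_tag_create(), defined below.  The "foo" softc,
 * its sc_mtx mutex, and the sizes chosen here are hypothetical.
 */
#if 0
static int
foo_dma_tag_alloc(struct foo_softc *sc)
{

	return (bus_dma_tag_create(NULL,	/* parent: root tag */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MCLBYTES, 1, MCLBYTES,		/* maxsize, nsegments, maxsegsz */
	    0,					/* flags */
	    busdma_lock_mutex, &sc->sc_mtx,	/* serialize deferred callbacks */
	    &sc->sc_dmat));
}
#endif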
  174 
  175 /*
  176  * dflt_lock should never get called.  It gets put into the dma tag when
  177  * lockfunc == NULL, which is only valid if the maps that are associated
   178  * with the tag are meant to never be deferred.
  179  * XXX Should have a way to identify which driver is responsible here.
  180  */
  181 static void
  182 dflt_lock(void *arg, bus_dma_lock_op_t op)
  183 {
  184 #ifdef INVARIANTS
  185         panic("driver error: busdma dflt_lock called");
  186 #else
  187         printf("DRIVER_ERROR: busdma dflt_lock called\n");
  188 #endif
  189 }
  190 
  191 /*
   192  * Since there is no way for a device to obtain a dma tag from its parent,
   193  * we use this kluge to handle the different supported bus systems.
  194  * The sparc64_root_dma_tag is used as parent for tags that have none, so that
  195  * the correct methods will be used.
  196  */
  197 bus_dma_tag_t sparc64_root_dma_tag;
  198 
  199 /*
  200  * Allocate a device specific dma_tag.
  201  */
  202 int
  203 bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
  204     bus_size_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
  205     bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
  206     int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
  207     void *lockfuncarg, bus_dma_tag_t *dmat)
  208 {
  209         bus_dma_tag_t impptag;
  210         bus_dma_tag_t newtag;
  211 
  212         /* Return a NULL tag on failure */
  213         *dmat = NULL;
  214 
  215         newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
  216         if (newtag == NULL)
  217                 return (ENOMEM);
  218 
  219         impptag = parent != NULL ? parent : sparc64_root_dma_tag;
  220         /*
  221          * The method table pointer and the cookie need to be taken over from
  222          * the parent or the root tag.
  223          */
  224         newtag->dt_cookie = impptag->dt_cookie;
  225         newtag->dt_mt = impptag->dt_mt;
  226 
  227         newtag->dt_parent = parent;
  228         newtag->dt_alignment = alignment;
  229         newtag->dt_boundary = boundary;
  230         newtag->dt_lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
  231         newtag->dt_highaddr = trunc_page((vm_offset_t)highaddr) +
  232             (PAGE_SIZE - 1);
  233         newtag->dt_filter = filter;
  234         newtag->dt_filterarg = filterarg;
  235         newtag->dt_maxsize = maxsize;
  236         newtag->dt_nsegments = nsegments;
  237         newtag->dt_maxsegsz = maxsegsz;
  238         newtag->dt_flags = flags;
  239         newtag->dt_ref_count = 1; /* Count ourselves */
  240         newtag->dt_map_count = 0;
  241 
  242         if (lockfunc != NULL) {
  243                 newtag->dt_lockfunc = lockfunc;
  244                 newtag->dt_lockfuncarg = lockfuncarg;
  245         } else {
  246                 newtag->dt_lockfunc = dflt_lock;
  247                 newtag->dt_lockfuncarg = NULL;
  248         }
  249 
  250         newtag->dt_segments = NULL;
  251 
  252         /* Take into account any restrictions imposed by our parent tag */
  253         if (parent != NULL) {
  254                 newtag->dt_lowaddr = ulmin(parent->dt_lowaddr,
  255                     newtag->dt_lowaddr);
  256                 newtag->dt_highaddr = ulmax(parent->dt_highaddr,
  257                     newtag->dt_highaddr);
  258                 if (newtag->dt_boundary == 0)
  259                         newtag->dt_boundary = parent->dt_boundary;
  260                 else if (parent->dt_boundary != 0)            
  261                         newtag->dt_boundary = ulmin(parent->dt_boundary,
  262                             newtag->dt_boundary);
  263                 atomic_add_int(&parent->dt_ref_count, 1);
  264         }
  265 
  266         if (newtag->dt_boundary > 0)
  267                 newtag->dt_maxsegsz = ulmin(newtag->dt_maxsegsz,
  268                     newtag->dt_boundary);
  269 
  270         *dmat = newtag;
  271         return (0);
  272 }
  273 
  274 int
  275 bus_dma_tag_destroy(bus_dma_tag_t dmat)
  276 {
  277         bus_dma_tag_t parent;
  278 
  279         if (dmat != NULL) {
  280                 if (dmat->dt_map_count != 0)
  281                         return (EBUSY);
  282                 while (dmat != NULL) {
  283                         parent = dmat->dt_parent;
  284                         atomic_subtract_int(&dmat->dt_ref_count, 1);
  285                         if (dmat->dt_ref_count == 0) {
  286                                 if (dmat->dt_segments != NULL)
  287                                         free(dmat->dt_segments, M_DEVBUF);
  288                                 free(dmat, M_DEVBUF);
  289                                 /*
  290                                  * Last reference count, so
  291                                  * release our reference
  292                                  * count on our parent.
  293                                  */
  294                                 dmat = parent;
  295                         } else
  296                                 dmat = NULL;
  297                 }
  298         }
  299         return (0);
  300 }
  301 
   302 /* Allocate/free a map, and do the necessary management work. */
  303 int
  304 sparc64_dma_alloc_map(bus_dma_tag_t dmat, bus_dmamap_t *mapp)
  305 {
  306 
  307         if (dmat->dt_segments == NULL) {
  308                 dmat->dt_segments = (bus_dma_segment_t *)malloc(
  309                     sizeof(bus_dma_segment_t) * dmat->dt_nsegments, M_DEVBUF,
  310                     M_NOWAIT);
  311                 if (dmat->dt_segments == NULL)
  312                         return (ENOMEM);
  313         }
  314         *mapp = malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO);
  315         if (*mapp == NULL)
  316                 return (ENOMEM);
  317 
  318         SLIST_INIT(&(*mapp)->dm_reslist);
  319         dmat->dt_map_count++;
  320         return (0);
  321 }
  322 
  323 void
  324 sparc64_dma_free_map(bus_dma_tag_t dmat, bus_dmamap_t map)
  325 {
  326 
  327         free(map, M_DEVBUF);
  328         dmat->dt_map_count--;
  329 }
  330 
  331 static int
  332 nexus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
  333 {
  334 
  335         return (sparc64_dma_alloc_map(dmat, mapp));
  336 }
  337 
  338 static int
  339 nexus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
  340 {
  341 
  342         sparc64_dma_free_map(dmat, map);
  343         return (0);
  344 }
  345 
  346 /*
  347  * Utility function to load a linear buffer.  lastaddrp holds state
  348  * between invocations (for multiple-buffer loads).  segp contains
   349  * the starting segment on entrance, and the ending segment on exit.
  350  * first indicates if this is the first invocation of this function.
  351  */
  352 static int
  353 _nexus_dmamap_load_buffer(bus_dma_tag_t dmat, void *buf, bus_size_t buflen,
  354     struct thread *td, int flags, bus_addr_t *lastaddrp,
  355     bus_dma_segment_t *segs, int *segp, int first)
  356 {
  357         bus_size_t sgsize;
  358         bus_addr_t curaddr, lastaddr, baddr, bmask;
  359         vm_offset_t vaddr = (vm_offset_t)buf;
  360         int seg;
  361         pmap_t pmap;
  362 
  363         if (td != NULL)
  364                 pmap = vmspace_pmap(td->td_proc->p_vmspace);
  365         else
  366                 pmap = NULL;
  367 
  368         lastaddr = *lastaddrp;
  369         bmask  = ~(dmat->dt_boundary - 1);
  370 
  371         for (seg = *segp; buflen > 0 ; ) {
  372                 /*
  373                  * Get the physical address for this segment.
  374                  */
  375                 if (pmap)
  376                         curaddr = pmap_extract(pmap, vaddr);
  377                 else
  378                         curaddr = pmap_kextract(vaddr);
  379 
  380                 /*
  381                  * Compute the segment size, and adjust counts.
  382                  */
  383                 sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
  384                 if (buflen < sgsize)
  385                         sgsize = buflen;
  386 
  387                 /*
  388                  * Make sure we don't cross any boundaries.
  389                  */
  390                 if (dmat->dt_boundary > 0) {
  391                         baddr = (curaddr + dmat->dt_boundary) & bmask;
  392                         if (sgsize > (baddr - curaddr))
  393                                 sgsize = (baddr - curaddr);
  394                 }
  395 
  396                 /*
  397                  * Insert chunk into a segment, coalescing with
  398                  * previous segment if possible.
  399                  */
  400                 if (first) {
  401                         segs[seg].ds_addr = curaddr;
  402                         segs[seg].ds_len = sgsize;
  403                         first = 0;
  404                 } else {
  405                         if (curaddr == lastaddr &&
  406                             (segs[seg].ds_len + sgsize) <= dmat->dt_maxsegsz &&
  407                             (dmat->dt_boundary == 0 ||
  408                              (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
  409                                 segs[seg].ds_len += sgsize;
  410                         else {
  411                                 if (++seg >= dmat->dt_nsegments)
  412                                         break;
  413                                 segs[seg].ds_addr = curaddr;
  414                                 segs[seg].ds_len = sgsize;
  415                         }
  416                 }
  417 
  418                 lastaddr = curaddr + sgsize;
  419                 vaddr += sgsize;
  420                 buflen -= sgsize;
  421         }
  422 
  423         *segp = seg;
  424         *lastaddrp = lastaddr;
  425 
  426         /*
  427          * Did we fit?
  428          */
  429         return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
  430 }
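/*
 * Illustrative worked example, not part of the original file, of the boundary
 * clamping above, assuming 8KB pages and a hypothetical dt_boundary of 0x1000
 * (4KB): for curaddr = 0x20800 and a large remaining buflen, the page-relative
 * size is sgsize = 0x2000 - 0x800 = 0x1800, but with bmask = ~0xfff we get
 * baddr = (0x20800 + 0x1000) & bmask = 0x21000, so sgsize is clamped to
 * baddr - curaddr = 0x800 and the chunk ends exactly at the 4KB boundary; the
 * next chunk then starts at 0x21000.
 */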
  431 
  432 /*
  433  * Common function for loading a DMA map with a linear buffer.  May
  434  * be called by bus-specific DMA map load functions.
  435  *
  436  * Most SPARCs have IOMMUs in the bus controllers.  In those cases
  437  * they only need one segment and will use virtual addresses for DVMA.
  438  * Those bus controllers should intercept these vectors and should
  439  * *NEVER* call nexus_dmamap_load() which is used only by devices that
  440  * bypass DVMA.
  441  */
  442 static int
  443 nexus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
  444     bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg,
  445     int flags)
  446 {
  447         bus_addr_t lastaddr;
   448         int error, nsegs = 0;
  449 
  450         error = _nexus_dmamap_load_buffer(dmat, buf, buflen, NULL, flags,
  451             &lastaddr, dmat->dt_segments, &nsegs, 1);
  452 
  453         if (error == 0) {
  454                 (*callback)(callback_arg, dmat->dt_segments, nsegs + 1, 0);
  455                 map->dm_flags |= DMF_LOADED;
  456         } else
  457                 (*callback)(callback_arg, NULL, 0, error);
  458 
  459         return (0);
  460 }
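/*
 * Illustrative sketch, not part of the original file: on the nexus the load is
 * synchronous, so the usual driver-side pattern is a trivial callback that
 * records the bus address of the first segment handed back through
 * dt_segments.  The "foo" names are hypothetical.
 */
#if 0
static void
foo_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *addrp = arg;

	if (error != 0)
		return;			/* leave *addrp untouched on failure */
	*addrp = segs[0].ds_addr;	/* record the first segment's address */
}

/* ...and in the driver's setup path: */
static int
foo_dma_load(struct foo_softc *sc)
{

	return (bus_dmamap_load(sc->sc_dmat, sc->sc_dmam, sc->sc_buf,
	    sc->sc_bufsz, foo_dmamap_cb, &sc->sc_busaddr, BUS_DMA_NOWAIT));
}
#endif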
  461 
  462 /*
  463  * Like nexus_dmamap_load(), but for mbufs.
  464  */
  465 static int
  466 nexus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
  467     bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
  468 {
  469         int nsegs, error;
  470 
  471         M_ASSERTPKTHDR(m0);
  472 
  473         nsegs = 0;
  474         error = 0;
  475         if (m0->m_pkthdr.len <= dmat->dt_maxsize) {
  476                 int first = 1;
  477                 bus_addr_t lastaddr = 0;
  478                 struct mbuf *m;
  479 
  480                 for (m = m0; m != NULL && error == 0; m = m->m_next) {
  481                         if (m->m_len > 0) {
  482                                 error = _nexus_dmamap_load_buffer(dmat,
   483                                     m->m_data, m->m_len, NULL, flags, &lastaddr,
  484                                     dmat->dt_segments, &nsegs, first);
  485                                 first = 0;
  486                         }
  487                 }
  488         } else {
  489                 error = EINVAL;
  490         }
  491 
  492         if (error) {
  493                 /* force "no valid mappings" in callback */
  494                 (*callback)(callback_arg, dmat->dt_segments, 0, 0, error);
  495         } else {
  496                 map->dm_flags |= DMF_LOADED;
  497                 (*callback)(callback_arg, dmat->dt_segments, nsegs + 1,
  498                     m0->m_pkthdr.len, error);
  499         }
  500         return (error);
  501 }
  502 
  503 static int
  504 nexus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
  505     bus_dma_segment_t *segs, int *nsegs, int flags)
  506 {
  507         int error;
  508 
  509         M_ASSERTPKTHDR(m0);
  510 
  511         *nsegs = 0;
  512         error = 0;
  513         if (m0->m_pkthdr.len <= dmat->dt_maxsize) {
  514                 int first = 1;
  515                 bus_addr_t lastaddr = 0;
  516                 struct mbuf *m;
  517 
  518                 for (m = m0; m != NULL && error == 0; m = m->m_next) {
  519                         if (m->m_len > 0) {
  520                                 error = _nexus_dmamap_load_buffer(dmat,
   521                                     m->m_data, m->m_len, NULL, flags, &lastaddr,
  522                                     segs, nsegs, first);
  523                                 first = 0;
  524                         }
  525                 }
  526         } else {
  527                 error = EINVAL;
  528         }
  529 
  530         ++*nsegs;
  531         return (error);
  532 }
  533 
  534 /*
  535  * Like nexus_dmamap_load(), but for uios.
  536  */
  537 static int
  538 nexus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
  539     bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
  540 {
  541         bus_addr_t lastaddr;
  542         int nsegs, error, first, i;
  543         bus_size_t resid;
  544         struct iovec *iov;
  545         struct thread *td = NULL;
  546 
  547         resid = uio->uio_resid;
  548         iov = uio->uio_iov;
  549 
  550         if (uio->uio_segflg == UIO_USERSPACE) {
  551                 td = uio->uio_td;
  552                 KASSERT(td != NULL,
  553                         ("nexus_dmamap_load_uio: USERSPACE but no proc"));
  554         }
  555 
  556         nsegs = 0;
  557         error = 0;
  558         first = 1;
  559         for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
  560                 /*
  561                  * Now at the first iovec to load.  Load each iovec
  562                  * until we have exhausted the residual count.
  563                  */
  564                 bus_size_t minlen =
  565                         resid < iov[i].iov_len ? resid : iov[i].iov_len;
  566                 caddr_t addr = (caddr_t) iov[i].iov_base;
  567 
  568                 if (minlen > 0) {
  569                         error = _nexus_dmamap_load_buffer(dmat, addr, minlen,
  570                             td, flags, &lastaddr, dmat->dt_segments, &nsegs,
  571                             first);
  572                         first = 0;
  573 
  574                         resid -= minlen;
  575                 }
  576         }
  577 
  578         if (error) {
  579                 /* force "no valid mappings" in callback */
  580                 (*callback)(callback_arg, dmat->dt_segments, 0, 0, error);
  581         } else {
  582                 map->dm_flags |= DMF_LOADED;
  583                 (*callback)(callback_arg, dmat->dt_segments, nsegs + 1,
  584                     uio->uio_resid, error);
  585         }
  586         return (error);
  587 }
  588 
  589 /*
  590  * Common function for unloading a DMA map.  May be called by
  591  * bus-specific DMA map unload functions.
  592  */
  593 static void
  594 nexus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
  595 {
  596 
  597         map->dm_flags &= ~DMF_LOADED;
  598 }
  599 
  600 /*
  601  * Common function for DMA map synchronization.  May be called
  602  * by bus-specific DMA map synchronization functions.
  603  */
  604 static void
  605 nexus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
  606 {
  607 
  608         /*
  609          * We sync out our caches, but the bus must do the same.
  610          *
  611          * Actually a #Sync is expensive.  We should optimize.
  612          */
  613         if ((op & BUS_DMASYNC_PREREAD) || (op & BUS_DMASYNC_PREWRITE)) {
  614                 /* 
  615                  * Don't really need to do anything, but flush any pending
  616                  * writes anyway. 
  617                  */
  618                 membar(Sync);
  619         }
  620 #if 0
  621         /* Should not be needed. */
  622         if (op & BUS_DMASYNC_POSTREAD) {
  623                 ecache_flush((vm_offset_t)map->buf,
  624                     (vm_offset_t)map->buf + map->buflen - 1);
  625         }
  626 #endif
  627         if (op & BUS_DMASYNC_POSTWRITE) {
  628                 /* Nothing to do.  Handled by the bus controller. */
  629         }
  630 }
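/*
 * Illustrative sketch, not part of the original file: the usual driver-side
 * ordering around a device-to-memory transfer on a loaded map.  The "foo"
 * softc and helper functions are hypothetical.
 */
#if 0
static void
foo_read_from_device(struct foo_softc *sc)
{

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmam, BUS_DMASYNC_PREREAD);
	foo_start_dma(sc);			/* device DMAs into the buffer */
	foo_wait_complete(sc);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmam, BUS_DMASYNC_POSTREAD);
	/* The CPU may now safely look at the freshly transferred data. */
}
#endif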
  631 
  632 /*
  633  * Common function for DMA-safe memory allocation.  May be called
  634  * by bus-specific DMA memory allocation functions.
  635  */
  636 static int
  637 nexus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
  638     bus_dmamap_t *mapp)
  639 {
  640         int mflags;
  641 
  642         if (flags & BUS_DMA_NOWAIT)
  643                 mflags = M_NOWAIT;
  644         else
  645                 mflags = M_WAITOK;
  646         if (flags & BUS_DMA_ZERO)
  647                 mflags |= M_ZERO;
  648 
  649         if ((dmat->dt_maxsize <= PAGE_SIZE)) {
  650                 *vaddr = malloc(dmat->dt_maxsize, M_DEVBUF, mflags);
  651         } else {
  652                 /*
  653                  * XXX: Use contigmalloc until it is merged into this facility
  654                  * and handles multi-seg allocations.  Nobody is doing multi-seg
  655                  * allocations yet though.
  656                  */
  657                 *vaddr = contigmalloc(dmat->dt_maxsize, M_DEVBUF, mflags,
  658                     0ul, dmat->dt_lowaddr,
  659                     dmat->dt_alignment ? dmat->dt_alignment : 1UL,
  660                     dmat->dt_boundary);
  661         }
  662         if (*vaddr == NULL)
  663                 return (ENOMEM);
  664         return (0);
  665 }
  666 
  667 /*
  668  * Common function for freeing DMA-safe memory.  May be called by
  669  * bus-specific DMA memory free functions.
  670  */
  671 static void
  672 nexus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
  673 {
  674 
  675         if ((dmat->dt_maxsize <= PAGE_SIZE))
  676                 free(vaddr, M_DEVBUF);
  677         else {
  678                 contigfree(vaddr, dmat->dt_maxsize, M_DEVBUF);
  679         }
  680 }
  681 
  682 struct bus_dma_methods nexus_dma_methods = {
  683         nexus_dmamap_create,
  684         nexus_dmamap_destroy,
  685         nexus_dmamap_load,
  686         nexus_dmamap_load_mbuf,
  687         nexus_dmamap_load_mbuf_sg,
  688         nexus_dmamap_load_uio,
  689         nexus_dmamap_unload,
  690         nexus_dmamap_sync,
  691         nexus_dmamem_alloc,
  692         nexus_dmamem_free,
  693 };
  694 
  695 struct bus_dma_tag nexus_dmatag = {
  696         NULL,
  697         NULL,
  698         8,
  699         0,
  700         0,
  701         0x3ffffffff,
  702         NULL,           /* XXX */
  703         NULL,
  704         0x3ffffffff,    /* XXX */
  705         0xff,           /* XXX */
  706         0xffffffff,     /* XXX */
  707         0,
  708         0,
  709         0,
  710         NULL,
  711         NULL,
  712         NULL,
  713         &nexus_dma_methods,
  714 };
  715 
  716 /*
  717  * Helpers to map/unmap bus memory
  718  */
  719 int
  720 sparc64_bus_mem_map(bus_space_tag_t tag, bus_space_handle_t handle,
  721     bus_size_t size, int flags, vm_offset_t vaddr, void **hp)
  722 {
  723         vm_offset_t addr;
  724         vm_offset_t sva;
  725         vm_offset_t va;
  726         vm_paddr_t pa;
  727         vm_size_t vsz;
  728         u_long pm_flags;
  729 
  730         addr = (vm_offset_t)handle;
  731         size = round_page(size);
  732         if (size == 0) {
   733                 printf("sparc64_bus_mem_map: zero size\n");
  734                 return (EINVAL);
  735         }
  736         switch (tag->bst_type) {
  737         case PCI_CONFIG_BUS_SPACE:
  738         case PCI_IO_BUS_SPACE:
  739         case PCI_MEMORY_BUS_SPACE:
  740                 pm_flags = TD_IE;
  741                 break;
  742         default:
  743                 pm_flags = 0;
  744                 break;
  745         }
  746 
  747         if (!(flags & BUS_SPACE_MAP_CACHEABLE))
  748                 pm_flags |= TD_E;
  749 
  750         if (vaddr != 0L)
  751                 sva = trunc_page(vaddr);
  752         else {
  753                 if ((sva = kmem_alloc_nofault(kernel_map, size)) == 0)
   754                         panic("sparc64_bus_mem_map: cannot allocate virtual "
  755                             "memory");
  756         }
  757 
  758         /* Preserve page offset. */
  759         *hp = (void *)(sva | ((u_long)addr & PAGE_MASK));
  760 
  761         pa = trunc_page(addr);
  762         if ((flags & BUS_SPACE_MAP_READONLY) == 0)
  763                 pm_flags |= TD_W;
  764 
  765         va = sva;
  766         vsz = size;
  767         do {
  768                 pmap_kenter_flags(va, pa, pm_flags);
  769                 va += PAGE_SIZE;
  770                 pa += PAGE_SIZE;
  771         } while ((vsz -= PAGE_SIZE) > 0);
  772         tlb_range_demap(kernel_pmap, sva, sva + size - 1);
  773         return (0);
  774 }
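/*
 * Illustrative sketch, not part of the original file: mapping a hypothetical
 * 8KB block of device registers.  The handle argument is the physical bus
 * address, vaddr == 0 asks for a fresh KVA range, and *hp receives the
 * resulting virtual address; leaving BUS_SPACE_MAP_CACHEABLE clear yields an
 * uncached (TD_E) mapping, as device registers normally require.
 */
#if 0
static void *
foo_map_regs(void)
{
	void *regs;

	if (sparc64_bus_mem_map(&nexus_bustag, 0x1fff0000000UL, 0x2000,
	    0, 0, &regs) != 0)
		panic("foo: cannot map registers");
	return (regs);
}
#endif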
  775 
  776 int
  777 sparc64_bus_mem_unmap(void *bh, bus_size_t size)
  778 {
  779         vm_offset_t sva;
  780         vm_offset_t va;
  781         vm_offset_t endva;
  782 
  783         sva = trunc_page((vm_offset_t)bh);
  784         endva = sva + round_page(size);
  785         for (va = sva; va < endva; va += PAGE_SIZE)
  786                 pmap_kremove_flags(va);
  787         tlb_range_demap(kernel_pmap, sva, sva + size - 1);
  788         kmem_free(kernel_map, sva, size);
  789         return (0);
  790 }
  791 
  792 /*
  793  * Fake up a bus tag, for use by console drivers in early boot when the regular
  794  * means to allocate resources are not yet available.
  795  * Addr is the physical address of the desired start of the handle.
  796  */
  797 bus_space_handle_t
  798 sparc64_fake_bustag(int space, bus_addr_t addr, struct bus_space_tag *ptag)
  799 {
  800 
  801         ptag->bst_cookie = NULL;
  802         ptag->bst_parent = NULL;
  803         ptag->bst_type = space;
  804         ptag->bst_bus_barrier = nexus_bus_barrier;
  805         return (addr);
  806 }
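/*
 * Illustrative sketch, not part of the original file: an early-boot console
 * driver, running before newbus resources are available, might set up access
 * to its registers like this.  The physical address is hypothetical.
 */
#if 0
static void
foo_early_console_init(void)
{
	static struct bus_space_tag foo_early_tag;
	bus_space_handle_t bh;

	bh = sparc64_fake_bustag(PCI_IO_BUS_SPACE, 0x1fe02000000UL,
	    &foo_early_tag);
	/* Registers can now be read with bus_space_read_1(&foo_early_tag, bh, 0). */
}
#endif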
  807 
  808 /*
  809  * Base bus space handlers.
  810  */
  811 
  812 static void
  813 nexus_bus_barrier(bus_space_tag_t t, bus_space_handle_t h, bus_size_t offset,
  814     bus_size_t size, int flags)
  815 {
  816 
  817         /* 
  818          * We have lots of alternatives depending on whether we're
  819          * synchronizing loads with loads, loads with stores, stores
  820          * with loads, or stores with stores.  The only ones that seem
  821          * generic are #Sync and #MemIssue.  I'll use #Sync for safety.
  822          */
  823         switch(flags) {
  824         case BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE:
  825         case BUS_SPACE_BARRIER_READ:
  826         case BUS_SPACE_BARRIER_WRITE:
  827                 membar(Sync);
  828                 break;
  829         default:
   830                 panic("nexus_bus_barrier: unknown flags");
  831         }
  832         return;
  833 }
  834 
  835 struct bus_space_tag nexus_bustag = {
  836         NULL,                           /* cookie */
  837         NULL,                           /* parent bus tag */
  838         UPA_BUS_SPACE,                  /* type */
  839         nexus_bus_barrier,              /* bus_space_barrier */
  840 };

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.