FreeBSD/Linux Kernel Cross Reference
sys/dev/xdma/xdma_sg.c


/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2018-2019 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_platform.h"
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include <dev/xdma/xdma.h>

#include <xdma_if.h>

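/*
 * Callback context for bus_dmamap_load*(): receives the completed
 * segment list, its length, and any load error.
 */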
struct seg_load_request {
        struct bus_dma_segment *seg;
        uint32_t nsegs;
        uint32_t error;
};

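/*
 * Release every request's reserved buffer: unmap and free its KVA
 * window and return the physical range to the channel's vmem arena.
 */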
static void
xchan_bufs_free_reserved(xdma_channel_t *xchan)
{
        struct xdma_request *xr;
        vm_size_t size;
        int i;

        for (i = 0; i < xchan->xr_num; i++) {
                xr = &xchan->xr_mem[i];
                size = xr->buf.size;
                if (xr->buf.vaddr) {
                        pmap_kremove_device(xr->buf.vaddr, size);
                        kva_free(xr->buf.vaddr, size);
                        xr->buf.vaddr = 0;
                }
                if (xr->buf.paddr) {
                        vmem_free(xchan->vmem, xr->buf.paddr, size);
                        xr->buf.paddr = 0;
                }
                xr->buf.size = 0;
        }
}

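/*
 * Carve one maxsegsize-sized (page-rounded) buffer per request out of
 * the channel's reserved-memory vmem arena and map it into KVA as
 * device memory.
 */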
static int
xchan_bufs_alloc_reserved(xdma_channel_t *xchan)
{
        xdma_controller_t *xdma;
        struct xdma_request *xr;
        vmem_addr_t addr;
        vm_size_t size;
        int i;

        xdma = xchan->xdma;

        if (xchan->vmem == NULL)
                return (ENOBUFS);

        for (i = 0; i < xchan->xr_num; i++) {
                xr = &xchan->xr_mem[i];
                size = round_page(xchan->maxsegsize);
                if (vmem_alloc(xchan->vmem, size,
                    M_BESTFIT | M_NOWAIT, &addr)) {
                        device_printf(xdma->dev,
                            "%s: Can't allocate memory\n", __func__);
                        xchan_bufs_free_reserved(xchan);
                        return (ENOMEM);
                }

                xr->buf.size = size;
                xr->buf.paddr = addr;
                xr->buf.vaddr = kva_alloc(size);
                if (xr->buf.vaddr == 0) {
                        device_printf(xdma->dev,
                            "%s: Can't allocate KVA\n", __func__);
                        xchan_bufs_free_reserved(xchan);
                        return (ENOMEM);
                }
                pmap_kenter_device(xr->buf.vaddr, size, addr);
        }

        return (0);
}

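/*
 * Illustrative sketch (not part of the original file): the reserved
 * path above requires xchan->vmem to describe a device-usable
 * physical region.  A controller driver could build such an arena
 * with vmem(9); "phys_base" and "phys_size" are placeholders here.
 *
 *      xchan->vmem = vmem_create("xdma bufs", phys_base, phys_size,
 *          PAGE_SIZE, 0, M_NOWAIT);
 */

/*
 * Create the channel's busdma tag and one DMA map per request.
 */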
static int
xchan_bufs_alloc_busdma(xdma_channel_t *xchan)
{
        xdma_controller_t *xdma;
        struct xdma_request *xr;
        int err;
        int i;

        xdma = xchan->xdma;

        /* Create bus_dma tag */
        err = bus_dma_tag_create(
            bus_get_dma_tag(xdma->dev), /* Parent tag. */
            xchan->alignment,           /* alignment */
            xchan->boundary,            /* boundary */
            xchan->lowaddr,             /* lowaddr */
            xchan->highaddr,            /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            xchan->maxsegsize * xchan->maxnsegs, /* maxsize */
            xchan->maxnsegs,            /* nsegments */
            xchan->maxsegsize,          /* maxsegsize */
            0,                          /* flags */
            NULL, NULL,                 /* lockfunc, lockarg */
            &xchan->dma_tag_bufs);
        if (err != 0) {
                device_printf(xdma->dev,
                    "%s: Can't create bus_dma tag.\n", __func__);
                return (-1);
        }

        for (i = 0; i < xchan->xr_num; i++) {
                xr = &xchan->xr_mem[i];
                err = bus_dmamap_create(xchan->dma_tag_bufs, 0,
                    &xr->buf.map);
                if (err != 0) {
                        device_printf(xdma->dev,
                            "%s: Can't create buf DMA map.\n", __func__);

                        /* Cleanup: destroy the maps created so far. */
                        while (--i >= 0) {
                                xr = &xchan->xr_mem[i];
                                bus_dmamap_destroy(xchan->dma_tag_bufs,
                                    xr->buf.map);
                        }
                        bus_dma_tag_destroy(xchan->dma_tag_bufs);

                        return (-1);
                }
        }

        return (0);
}

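/*
 * Allocate per-request buffers, using busdma when the channel
 * supports it and the reserved-memory path otherwise.
 */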
static int
xchan_bufs_alloc(xdma_channel_t *xchan)
{
        xdma_controller_t *xdma;
        int ret;

        xdma = xchan->xdma;

        if (xdma == NULL) {
                printf("%s: Channel was not allocated properly.\n", __func__);
                return (-1);
        }

        if (xchan->caps & XCHAN_CAP_BUSDMA)
                ret = xchan_bufs_alloc_busdma(xchan);
        else
                ret = xchan_bufs_alloc_reserved(xchan);
        if (ret != 0) {
                device_printf(xdma->dev,
                    "%s: Can't allocate bufs.\n", __func__);
                return (-1);
        }

        xchan->flags |= XCHAN_BUFS_ALLOCATED;

        return (0);
}

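/*
 * Release all per-request buffers, along with their busdma maps and
 * the channel's tag, undoing xchan_bufs_alloc().
 */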
static int
xchan_bufs_free(xdma_channel_t *xchan)
{
        struct xdma_request *xr;
        struct xchan_buf *b;
        int i;

        if ((xchan->flags & XCHAN_BUFS_ALLOCATED) == 0)
                return (-1);

        if (xchan->caps & XCHAN_CAP_BUSDMA) {
                for (i = 0; i < xchan->xr_num; i++) {
                        xr = &xchan->xr_mem[i];
                        b = &xr->buf;
                        bus_dmamap_destroy(xchan->dma_tag_bufs, b->map);
                }
                bus_dma_tag_destroy(xchan->dma_tag_bufs);
        } else
                xchan_bufs_free_reserved(xchan);

        xchan->flags &= ~XCHAN_BUFS_ALLOCATED;

        return (0);
}

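/*
 * Tear down the scatter-gather state of a channel: buffers, sglist
 * and the request bank.
 */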
void
xdma_channel_free_sg(xdma_channel_t *xchan)
{

        xchan_bufs_free(xchan);
        xchan_sglist_free(xchan);
        xchan_bank_free(xchan);
}

/*
 * Prepare xchan for scatter-gather transfers.
 * xr_num - size of the xdma request queue,
 * maxsegsize - maximum size of a scatter-gather list element, in bytes,
 * maxnsegs - maximum number of segments per request,
 * alignment, boundary, lowaddr, highaddr - DMA constraints, as for
 *     bus_dma_tag_create(9).
 */
int
xdma_prep_sg(xdma_channel_t *xchan, uint32_t xr_num,
    bus_size_t maxsegsize, bus_size_t maxnsegs,
    bus_size_t alignment, bus_addr_t boundary,
    bus_addr_t lowaddr, bus_addr_t highaddr)
{
        xdma_controller_t *xdma;
        int ret;

        xdma = xchan->xdma;

        KASSERT(xdma != NULL, ("xdma is NULL"));

        if (xchan->flags & XCHAN_CONFIGURED) {
                device_printf(xdma->dev,
                    "%s: Channel is already configured.\n", __func__);
                return (-1);
        }

        xchan->xr_num = xr_num;
        xchan->maxsegsize = maxsegsize;
        xchan->maxnsegs = maxnsegs;
        xchan->alignment = alignment;
        xchan->boundary = boundary;
        xchan->lowaddr = lowaddr;
        xchan->highaddr = highaddr;

        if (xchan->maxnsegs > XDMA_MAX_SEG) {
                device_printf(xdma->dev, "%s: maxnsegs is too big\n",
                    __func__);
                return (-1);
        }

        xchan_bank_init(xchan);

        /* Allocate sglist. */
        ret = xchan_sglist_alloc(xchan);
        if (ret != 0) {
                device_printf(xdma->dev,
                    "%s: Can't allocate sglist.\n", __func__);
                return (-1);
        }

        /* Allocate buffers if required. */
        if (xchan->caps & (XCHAN_CAP_BUSDMA | XCHAN_CAP_BOUNCE)) {
                ret = xchan_bufs_alloc(xchan);
                if (ret != 0) {
                        device_printf(xdma->dev,
                            "%s: Can't allocate bufs.\n", __func__);

                        /* Cleanup */
                        xchan_sglist_free(xchan);
                        xchan_bank_free(xchan);

                        return (-1);
                }
        }

        xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_SG);

        XCHAN_LOCK(xchan);
        ret = XDMA_CHANNEL_PREP_SG(xdma->dma_dev, xchan);
        if (ret != 0) {
                device_printf(xdma->dev,
                    "%s: Can't prepare SG transfer.\n", __func__);
                XCHAN_UNLOCK(xchan);

                return (-1);
        }
        XCHAN_UNLOCK(xchan);

        return (0);
}

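/*
 * Illustrative sketch (not part of the original file): a peripheral
 * driver might configure a channel for scatter-gather operation as
 * below.  The queue depth and segment limits are arbitrary example
 * values.
 *
 *      if (xdma_prep_sg(xchan,
 *          32,                         // xr_num
 *          MCLBYTES,                   // maxsegsize
 *          8,                          // maxnsegs
 *          16,                         // alignment
 *          0,                          // boundary
 *          BUS_SPACE_MAXADDR_32BIT,    // lowaddr
 *          BUS_SPACE_MAXADDR) != 0)    // highaddr
 *              return (ENXIO);
 */

/*
 * Called by the DMA engine driver for each completed segment.  Once
 * the last segment of a request completes, record its status and move
 * it from the processing queue to the outbound queue.
 */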
void
xchan_seg_done(xdma_channel_t *xchan,
    struct xdma_transfer_status *st)
{
        struct xdma_request *xr;
        struct xchan_buf *b;
        bus_addr_t addr;

        xr = TAILQ_FIRST(&xchan->processing);
        if (xr == NULL)
                panic("request not found\n");

        b = &xr->buf;

        atomic_subtract_int(&b->nsegs_left, 1);

        if (b->nsegs_left == 0) {
                if (xchan->caps & XCHAN_CAP_BUSDMA) {
                        if (xr->direction == XDMA_MEM_TO_DEV)
                                bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
                                    BUS_DMASYNC_POSTWRITE);
                        else
                                bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
                                    BUS_DMASYNC_POSTREAD);
                        bus_dmamap_unload(xchan->dma_tag_bufs, b->map);
                } else if (xchan->caps & XCHAN_CAP_BOUNCE) {
                        if (xr->req_type == XR_TYPE_MBUF &&
                            xr->direction == XDMA_DEV_TO_MEM)
                                m_copyback(xr->m, 0, st->transferred,
                                    (void *)xr->buf.vaddr);
                } else if (xchan->caps & XCHAN_CAP_IOMMU) {
                        if (xr->direction == XDMA_MEM_TO_DEV)
                                addr = xr->src_addr;
                        else
                                addr = xr->dst_addr;
                        xdma_iommu_remove_entry(xchan, addr);
                }
                xr->status.error = st->error;
                xr->status.transferred = st->transferred;

                QUEUE_PROC_LOCK(xchan);
                TAILQ_REMOVE(&xchan->processing, xr, xr_next);
                QUEUE_PROC_UNLOCK(xchan);

                QUEUE_OUT_LOCK(xchan);
                TAILQ_INSERT_TAIL(&xchan->queue_out, xr, xr_next);
                QUEUE_OUT_UNLOCK(xchan);
        }
}

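/*
 * Callback for bus_dmamap_load*(): copy the resulting segment array,
 * or the load error, into the caller's seg_load_request.
 */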
static void
xdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        struct seg_load_request *slr;
        struct bus_dma_segment *seg;
        int i;

        slr = arg;
        seg = slr->seg;

        if (error != 0) {
                slr->error = error;
                return;
        }

        slr->nsegs = nsegs;

        for (i = 0; i < nsegs; i++) {
                seg[i].ds_addr = segs[i].ds_addr;
                seg[i].ds_len = segs[i].ds_len;
        }
}

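/*
 * Load a request's data for DMA via busdma and pre-sync the map.
 * Returns the number of segments, or 0 so the caller retries later.
 */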
static int
_xdma_load_data_busdma(xdma_channel_t *xchan, struct xdma_request *xr,
    struct bus_dma_segment *seg)
{
        xdma_controller_t *xdma;
        struct seg_load_request slr;
        uint32_t nsegs;
        void *addr;
        int error;

        xdma = xchan->xdma;

        error = 0;
        nsegs = 0;

        switch (xr->req_type) {
        case XR_TYPE_MBUF:
                error = bus_dmamap_load_mbuf_sg(xchan->dma_tag_bufs,
                    xr->buf.map, xr->m, seg, &nsegs, BUS_DMA_NOWAIT);
                break;
        case XR_TYPE_BIO:
                slr.nsegs = 0;
                slr.error = 0;
                slr.seg = seg;
                error = bus_dmamap_load_bio(xchan->dma_tag_bufs,
                    xr->buf.map, xr->bp, xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
                if (slr.error != 0) {
                        device_printf(xdma->dma_dev,
                            "%s: bus_dmamap_load failed, err %d\n",
                            __func__, slr.error);
                        return (0);
                }
                nsegs = slr.nsegs;
                break;
        case XR_TYPE_VIRT:
                switch (xr->direction) {
                case XDMA_MEM_TO_DEV:
                        addr = (void *)xr->src_addr;
                        break;
                case XDMA_DEV_TO_MEM:
                        addr = (void *)xr->dst_addr;
                        break;
                default:
                        device_printf(xdma->dma_dev,
                            "%s: Direction is not supported\n", __func__);
                        return (0);
                }
                slr.nsegs = 0;
                slr.error = 0;
                slr.seg = seg;
                error = bus_dmamap_load(xchan->dma_tag_bufs, xr->buf.map,
                    addr, (xr->block_len * xr->block_num),
                    xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
                if (slr.error != 0) {
                        device_printf(xdma->dma_dev,
                            "%s: bus_dmamap_load failed, err %d\n",
                            __func__, slr.error);
                        return (0);
                }
                nsegs = slr.nsegs;
                break;
        default:
                break;
        }

        if (error != 0) {
                if (error == ENOMEM) {
                        /*
                         * Out of memory. Try again later.
                         * TODO: count errors.
                         */
                } else
                        device_printf(xdma->dma_dev,
                            "%s: bus_dmamap_load failed with err %d\n",
                            __func__, error);
                return (0);
        }

        if (xr->direction == XDMA_MEM_TO_DEV)
                bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
                    BUS_DMASYNC_PREWRITE);
        else
                bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
                    BUS_DMASYNC_PREREAD);

        return (nsegs);
}

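/*
 * Load a request's data without busdma, producing a single segment:
 * bounce through the reserved buffer, map through the IOMMU, or use
 * the mbuf's data address directly.
 */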
static int
_xdma_load_data(xdma_channel_t *xchan, struct xdma_request *xr,
    struct bus_dma_segment *seg)
{
        struct mbuf *m;
        uint32_t nsegs;
        vm_offset_t va, addr;
        bus_addr_t pa;
        vm_prot_t prot;

        m = xr->m;

        KASSERT(xchan->caps & (XCHAN_CAP_NOSEG | XCHAN_CAP_BOUNCE),
            ("Handling segmented data is not implemented here."));

        nsegs = 1;

        switch (xr->req_type) {
        case XR_TYPE_MBUF:
                if (xchan->caps & XCHAN_CAP_BOUNCE) {
                        if (xr->direction == XDMA_MEM_TO_DEV)
                                m_copydata(m, 0, m->m_pkthdr.len,
                                    (void *)xr->buf.vaddr);
                        seg[0].ds_addr = (bus_addr_t)xr->buf.paddr;
                } else if (xchan->caps & XCHAN_CAP_IOMMU) {
                        addr = mtod(m, bus_addr_t);
                        pa = vtophys(addr);

                        if (xr->direction == XDMA_MEM_TO_DEV)
                                prot = VM_PROT_READ;
                        else
                                prot = VM_PROT_WRITE;

                        xdma_iommu_add_entry(xchan, &va,
                            pa, m->m_pkthdr.len, prot);

                        /*
                         * Save VA so we can unload data later
                         * after completion of this transfer.
                         */
                        if (xr->direction == XDMA_MEM_TO_DEV)
                                xr->src_addr = va;
                        else
                                xr->dst_addr = va;
                        seg[0].ds_addr = va;
                } else
                        seg[0].ds_addr = mtod(m, bus_addr_t);
                seg[0].ds_len = m->m_pkthdr.len;
                break;
        case XR_TYPE_BIO:
        case XR_TYPE_VIRT:
        default:
                panic("implement me\n");
        }

        return (nsegs);
}

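/*
 * Load a request's data for transfer and record the segment count in
 * the request's buffer descriptor.
 */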
static int
xdma_load_data(xdma_channel_t *xchan,
    struct xdma_request *xr, struct bus_dma_segment *seg)
{
        int nsegs;

        nsegs = 0;

        if (xchan->caps & XCHAN_CAP_BUSDMA)
                nsegs = _xdma_load_data_busdma(xchan, xr, seg);
        else
                nsegs = _xdma_load_data(xchan, xr, seg);
        if (nsegs == 0)
                return (0); /* Try again later. */

        xr->buf.nsegs = nsegs;
        xr->buf.nsegs_left = nsegs;

        return (nsegs);
}

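/*
 * Move requests from the inbound queue to the processing queue,
 * filling the channel's sglist while the DMA engine has capacity.
 * Returns the number of sglist entries produced.
 */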
static int
xdma_process(xdma_channel_t *xchan,
    struct xdma_sglist *sg)
{
        struct bus_dma_segment seg[XDMA_MAX_SEG];
        struct xdma_request *xr;
        struct xdma_request *xr_tmp;
        xdma_controller_t *xdma;
        uint32_t capacity;
        uint32_t n;
        uint32_t c;
        int nsegs;
        int ret;

        XCHAN_ASSERT_LOCKED(xchan);

        xdma = xchan->xdma;

        n = 0;
        c = 0;

        ret = XDMA_CHANNEL_CAPACITY(xdma->dma_dev, xchan, &capacity);
        if (ret != 0) {
                device_printf(xdma->dev,
                    "%s: Can't get DMA controller capacity.\n", __func__);
                return (-1);
        }

        TAILQ_FOREACH_SAFE(xr, &xchan->queue_in, xr_next, xr_tmp) {
                switch (xr->req_type) {
                case XR_TYPE_MBUF:
                        if ((xchan->caps & XCHAN_CAP_NOSEG) ||
                            (c > xchan->maxnsegs))
                                c = xdma_mbuf_defrag(xchan, xr);
                        break;
                case XR_TYPE_BIO:
                case XR_TYPE_VIRT:
                default:
                        c = 1;
                }

                if (capacity <= (c + n)) {
                        /*
                         * No space yet available for the entire
                         * request in the DMA engine.
                         */
                        break;
                }

                if ((c + n + xchan->maxnsegs) >= XDMA_SGLIST_MAXLEN) {
                        /* Sglist is full. */
                        break;
                }

                nsegs = xdma_load_data(xchan, xr, seg);
                if (nsegs == 0)
                        break;

                xdma_sglist_add(&sg[n], seg, nsegs, xr);
                n += nsegs;

                QUEUE_IN_LOCK(xchan);
                TAILQ_REMOVE(&xchan->queue_in, xr, xr_next);
                QUEUE_IN_UNLOCK(xchan);

                QUEUE_PROC_LOCK(xchan);
                TAILQ_INSERT_TAIL(&xchan->processing, xr, xr_next);
                QUEUE_PROC_UNLOCK(xchan);
        }

        return (n);
}

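/*
 * Build an sglist from the queued requests and hand it to the DMA
 * engine driver.
 */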
int
xdma_queue_submit_sg(xdma_channel_t *xchan)
{
        struct xdma_sglist *sg;
        xdma_controller_t *xdma;
        int sg_n;
        int ret;

        xdma = xchan->xdma;
        KASSERT(xdma != NULL, ("xdma is NULL"));

        XCHAN_ASSERT_LOCKED(xchan);

        sg = xchan->sg;

        if ((xchan->caps & (XCHAN_CAP_BOUNCE | XCHAN_CAP_BUSDMA)) &&
           (xchan->flags & XCHAN_BUFS_ALLOCATED) == 0) {
                device_printf(xdma->dev,
                    "%s: Can't submit a transfer: no bufs\n",
                    __func__);
                return (-1);
        }

        /*
         * xdma_process() returns -1 on error and 0 when there is
         * nothing to submit; only a positive segment count proceeds.
         */
        sg_n = xdma_process(xchan, sg);
        if (sg_n <= 0)
                return (sg_n);

        /* Now submit sglist to DMA engine driver. */
        ret = XDMA_CHANNEL_SUBMIT_SG(xdma->dma_dev, xchan, sg, sg_n);
        if (ret != 0) {
                device_printf(xdma->dev,
                    "%s: Can't submit an sglist.\n", __func__);
                return (-1);
        }

        return (0);
}
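
/*
 * Illustrative sketch (not part of the original file): a typical
 * transmit path pairs the enqueue helpers from the companion
 * xdma_queue.c with the submit routine above.  The helper names and
 * argument values shown here are assumptions based on those files,
 * not part of this one.
 *
 *      if (xdma_enqueue_mbuf(xchan, &m, 0, 4, 4, XDMA_MEM_TO_DEV) != 0)
 *              return (ENOBUFS);
 *      xdma_queue_submit(xchan);
 */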
