The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/contrib/alpine-hal/al_hal_udma.h

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2 *******************************************************************************
    3 Copyright (C) 2015 Annapurna Labs Ltd.
    4 
    5 This file may be licensed under the terms of the Annapurna Labs Commercial
    6 License Agreement.
    7 
    8 Alternatively, this file can be distributed under the terms of the GNU General
    9 Public License V2 as published by the Free Software Foundation and can be
   10 found at http://www.gnu.org/licenses/gpl-2.0.html
   11 
   12 Alternatively, redistribution and use in source and binary forms, with or
   13 without modification, are permitted provided that the following conditions are
   14 met:
   15 
   16     *     Redistributions of source code must retain the above copyright notice,
   17 this list of conditions and the following disclaimer.
   18 
   19     *     Redistributions in binary form must reproduce the above copyright
   20 notice, this list of conditions and the following disclaimer in
   21 the documentation and/or other materials provided with the
   22 distribution.
   23 
   24 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
   25 ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   26 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   27 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
   28 ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   29 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
   30 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
   31 ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   32 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   33 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   34 
   35 *******************************************************************************/
   36 
   37 /**
   38  * @defgroup group_udma_api API
   39  * @ingroup group_udma
   40  * UDMA API
   41  * @{
   42  * @}
   43  *
   44  * @defgroup group_udma_main UDMA Main
   45  * @ingroup group_udma_api
   46  * UDMA main API
   47  * @{
   48  * @file   al_hal_udma.h
   49  *
   50  * @brief C Header file for the Universal DMA HAL driver
   51  *
   52  */
   53 
   54 #ifndef __AL_HAL_UDMA_H__
   55 #define __AL_HAL_UDMA_H__
   56 
   57 #include "al_hal_common.h"
   58 #include "al_hal_udma_regs.h"
   59 
   60 /* *INDENT-OFF* */
   61 #ifdef __cplusplus
   62 extern "C" {
   63 #endif
   64 /* *INDENT-ON* */
   65 
   66 #define DMA_MAX_Q       4
   67 #define AL_UDMA_MIN_Q_SIZE      4
   68 #define AL_UDMA_MAX_Q_SIZE      (1 << 16) /* hw can do more, but we limit it */
   69 
   70 /* Default Max number of descriptors supported per action */
   71 #define AL_UDMA_DEFAULT_MAX_ACTN_DESCS  16
   72 
   73 #define AL_UDMA_REV_ID_1        1
   74 #define AL_UDMA_REV_ID_2        2
   75 
   76 #define DMA_RING_ID_MASK        0x3
   77 /* New registers ?? */
   78 /* Statistics - TBD */
   79 
/**
 * UDMA submission descriptor
 *
 * One 16-byte ring entry, viewed as TX, TX-metadata or RX depending on the
 * queue direction and usage.
 */
union al_udma_desc {
	/* TX */
	struct {
		uint32_t len_ctrl;	/* buffer length and AL_M2S_DESC_* control flags */
		uint32_t meta_ctrl;	/* metadata / control word */
		uint64_t buf_ptr;	/* buffer physical address (Target-ID in upper bits) */
	} tx;
	/* TX Meta, used by upper layer */
	struct {
		uint32_t len_ctrl;	/* control flags (AL_M2S_DESC_META_DATA set) */
		uint32_t meta_ctrl;
		uint32_t meta1;		/* upper-layer-defined metadata */
		uint32_t meta2;		/* upper-layer-defined metadata */
	} tx_meta;
	/* RX */
	struct {
		uint32_t len_ctrl;	/* buffer length and AL_S2M_DESC_* control flags */
		uint32_t buf2_ptr_lo;	/* low 32 bits of second buffer address (dual-buffer mode) */
		uint64_t buf1_ptr;	/* first buffer physical address */
	} rx;
} __packed_a16;
  102 
/* TX (M2S) descriptor length and control fields (tx.len_ctrl) */

#define AL_M2S_DESC_CONCAT                      AL_BIT(31)      /* concatenate */
#define AL_M2S_DESC_DMB                         AL_BIT(30)
                                                /** Data Memory Barrier */
#define AL_M2S_DESC_NO_SNOOP_H                  AL_BIT(29)
#define AL_M2S_DESC_INT_EN                      AL_BIT(28)      /** enable interrupt */
#define AL_M2S_DESC_LAST                        AL_BIT(27)      /* last descriptor of a packet */
#define AL_M2S_DESC_FIRST                       AL_BIT(26)      /* first descriptor of a packet */
#define AL_M2S_DESC_RING_ID_SHIFT               24
#define AL_M2S_DESC_RING_ID_MASK                (0x3 << AL_M2S_DESC_RING_ID_SHIFT)
#define AL_M2S_DESC_META_DATA                   AL_BIT(23)
#define AL_M2S_DESC_DUMMY                       AL_BIT(22) /* for Metadata only */
#define AL_M2S_DESC_LEN_ADJ_SHIFT               20
#define AL_M2S_DESC_LEN_ADJ_MASK                (0x7 << AL_M2S_DESC_LEN_ADJ_SHIFT)
#define AL_M2S_DESC_LEN_SHIFT                   0
#define AL_M2S_DESC_LEN_MASK                    (0xfffff << AL_M2S_DESC_LEN_SHIFT)

/* RX (S2M) descriptor length and control fields (rx.len_ctrl) */

#define AL_S2M_DESC_DUAL_BUF                    AL_BIT(31)
#define AL_S2M_DESC_NO_SNOOP_H                  AL_BIT(29)
#define AL_S2M_DESC_INT_EN                      AL_BIT(28)      /** enable interrupt */
#define AL_S2M_DESC_RING_ID_SHIFT               24
#define AL_S2M_DESC_RING_ID_MASK                (0x3 << AL_S2M_DESC_RING_ID_SHIFT)
#define AL_S2M_DESC_LEN_SHIFT                   0
#define AL_S2M_DESC_LEN_MASK                    (0xffff << AL_S2M_DESC_LEN_SHIFT)
#define AL_S2M_DESC_LEN2_SHIFT                  16
#define AL_S2M_DESC_LEN2_MASK                   (0x3fff << AL_S2M_DESC_LEN2_SHIFT)
#define AL_S2M_DESC_LEN2_GRANULARITY_SHIFT      6

/* TX/RX descriptor Target-ID field (in the buffer address 64 bit field) */
#define AL_UDMA_DESC_TGTID_SHIFT                48
  134 
/**
 * UDMA completion descriptor
 *
 * One 4-byte completion-ring entry; interpretation of ctrl_meta depends on
 * the queue direction (see AL_UDMA_CDESC_* flags).
 */
union al_udma_cdesc {
	/* TX completion */
	struct {
		uint32_t ctrl_meta;	/* status / control flags */
	} al_desc_comp_tx;
	/* RX completion */
	struct {
		/* TBD */
		uint32_t ctrl_meta;	/* status / control flags */
	} al_desc_comp_rx;
} __packed_a4;
  147 
/* TX/RX common completion desc ctrl_meta fields */
#define AL_UDMA_CDESC_ERROR             AL_BIT(31)
#define AL_UDMA_CDESC_BUF1_USED         AL_BIT(30)
#define AL_UDMA_CDESC_DDP               AL_BIT(29)
#define AL_UDMA_CDESC_LAST              AL_BIT(27)      /* last completion of a packet */
#define AL_UDMA_CDESC_FIRST             AL_BIT(26)      /* first completion of a packet */
/* word 2 */
#define AL_UDMA_CDESC_BUF2_USED                 AL_BIT(31)
#define AL_UDMA_CDESC_BUF2_LEN_SHIFT    16
#define AL_UDMA_CDESC_BUF2_LEN_MASK             AL_FIELD_MASK(29, 16)
/** Basic Buffer structure */
struct al_buf {
	al_phys_addr_t addr; /**< Buffer physical address */
	uint32_t len; /**< Buffer length in bytes */
};
  163 
/** Block is a set of buffers that belong to same source or destination */
struct al_block {
	struct al_buf *bufs; /**< The buffers of the block */
	uint32_t num; /**< Number of buffers of the block */

	/**<
	 * Target-ID to be assigned to the block descriptors
	 * Requires Target-ID in descriptor to be enabled for the specific UDMA
	 * queue.
	 */
	uint16_t tgtid;
};
  176 
/** UDMA type (engine direction) */
enum al_udma_type {
	UDMA_TX,	/**< memory-to-stream (transmit) engine */
	UDMA_RX		/**< stream-to-memory (receive) engine */
};
  182 
/** UDMA state (see al_udma_state_set()/al_udma_state_get()) */
enum al_udma_state {
	UDMA_DISABLE = 0,
	UDMA_IDLE,
	UDMA_NORMAL,
	UDMA_ABORT,
	UDMA_RESET
};
  191 
  192 extern const char *const al_udma_states_name[];
  193 
/** UDMA Q specific parameters from upper layer */
struct al_udma_q_params {
	uint32_t size;          /**< ring size (in descriptors), submission and
				 * completion rings must have same size
				 */
	union al_udma_desc *desc_base; /**< cpu address for submission ring
					 * descriptors
					 */
	al_phys_addr_t desc_phy_base;   /**< submission ring descriptors
					 * physical base address
					 */
#ifdef __FreeBSD__
	bus_dma_tag_t desc_phy_base_tag;	/**< busdma tag for the submission ring */
	bus_dmamap_t desc_phy_base_map;		/**< busdma map for the submission ring */
#endif
	uint8_t *cdesc_base;    /**< completion descriptors pointer, NULL */
				/* means no completion update */
	al_phys_addr_t cdesc_phy_base;  /**< completion descriptors ring
					 * physical base address
					 */
#ifdef __FreeBSD__
	bus_dma_tag_t cdesc_phy_base_tag;	/**< busdma tag for the completion ring */
	bus_dmamap_t cdesc_phy_base_map;	/**< busdma map for the completion ring */
#endif
	uint32_t cdesc_size;    /**< size (in bytes) of a single dma completion
					* descriptor
					*/

	uint8_t adapter_rev_id; /**< PCI adapter revision ID */
};
  224 
/** UDMA parameters from upper layer */
struct al_udma_params {
	struct unit_regs __iomem *udma_regs_base; /**< UDMA unit register base */
	enum al_udma_type type; /**< Tx or Rx */
	uint8_t num_of_queues; /**< number of queues supported by the UDMA */
	const char *name; /**< the upper layer must keep the string area */
};
  232 
   233 /* Forward declaration */
  234 struct al_udma;
  235 
/** SW status of a queue */
enum al_udma_queue_status {
	AL_QUEUE_NOT_INITIALIZED = 0,
	AL_QUEUE_DISABLED,
	AL_QUEUE_ENABLED,
	AL_QUEUE_ABORTED
};
  243 
/** UDMA Queue private data structure */
struct __cache_aligned al_udma_q {
	uint16_t size_mask;             /**< mask used for pointers wrap around
					 * equals to size - 1
					 */
	union udma_q_regs __iomem *q_regs; /**< pointer to the per queue UDMA
					   * registers
					   */
	union al_udma_desc *desc_base_ptr; /**< base address submission ring
						* descriptors
						*/
	uint16_t next_desc_idx; /**< index to the next available submission
				      * descriptor
				      */

	uint32_t desc_ring_id;  /**< current submission ring id */

	uint8_t *cdesc_base_ptr;/**< completion descriptors pointer, NULL */
				/* means no completion */
	uint32_t cdesc_size;    /**< size (in bytes) of the udma completion ring
				 * descriptor
				 */
	uint16_t next_cdesc_idx; /**< index in descriptors for next completing
			      * ring descriptor
			      */
	uint8_t *end_cdesc_ptr; /**< pointer to the last completion descriptor,
				 * used for wrap around detection
				 */
	uint16_t comp_head_idx; /**< completion ring head pointer register
				 * shadow
				 */
	volatile union al_udma_cdesc *comp_head_ptr; /**< when working in get_packet mode
				       * we maintain pointer instead of the
				       * above idx
				       */

	uint32_t pkt_crnt_descs; /**< holds the number of processed descriptors
				  * of the current packet
				  */
	uint32_t comp_ring_id;  /**< current completion Ring Id */


	al_phys_addr_t desc_phy_base; /**< submission desc. physical base */
	al_phys_addr_t cdesc_phy_base; /**< completion desc. physical base */

	uint32_t flags; /**< flags used for completion modes */
	uint32_t size;          /**< ring size in descriptors  */
	enum al_udma_queue_status status; /**< SW state of the queue */
	struct al_udma *udma;   /**< pointer to parent UDMA */
	uint32_t qid;           /**< the index number of the queue */

	/*
	 * The following fields are duplicated from the UDMA parent adapter
	 * due to performance considerations.
	 */
	uint8_t adapter_rev_id; /**< PCI adapter revision ID */
};
  299 
/** UDMA engine private data structure */
struct al_udma {
	const char *name;			/* engine name (owned by upper layer) */
	enum al_udma_type type;			/* Tx or Rx */
	enum al_udma_state state;		/* SW-tracked engine state */
	uint8_t num_of_queues;			/* number of queues supported by the UDMA */
	union udma_regs __iomem *udma_regs;	/* pointer to the UDMA registers */
	struct udma_gen_regs *gen_regs;		/* pointer to the Gen registers */
	struct al_udma_q udma_q[DMA_MAX_Q];	/* array of UDMA queue structures */
	unsigned int rev_id;			/* UDMA revision ID */
};
  311 
  312 
  313 /*
  314  * Configurations
  315  */
  316 
  317 /* Initializations functions */
  318 /**
  319  * Initialize the udma engine
  320  *
  321  * @param udma udma data structure
  322  * @param udma_params udma parameters from upper layer
  323  *
  324  * @return 0 on success. -EINVAL otherwise.
  325  */
  326 int al_udma_init(struct al_udma *udma, struct al_udma_params *udma_params);
  327 
  328 /**
  329  * Initialize the udma queue data structure
  330  *
  331  * @param udma
  332  * @param qid
  333  * @param q_params
  334  *
  335  * @return 0 if no error found.
  336  *         -EINVAL if the qid is out of range
  337  *         -EIO if queue was already initialized
  338  */
  339 
  340 int al_udma_q_init(struct al_udma *udma, uint32_t qid,
  341                    struct al_udma_q_params *q_params);
  342 
  343 /**
  344  * Reset a udma queue
  345  *
  346  * Prior to calling this function make sure:
  347  * 1. Queue interrupts are masked
  348  * 2. No additional descriptors are written to the descriptor ring of the queue
  349  * 3. No completed descriptors are being fetched
  350  *
  351  * The queue can be initialized again using 'al_udma_q_init'
  352  *
  353  * @param udma_q
  354  *
  355  * @return 0 if no error found.
  356  */
  357 
  358 int al_udma_q_reset(struct al_udma_q *udma_q);
  359 
  360 /**
   361  * return (by reference) a pointer to a specific queue data structure.
  362  * this pointer needed for calling functions (i.e. al_udma_desc_action_add) that
  363  * require this pointer as input argument.
  364  *
  365  * @param udma udma data structure
  366  * @param qid queue index
  367  * @param q_handle pointer to the location where the queue structure pointer
  368  * written to.
  369  *
  370  * @return  0 on success. -EINVAL otherwise.
  371  */
  372 int al_udma_q_handle_get(struct al_udma *udma, uint32_t qid,
  373                       struct al_udma_q **q_handle);
  374 
  375 /**
  376  * Change the UDMA's state
  377  *
  378  * @param udma udma data structure
  379  * @param state the target state
  380  *
  381  * @return 0
  382  */
  383 int al_udma_state_set(struct al_udma *udma, enum al_udma_state state);
  384 
  385 /**
  386  * return the current UDMA hardware state
  387  *
  388  * @param udma udma handle
  389  *
  390  * @return the UDMA state as reported by the hardware.
  391  */
  392 enum al_udma_state al_udma_state_get(struct al_udma *udma);
  393 
  394 /*
  395  * Action handling
  396  */
  397 
  398 /**
  399  * get number of descriptors that can be submitted to the udma.
  400  * keep one free descriptor to simplify full/empty management
  401  * @param udma_q queue handle
  402  *
  403  * @return num of free descriptors.
  404  */
  405 static INLINE uint32_t al_udma_available_get(struct al_udma_q *udma_q)
  406 {
  407         uint16_t tmp = udma_q->next_cdesc_idx - (udma_q->next_desc_idx + 1);
  408         tmp &= udma_q->size_mask;
  409 
  410         return (uint32_t) tmp;
  411 }
  412 
  413 /**
  414  * check if queue has pending descriptors
  415  *
  416  * @param udma_q queue handle
  417  *
  418  * @return AL_TRUE if descriptors are submitted to completion ring and still
  419  * not completed (with ack). AL_FALSE otherwise.
  420  */
  421 static INLINE al_bool al_udma_is_empty(struct al_udma_q *udma_q)
  422 {
  423         if (((udma_q->next_cdesc_idx - udma_q->next_desc_idx) &
  424              udma_q->size_mask) == 0)
  425                 return AL_TRUE;
  426 
  427         return AL_FALSE;
  428 }
  429 
  430 /**
  431  * get next available descriptor
  432  * @param udma_q queue handle
  433  *
  434  * @return pointer to the next available descriptor
  435  */
  436 static INLINE union al_udma_desc *al_udma_desc_get(struct al_udma_q *udma_q)
  437 {
  438         union al_udma_desc *desc;
  439         uint16_t next_desc_idx;
  440 
  441         al_assert(udma_q);
  442 
  443         next_desc_idx = udma_q->next_desc_idx;
  444         desc = udma_q->desc_base_ptr + next_desc_idx;
  445 
  446         next_desc_idx++;
  447 
  448         /* if reached end of queue, wrap around */
  449         udma_q->next_desc_idx = next_desc_idx & udma_q->size_mask;
  450 
  451         return desc;
  452 }
  453 
  454 /**
  455  * get ring id for the last allocated descriptor
  456  * @param udma_q
  457  *
  458  * @return ring id for the last allocated descriptor
  459  * this function must be called each time a new descriptor is allocated
  460  * by the al_udma_desc_get(), unless ring id is ignored.
  461  */
  462 static INLINE uint32_t al_udma_ring_id_get(struct al_udma_q *udma_q)
  463 {
  464         uint32_t ring_id;
  465 
  466         al_assert(udma_q);
  467 
  468         ring_id = udma_q->desc_ring_id;
  469 
  470         /* calculate the ring id of the next desc */
  471         /* if next_desc points to first desc, then queue wrapped around */
  472         if (unlikely(udma_q->next_desc_idx) == 0)
  473                 udma_q->desc_ring_id = (udma_q->desc_ring_id + 1) &
  474                         DMA_RING_ID_MASK;
  475         return ring_id;
  476 }
  477 
/* add DMA action - trigger the engine */
/**
 * add num descriptors to the submission queue.
 *
 * Writes the number of newly prepared descriptors to the queue's doorbell
 * register (drtp_inc), after a memory barrier that guarantees the descriptor
 * contents are visible to the DMA before the doorbell lands.
 *
 * @param udma_q queue handle
 * @param num number of descriptors to add to the queues ring.
 *
 * @return 0;
 */
static INLINE int al_udma_desc_action_add(struct al_udma_q *udma_q,
					  uint32_t num)
{
	uint32_t *addr;

	al_assert(udma_q);
	al_assert((num > 0) && (num <= udma_q->size));

	addr = &udma_q->q_regs->rings.drtp_inc;
	/* make sure data written to the descriptors will be visible by the */
	/* DMA */
	al_local_data_memory_barrier();

	/*
	 * As we explicitly invoke the synchronization function
	 * (al_data_memory_barrier()), then we can use the relaxed version.
	 */
	al_reg_write32_relaxed(addr, num);

	return 0;
}
  508 
/** test completion-descriptor flags for the first descriptor of a packet */
#define cdesc_is_first(flags) ((flags) & AL_UDMA_CDESC_FIRST)
/** test completion-descriptor flags for the last descriptor of a packet */
#define cdesc_is_last(flags) ((flags) & AL_UDMA_CDESC_LAST)
  511 
  512 /**
  513  * return pointer to the cdesc + offset desciptors. wrap around when needed.
  514  *
  515  * @param udma_q queue handle
  516  * @param cdesc pointer that set by this function
  517  * @param offset offset desciptors
  518  *
  519  */
  520 static INLINE volatile union al_udma_cdesc *al_cdesc_next(
  521         struct al_udma_q                *udma_q,
  522         volatile union al_udma_cdesc    *cdesc,
  523         uint32_t                        offset)
  524 {
  525         volatile uint8_t *tmp = (volatile uint8_t *) cdesc + offset * udma_q->cdesc_size;
  526         al_assert(udma_q);
  527         al_assert(cdesc);
  528 
  529         /* if wrap around */
  530         if (unlikely((tmp > udma_q->end_cdesc_ptr)))
  531                 return (union al_udma_cdesc *)
  532                         (udma_q->cdesc_base_ptr +
  533                         (tmp - udma_q->end_cdesc_ptr - udma_q->cdesc_size));
  534 
  535         return (volatile union al_udma_cdesc *) tmp;
  536 }
  537 
  538 /**
  539  * check if the flags of the descriptor indicates that is new one
  540  * the function uses the ring id from the descriptor flags to know whether it
  541  * new one by comparing it with the curring ring id of the queue
  542  *
  543  * @param udma_q queue handle
  544  * @param flags the flags of the completion descriptor
  545  *
  546  * @return AL_TRUE if the completion descriptor is new one.
  547  *      AL_FALSE if it old one.
  548  */
  549 static INLINE al_bool al_udma_new_cdesc(struct al_udma_q *udma_q,
  550                                                                 uint32_t flags)
  551 {
  552         if (((flags & AL_M2S_DESC_RING_ID_MASK) >> AL_M2S_DESC_RING_ID_SHIFT)
  553             == udma_q->comp_ring_id)
  554                 return AL_TRUE;
  555         return AL_FALSE;
  556 }
  557 
  558 /**
  559  * get next completion descriptor
  560  * this function will also increment the completion ring id when the ring wraps
  561  * around
  562  *
  563  * @param udma_q queue handle
  564  * @param cdesc current completion descriptor
  565  *
  566  * @return pointer to the completion descriptor that follows the one pointed by
  567  * cdesc
  568  */
  569 static INLINE volatile union al_udma_cdesc *al_cdesc_next_update(
  570         struct al_udma_q                *udma_q,
  571         volatile union al_udma_cdesc    *cdesc)
  572 {
  573         /* if last desc, wrap around */
  574         if (unlikely(((volatile uint8_t *) cdesc == udma_q->end_cdesc_ptr))) {
  575                 udma_q->comp_ring_id =
  576                     (udma_q->comp_ring_id + 1) & DMA_RING_ID_MASK;
  577                 return (union al_udma_cdesc *) udma_q->cdesc_base_ptr;
  578         }
  579         return (volatile union al_udma_cdesc *) ((volatile uint8_t *) cdesc + udma_q->cdesc_size);
  580 }
  581 
  582 /**
  583  * get next completed packet from completion ring of the queue
  584  *
  585  * @param udma_q udma queue handle
  586  * @param desc pointer that set by this function to the first descriptor
  587  * note: desc is valid only when return value is not zero
  588  * @return number of descriptors that belong to the packet. 0 means no completed
  589  * full packet was found.
  590  * If the descriptors found in the completion queue don't form full packet (no
  591  * desc with LAST flag), then this function will do the following:
  592  * (1) save the number of processed descriptors.
  593  * (2) save last processed descriptor, so next time it called, it will resume
  594  *     from there.
  595  * (3) return 0.
  596  * note: the descriptors that belong to the completed packet will still be
  597  * considered as used, that means the upper layer is safe to access those
  598  * descriptors when this function returns. the al_udma_cdesc_ack() should be
  599  * called to inform the udma driver that those descriptors are freed.
  600  */
  601 uint32_t al_udma_cdesc_packet_get(
  602         struct al_udma_q                *udma_q,
  603         volatile union al_udma_cdesc    **desc);
  604 
/** get completion descriptor pointer from its index (no wrap-around check) */
#define al_udma_cdesc_idx_to_ptr(udma_q, idx)				\
	((volatile union al_udma_cdesc *) ((udma_q)->cdesc_base_ptr +	\
				(idx) * (udma_q)->cdesc_size))
  609 
  610 
/**
 * return number of all completed descriptors in the completion ring
 *
 * Reads the completion ring head pointer register (crhp) into the shadow
 * index and counts descriptors between the last acknowledged index and the
 * hardware head.
 *
 * @param udma_q udma queue handle
 * @param cdesc pointer that set by this function to the first descriptor
 * note: desc is valid only when return value is not zero
 * note: pass NULL if not interested
 * @return number of descriptors. 0 means no completed descriptors were found.
 * note: the descriptors that belong to the completed packet will still be
 * considered as used, that means the upper layer is safe to access those
 * descriptors when this function returns. the al_udma_cdesc_ack() should be
 * called to inform the udma driver that those descriptors are freed.
 */
static INLINE uint32_t al_udma_cdesc_get_all(
	struct al_udma_q		*udma_q,
	volatile union al_udma_cdesc	**cdesc)
{
	uint16_t count = 0;

	al_assert(udma_q);

	/* low 16 bits of crhp hold the hardware completion head index */
	udma_q->comp_head_idx = (uint16_t)
				(al_reg_read32(&udma_q->q_regs->rings.crhp) &
						0xFFFF);

	count = (udma_q->comp_head_idx - udma_q->next_cdesc_idx) &
		udma_q->size_mask;

	if (cdesc)
		*cdesc = al_udma_cdesc_idx_to_ptr(udma_q, udma_q->next_cdesc_idx);

	return (uint32_t)count;
}
  644 
  645 /**
  646  * acknowledge the driver that the upper layer completed processing completion
  647  * descriptors
  648  *
  649  * @param udma_q udma queue handle
  650  * @param num number of descriptors to acknowledge
  651  *
  652  * @return 0
  653  */
  654 static INLINE int al_udma_cdesc_ack(struct al_udma_q *udma_q, uint32_t num)
  655 {
  656         al_assert(udma_q);
  657 
  658         udma_q->next_cdesc_idx += num;
  659         udma_q->next_cdesc_idx &= udma_q->size_mask;
  660 
  661         return 0;
  662 }
  663 
  664 /* *INDENT-OFF* */
  665 #ifdef __cplusplus
  666 }
  667 #endif
  668 /* *INDENT-ON* */
  669 
  670 #endif /* __AL_HAL_UDMA_H__ */
  671 /** @} end of UDMA group */

Cache object: 8175bd758296ddbfd1e3c4b0e2fcafb1


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.