FreeBSD/Linux Kernel Cross Reference
sys/contrib/dev/ath/ath_hal/ar9300/ar9300_xmit.c


    1 /*
    2  * Copyright (c) 2013 Qualcomm Atheros, Inc.
    3  *
    4  * Permission to use, copy, modify, and/or distribute this software for any
    5  * purpose with or without fee is hereby granted, provided that the above
    6  * copyright notice and this permission notice appear in all copies.
    7  *
    8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
    9  * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
   10  * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
   11  * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
   12  * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
   13  * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
   14  * PERFORMANCE OF THIS SOFTWARE.
   15  */
   16 
   17 #include "opt_ah.h"
   18 
   19 #include "ah.h"
   20 #include "ah_desc.h"
   21 #include "ah_internal.h"
   22 
   23 #include "ar9300/ar9300.h"
   24 #include "ar9300/ar9300reg.h"
   25 #include "ar9300/ar9300phy.h"
   26 #include "ar9300/ar9300desc.h"
   27 
   28 #define TU_TO_USEC(_tu)         ((_tu) << 10)
   29 #define ONE_EIGHTH_TU_TO_USEC(_tu8)     ((_tu8) << 7)
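
For reference: a TU (802.11 time unit) is 1024 microseconds, so TU_TO_USEC() is a shift by 10, and ONE_EIGHTH_TU_TO_USEC() converts a value expressed in 1/8 TU units with a shift by 7 (1024 / 8 = 128). A minimal standalone sketch of the arithmetic, not part of ar9300_xmit.c:

    /* Illustrative only; the two macros are restated so this compiles
     * outside the driver. */
    #include <assert.h>

    #define TU_TO_USEC(_tu)             ((_tu) << 10)
    #define ONE_EIGHTH_TU_TO_USEC(_tu8) ((_tu8) << 7)

    int
    main(void)
    {
        assert(TU_TO_USEC(10) == 10240);            /* 10 TU   -> 10240 us */
        assert(ONE_EIGHTH_TU_TO_USEC(80) == 10240); /* 80/8 TU -> 10240 us */
        return 0;
    }
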
   30 
   31 /*
   32  * Update Tx FIFO trigger level.
   33  *
   34  * Set b_inc_trig_level to TRUE to increase the trigger level.
   35  * Set b_inc_trig_level to FALSE to decrease the trigger level.
   36  *
   37  * Returns TRUE if the trigger level was updated
   38  */
   39 HAL_BOOL
   40 ar9300_update_tx_trig_level(struct ath_hal *ah, HAL_BOOL b_inc_trig_level)
   41 {
   42     struct ath_hal_9300 *ahp = AH9300(ah);
   43     u_int32_t txcfg, cur_level, new_level;
   44     HAL_INT omask;
   45 
   46     if (AH9300(ah)->ah_tx_trig_level >= MAX_TX_FIFO_THRESHOLD &&
   47         b_inc_trig_level)
   48     {
   49         return AH_FALSE;
   50     }
   51 
   52     /*
   53      * Disable interrupts while futzing with the fifo level.
   54      */
   55     omask = ar9300_set_interrupts(ah, ahp->ah_mask_reg &~ HAL_INT_GLOBAL, 0);
   56 
   57     txcfg = OS_REG_READ(ah, AR_TXCFG);
   58     cur_level = MS(txcfg, AR_FTRIG);
   59     new_level = cur_level;
   60 
   61     if (b_inc_trig_level)  {   /* increase the trigger level */
   62         if (cur_level < MAX_TX_FIFO_THRESHOLD) {
   63             new_level++;
   64         }
   65     } else if (cur_level > MIN_TX_FIFO_THRESHOLD) {
   66         new_level--;
   67     }
   68 
   69     if (new_level != cur_level) {
   70         /* Update the trigger level */
   71         OS_REG_WRITE(ah,
   72             AR_TXCFG, (txcfg &~ AR_FTRIG) | SM(new_level, AR_FTRIG));
   73     }
   74 
   75     /* re-enable chip interrupts */
   76     ar9300_set_interrupts(ah, omask, 0);
   77 
   78     AH9300(ah)->ah_tx_trig_level = new_level;
   79 
   80     return (new_level != cur_level);
   81 }
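
A hedged sketch of how a caller might use the function above: raise the trigger level when the hardware reports a transmit FIFO underrun, so the DMA engine buffers more data before transmission starts. Everything except ar9300_update_tx_trig_level() is an assumption about the surrounding driver, not taken from this file.

    /* Illustrative sketch, not part of this file. */
    static void
    example_handle_tx_underrun(struct ath_hal *ah)
    {
        /* AH_TRUE asks for an increase; the call returns AH_FALSE once
         * the level is already at MAX_TX_FIFO_THRESHOLD, so the result
         * can simply be ignored here. */
        (void) ar9300_update_tx_trig_level(ah, AH_TRUE);
    }
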
   82 
   83 /*
   84  * Returns the value of Tx Trigger Level
   85  */
   86 u_int16_t
   87 ar9300_get_tx_trig_level(struct ath_hal *ah)
   88 {
   89     return (AH9300(ah)->ah_tx_trig_level);
   90 }
   91 
   92 /*
   93  * Set the properties of the tx queue with the parameters
   94  * from q_info.
   95  */
   96 HAL_BOOL
   97 ar9300_set_tx_queue_props(struct ath_hal *ah, int q, const HAL_TXQ_INFO *q_info)
   98 {
   99     struct ath_hal_9300 *ahp = AH9300(ah);
  100     HAL_CAPABILITIES *p_cap = &AH_PRIVATE(ah)->ah_caps;
  101 
  102     if (q >= p_cap->halTotalQueues) {
  103         HALDEBUG(ah, HAL_DEBUG_QUEUE, "%s: invalid queue num %u\n", __func__, q);
  104         return AH_FALSE;
  105     }
  106     return ath_hal_setTxQProps(ah, &ahp->ah_txq[q], q_info);
  107 }
  108 
  109 /*
  110  * Return the properties for the specified tx queue.
  111  */
  112 HAL_BOOL
  113 ar9300_get_tx_queue_props(struct ath_hal *ah, int q, HAL_TXQ_INFO *q_info)
  114 {
  115     struct ath_hal_9300 *ahp = AH9300(ah);
  116     HAL_CAPABILITIES *p_cap = &AH_PRIVATE(ah)->ah_caps;
  117 
  118 
  119     if (q >= p_cap->halTotalQueues) {
  120         HALDEBUG(ah, HAL_DEBUG_QUEUE, "%s: invalid queue num %u\n", __func__, q);
  121         return AH_FALSE;
  122     }
  123     return ath_hal_getTxQProps(ah, q_info, &ahp->ah_txq[q]);
  124 }
  125 
  126 enum {
  127     AH_TX_QUEUE_MINUS_OFFSET_BEACON = 1,
  128     AH_TX_QUEUE_MINUS_OFFSET_CAB    = 2,
  129     AH_TX_QUEUE_MINUS_OFFSET_UAPSD  = 3,
  130     AH_TX_QUEUE_MINUS_OFFSET_PAPRD  = 4,
  131 };
  132 
  133 /*
  134  * Allocate and initialize a tx DCU/QCU combination.
  135  */
  136 int
  137 ar9300_setup_tx_queue(struct ath_hal *ah, HAL_TX_QUEUE type,
  138         const HAL_TXQ_INFO *q_info)
  139 {
  140     struct ath_hal_9300 *ahp = AH9300(ah);
  141     HAL_TX_QUEUE_INFO *qi;
  142     HAL_CAPABILITIES *p_cap = &AH_PRIVATE(ah)->ah_caps;
  143     int q;
  144 
  145     /* XXX move queue assignment to driver */
  146     switch (type) {
  147     case HAL_TX_QUEUE_BEACON:
  148         /* highest priority */
  149         q = p_cap->halTotalQueues - AH_TX_QUEUE_MINUS_OFFSET_BEACON;
  150         break;
  151     case HAL_TX_QUEUE_CAB:
  152         /* next highest priority */
  153         q = p_cap->halTotalQueues - AH_TX_QUEUE_MINUS_OFFSET_CAB;
  154         break;
  155     case HAL_TX_QUEUE_UAPSD:
  156         q = p_cap->halTotalQueues - AH_TX_QUEUE_MINUS_OFFSET_UAPSD;
  157         break;
  158     case HAL_TX_QUEUE_PAPRD:
  159         q = p_cap->halTotalQueues - AH_TX_QUEUE_MINUS_OFFSET_PAPRD;
  160         break;
  161     case HAL_TX_QUEUE_DATA:
  162         /*
  163          * don't infringe on top 4 queues, reserved for:
  164          * beacon, CAB, UAPSD, PAPRD
  165          */
  166         for (q = 0;
  167              q < p_cap->halTotalQueues - AH_TX_QUEUE_MINUS_OFFSET_PAPRD;
  168              q++)
  169         {
  170             if (ahp->ah_txq[q].tqi_type == HAL_TX_QUEUE_INACTIVE) {
  171                 break;
  172             }
  173         }
  174         if (q == p_cap->halTotalQueues - AH_TX_QUEUE_MINUS_OFFSET_PAPRD) {
  175             HALDEBUG(ah, HAL_DEBUG_QUEUE,
  176                 "%s: no available tx queue\n", __func__);
  177             return -1;
  178         }
  179         break;
  180     default:
  181         HALDEBUG(ah, HAL_DEBUG_QUEUE,
  182             "%s: bad tx queue type %u\n", __func__, type);
  183         return -1;
  184     }
  185 
  186     HALDEBUG(ah, HAL_DEBUG_QUEUE, "%s: queue %u\n", __func__, q);
  187 
  188     qi = &ahp->ah_txq[q];
  189     if (qi->tqi_type != HAL_TX_QUEUE_INACTIVE) {
  190         HALDEBUG(ah, HAL_DEBUG_QUEUE,
  191             "%s: tx queue %u already active\n", __func__, q);
  192         return -1;
  193     }
  194 
  195     OS_MEMZERO(qi, sizeof(HAL_TX_QUEUE_INFO));
  196     qi->tqi_type = type;
  197 
  198     if (q_info == AH_NULL) {
  199         /* by default enable OK+ERR+DESC+URN interrupts */
  200         qi->tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
  201                         | HAL_TXQ_TXERRINT_ENABLE
  202                         | HAL_TXQ_TXDESCINT_ENABLE
  203                         | HAL_TXQ_TXURNINT_ENABLE;
  204         qi->tqi_aifs = INIT_AIFS;
  205         qi->tqi_cwmin = HAL_TXQ_USEDEFAULT;     /* NB: do at reset */
  206         qi->tqi_cwmax = INIT_CWMAX;
  207         qi->tqi_shretry = INIT_SH_RETRY;
  208         qi->tqi_lgretry = INIT_LG_RETRY;
  209         qi->tqi_physCompBuf = 0;
  210     } else {
  211         qi->tqi_physCompBuf = q_info->tqi_compBuf;
  212         (void) ar9300_set_tx_queue_props(ah, q, q_info);
  213     }
  214     /* NB: must be followed by ar9300_reset_tx_queue */
  215     return q;
  216 }
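
As the trailing note says, a queue returned by ar9300_setup_tx_queue() is not usable until ar9300_reset_tx_queue() has programmed its registers. A minimal sketch of that pairing, using the default parameters (q_info == AH_NULL); the error handling is an assumption, not taken from this file.

    /* Illustrative sketch: allocate a data queue and bring it up. */
    static int
    example_alloc_data_queue(struct ath_hal *ah)
    {
        int q;

        q = ar9300_setup_tx_queue(ah, HAL_TX_QUEUE_DATA, AH_NULL);
        if (q < 0)
            return -1;                       /* no free DCU/QCU */
        if (!ar9300_reset_tx_queue(ah, q)) { /* program AIFS/CW/ready-time */
            ar9300_release_tx_queue(ah, q);
            return -1;
        }
        return q;
    }
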
  217 
  218 /*
  219  * Update the h/w interrupt registers to reflect a tx q's configuration.
  220  */
  221 static void
  222 set_tx_q_interrupts(struct ath_hal *ah, HAL_TX_QUEUE_INFO *qi)
  223 {
  224     struct ath_hal_9300 *ahp = AH9300(ah);
  225 
  226     HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
  227             "%s: tx ok 0x%x err 0x%x eol 0x%x urn 0x%x\n",
  228             __func__,
  229             ahp->ah_tx_ok_interrupt_mask,
  230             ahp->ah_tx_err_interrupt_mask,
  231             ahp->ah_tx_eol_interrupt_mask,
  232             ahp->ah_tx_urn_interrupt_mask);
  233 
  234     OS_REG_WRITE(ah, AR_IMR_S0,
  235               SM(ahp->ah_tx_ok_interrupt_mask, AR_IMR_S0_QCU_TXOK));
  236     OS_REG_WRITE(ah, AR_IMR_S1,
  237               SM(ahp->ah_tx_err_interrupt_mask, AR_IMR_S1_QCU_TXERR)
  238             | SM(ahp->ah_tx_eol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));
  239     OS_REG_RMW_FIELD(ah,
  240         AR_IMR_S2, AR_IMR_S2_QCU_TXURN, ahp->ah_tx_urn_interrupt_mask);
  241     ahp->ah_mask2Reg = OS_REG_READ(ah, AR_IMR_S2);
  242 }
  243 
  244 /*
  245  * Free a tx DCU/QCU combination.
  246  */
  247 HAL_BOOL
  248 ar9300_release_tx_queue(struct ath_hal *ah, u_int q)
  249 {
  250     struct ath_hal_9300 *ahp = AH9300(ah);
  251     HAL_CAPABILITIES *p_cap = &AH_PRIVATE(ah)->ah_caps;
  252     HAL_TX_QUEUE_INFO *qi;
  253 
  254     if (q >= p_cap->halTotalQueues) {
  255         HALDEBUG(ah, HAL_DEBUG_QUEUE, "%s: invalid queue num %u\n", __func__, q);
  256         return AH_FALSE;
  257     }
  258 
  259     qi = &ahp->ah_txq[q];
  260     if (qi->tqi_type == HAL_TX_QUEUE_INACTIVE) {
  261         HALDEBUG(ah, HAL_DEBUG_QUEUE, "%s: inactive queue %u\n", __func__, q);
  262         return AH_FALSE;
  263     }
  264 
  265     HALDEBUG(ah, HAL_DEBUG_QUEUE, "%s: release queue %u\n", __func__, q);
  266 
  267     qi->tqi_type = HAL_TX_QUEUE_INACTIVE;
  268     ahp->ah_tx_ok_interrupt_mask &= ~(1 << q);
  269     ahp->ah_tx_err_interrupt_mask &= ~(1 << q);
  270     ahp->ah_tx_eol_interrupt_mask &= ~(1 << q);
  271     ahp->ah_tx_urn_interrupt_mask &= ~(1 << q);
  272     set_tx_q_interrupts(ah, qi);
  273 
  274     return AH_TRUE;
  275 }
  276 
  277 /*
  278  * Set the retry, aifs, cwmin/max, ready_time regs for specified queue
  279  * Assumes:
  280  *  AH_PRIVATE(ah)->ah_curchan has been set to point to the current channel
  281  */
  282 HAL_BOOL
  283 ar9300_reset_tx_queue(struct ath_hal *ah, u_int q)
  284 {
  285     struct ath_hal_9300     *ahp  = AH9300(ah);
  286 //    struct ath_hal_private  *ap   = AH_PRIVATE(ah);
  287     HAL_CAPABILITIES        *p_cap = &AH_PRIVATE(ah)->ah_caps;
  288     const struct ieee80211_channel *chan = AH_PRIVATE(ah)->ah_curchan;
  289     HAL_TX_QUEUE_INFO       *qi;
  290     u_int32_t               cw_min, chan_cw_min, value;
  291     uint32_t                qmisc, dmisc;
  292 
  293     if (q >= p_cap->halTotalQueues) {
  294         HALDEBUG(ah, HAL_DEBUG_QUEUE, "%s: invalid queue num %u\n", __func__, q);
  295         return AH_FALSE;
  296     }
  297 
  298     qi = &ahp->ah_txq[q];
  299     if (qi->tqi_type == HAL_TX_QUEUE_INACTIVE) {
  300         HALDEBUG(ah, HAL_DEBUG_QUEUE, "%s: inactive queue %u\n", __func__, q);
  301         return AH_TRUE;         /* XXX??? */
  302     }
  303 
  304     HALDEBUG(ah, HAL_DEBUG_QUEUE, "%s: reset queue %u\n", __func__, q);
  305 
  306     if (qi->tqi_cwmin == HAL_TXQ_USEDEFAULT) {
  307         /*
  308          * Select cwmin according to channel type.
  309          * NB: chan can be NULL during attach
  310          */
  311         if (chan && IEEE80211_IS_CHAN_B(chan)) {
  312             chan_cw_min = INIT_CWMIN_11B;
  313         } else {
  314             chan_cw_min = INIT_CWMIN;
  315         }
  316         /* make sure that the CWmin is of the form (2^n - 1) */
  317         for (cw_min = 1; cw_min < chan_cw_min; cw_min = (cw_min << 1) | 1) {}
  318     } else {
  319         cw_min = qi->tqi_cwmin;
  320     }
  321 
  322     /* Set cwmin/cwmax and AIFS values */
  323     if (q > 3 || (!AH9300(ah)->ah_fccaifs))
  324        /* Don't overwrite these values when the regulatory domain is FCC
  325           and a manual rate below 24 Mb/s has been configured. */
  326     {
  327         OS_REG_WRITE(ah, AR_DLCL_IFS(q), SM(cw_min, AR_D_LCL_IFS_CWMIN)
  328                 | SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX)
  329                 | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
  330     }
  331 
  332     /* Set retry limit values */
  333     OS_REG_WRITE(ah, AR_DRETRY_LIMIT(q),
  334         SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
  335         SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
  336         SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));
  337 
  338     /* enable early termination on the QCU */
  339     qmisc = AR_Q_MISC_DCU_EARLY_TERM_REQ;
  340 
  341     /* enable DCU to wait for next fragment from QCU  */
  342     if (AR_SREV_WASP(ah) && (AH_PRIVATE((ah))->ah_macRev <= AR_SREV_REVISION_WASP_12)) {
  343         /* WAR for EV#85395: Wasp Rx overrun issue - reduces Tx queue backoff 
  344          * threshold to 1 to avoid Rx overruns - Fixed in Wasp 1.3 */
  345         dmisc = AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x1;
  346     } else {
  347         dmisc = AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2;
  348     }
  349 
  350     /* multiqueue support */
  351     if (qi->tqi_cbrPeriod) {
  352         OS_REG_WRITE(ah,
  353             AR_QCBRCFG(q),
  354             SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
  355                 SM(qi->tqi_cbrOverflowLimit,
  356             AR_Q_CBRCFG_OVF_THRESH));
  357         qmisc |= AR_Q_MISC_FSP_CBR |
  358             (qi->tqi_cbrOverflowLimit ?
  359                 AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0);
  360     }
  361 
  362     if (qi->tqi_readyTime && (qi->tqi_type != HAL_TX_QUEUE_CAB)) {
  363         OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
  364             SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
  365             AR_Q_RDYTIMECFG_EN);
  366     }
  367 
  368     OS_REG_WRITE(ah, AR_DCHNTIME(q), SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
  369                 (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));
  370 
  371     if (qi->tqi_readyTime &&
  372       (qi->tqi_qflags & HAL_TXQ_RDYTIME_EXP_POLICY_ENABLE))
  373         qmisc |= AR_Q_MISC_RDYTIME_EXP_POLICY;
  374     if (qi->tqi_qflags & HAL_TXQ_DBA_GATED)
  375         qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_DBA_GATED;
  376     if (MS(qmisc, AR_Q_MISC_FSP) != AR_Q_MISC_FSP_ASAP) {
  377         /*
  378          * These are meaningful only when not scheduled ASAP.
  379          */
  380         if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_BEMPTY)
  381             qmisc |= AR_Q_MISC_CBR_INCR_DIS0;
  382         else
  383             qmisc &= ~AR_Q_MISC_CBR_INCR_DIS0;
  384         if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_QEMPTY)
  385             qmisc |= AR_Q_MISC_CBR_INCR_DIS1;
  386         else
  387             qmisc &= ~AR_Q_MISC_CBR_INCR_DIS1;
  388     }
  389 
  390     if (qi->tqi_qflags & HAL_TXQ_BACKOFF_DISABLE)
  391         dmisc |= AR_D_MISC_POST_FR_BKOFF_DIS;
  392     if (qi->tqi_qflags & HAL_TXQ_FRAG_BURST_BACKOFF_ENABLE)
  393         dmisc |= AR_D_MISC_FRAG_BKOFF_EN;
  394     if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_GLOBAL)
  395         dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
  396                     AR_D_MISC_ARB_LOCKOUT_CNTRL);
  397     else if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_INTRA)
  398         dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_INTRA_FR,
  399                     AR_D_MISC_ARB_LOCKOUT_CNTRL);
  400     if (qi->tqi_qflags & HAL_TXQ_IGNORE_VIRTCOL)
  401         dmisc |= SM(AR_D_MISC_VIR_COL_HANDLING_IGNORE,
  402                     AR_D_MISC_VIR_COL_HANDLING);
  403     if (qi->tqi_qflags & HAL_TXQ_SEQNUM_INC_DIS)
  404         dmisc |= AR_D_MISC_SEQ_NUM_INCR_DIS;
  405 
  406     switch (qi->tqi_type) {
  407     case HAL_TX_QUEUE_BEACON:               /* beacon frames */
  408         qmisc |= AR_Q_MISC_FSP_DBA_GATED
  409                     | AR_Q_MISC_BEACON_USE
  410                     | AR_Q_MISC_CBR_INCR_DIS1;
  411 
  412         dmisc |= (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
  413                     AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
  414                     | AR_D_MISC_BEACON_USE
  415                     | AR_D_MISC_POST_FR_BKOFF_DIS;
  416         /* XXX cwmin and cwmax should be 0 for beacon queue */
  417         if (AH_PRIVATE(ah)->ah_opmode != HAL_M_IBSS) {
  418             OS_REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
  419                         | SM(0, AR_D_LCL_IFS_CWMAX)
  420                         | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
  421         }
  422         break;
  423     case HAL_TX_QUEUE_CAB:                  /* CAB  frames */
  424         /*
  425          * No longer enable AR_Q_MISC_RDYTIME_EXP_POLICY
  426          * (bug #6079): the CAB queue does not properly
  427          * refresh the Tx descriptor if the TXE clear
  428          * setting is used.
  429          */
  430         qmisc |= AR_Q_MISC_FSP_DBA_GATED
  431                         | AR_Q_MISC_CBR_INCR_DIS1
  432                         | AR_Q_MISC_CBR_INCR_DIS0;
  433 
  434         if (qi->tqi_readyTime) {
  435             OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
  436               SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
  437               AR_Q_RDYTIMECFG_EN);
  438         } else {
  439 
  440             value = (ahp->ah_beaconInterval * 50 / 100)
  441               - ah->ah_config.ah_additional_swba_backoff
  442               - ah->ah_config.ah_sw_beacon_response_time
  443               + ah->ah_config.ah_dma_beacon_response_time;
  444             /*
  445              * XXX Ensure it isn't too low - nothing lower
  446              * XXX than 10 TU
  447              */
  448             if (value < 10)
  449                 value = 10;
  450             if (value < 0)          /* NB: never true; value is unsigned */
  451                 value = 10;
  452             HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
  453               "%s: defaulting to rdytime = %d uS\n",
  454               __func__, value);
  455             OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
  456               SM(TU_TO_USEC(value), AR_Q_RDYTIMECFG_DURATION) |
  457               AR_Q_RDYTIMECFG_EN);
  458         }
  459         dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
  460                     AR_D_MISC_ARB_LOCKOUT_CNTRL);
  461         break;
  462     case HAL_TX_QUEUE_PSPOLL:
  463         /*
  464          * We may configure the ps_poll QCU to be TIM-gated in the
  465          * future; the TIM_GATED bit is not enabled currently because
  466          * of a hardware problem in Oahu that overshoots the TIM
  467          * bitmap in the beacon and may find a matching associd bit in
  468          * non-TIM elements, triggering spurious PS-Poll responses.
  469          * PS-Poll processing will be done in software.
  470          */
  471         qmisc |= AR_Q_MISC_CBR_INCR_DIS1;
  472         break;
  473     case HAL_TX_QUEUE_UAPSD:
  474         dmisc |= AR_D_MISC_POST_FR_BKOFF_DIS;
  475         break;
  476     default:                        /* NB: silence compiler */
  477         break;
  478     }
  479 
  480 #ifndef AH_DISABLE_WME
  481     /*
  482      * Yes, this is a hack and not the right way to do it, but
  483      * it does get the lockout bits and backoff set for the
  484      * high-pri WME queues for testing.  We need to either extend
  485      * the meaning of queue_info->mode, or create something like
  486      * queue_info->dcumode.
  487      */
  488     if (qi->tqi_intFlags & HAL_TXQ_USE_LOCKOUT_BKOFF_DIS) {
  489         dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
  490                     AR_D_MISC_ARB_LOCKOUT_CNTRL) |
  491                 AR_D_MISC_POST_FR_BKOFF_DIS;
  492     }
  493 #endif
  494 
  495     OS_REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN);
  496     OS_REG_WRITE(ah, AR_QMISC(q), qmisc);
  497     OS_REG_WRITE(ah, AR_DMISC(q), dmisc);
  498 
  499     /*
  500      * Always update the secondary interrupt mask registers - this
  501      * could be a new queue getting enabled in a running system or
  502      * hw getting re-initialized during a reset!
  503      *
  504      * Since we don't differentiate between tx interrupts corresponding
  505      * to individual queues - secondary tx mask regs are always unmasked;
  506      * tx interrupts are enabled/disabled for all queues collectively
  507      * using the primary mask reg
  508      */
  509     if (qi->tqi_qflags & HAL_TXQ_TXOKINT_ENABLE) {
  510         ahp->ah_tx_ok_interrupt_mask |=  (1 << q);
  511     } else {
  512         ahp->ah_tx_ok_interrupt_mask &= ~(1 << q);
  513     }
  514     if (qi->tqi_qflags & HAL_TXQ_TXERRINT_ENABLE) {
  515         ahp->ah_tx_err_interrupt_mask |=  (1 << q);
  516     } else {
  517         ahp->ah_tx_err_interrupt_mask &= ~(1 << q);
  518     }
  519     if (qi->tqi_qflags & HAL_TXQ_TXEOLINT_ENABLE) {
  520         ahp->ah_tx_eol_interrupt_mask |=  (1 << q);
  521     } else {
  522         ahp->ah_tx_eol_interrupt_mask &= ~(1 << q);
  523     }
  524     if (qi->tqi_qflags & HAL_TXQ_TXURNINT_ENABLE) {
  525         ahp->ah_tx_urn_interrupt_mask |=  (1 << q);
  526     } else {
  527         ahp->ah_tx_urn_interrupt_mask &= ~(1 << q);
  528     }
  529     set_tx_q_interrupts(ah, qi);
  530 
  531     return AH_TRUE;
  532 }
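
The cwmin selection above rounds the channel default up to a value of the form 2^n - 1 by growing cw_min through 1, 3, 7, 15, ... A standalone restatement of that loop with worked values (illustrative only, not part of this file):

    /* Round an arbitrary CWmin up to the next (2^n - 1), exactly as
     * ar9300_reset_tx_queue() does. */
    static u_int32_t
    example_round_cwmin(u_int32_t chan_cw_min)
    {
        u_int32_t cw_min;

        for (cw_min = 1; cw_min < chan_cw_min; cw_min = (cw_min << 1) | 1)
            ;
        /* e.g. 10 -> 15, 15 -> 15, 16 -> 31 */
        return cw_min;
    }
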
  533 
  534 /*
  535  * Get the TXDP for the specified queue
  536  */
  537 u_int32_t
  538 ar9300_get_tx_dp(struct ath_hal *ah, u_int q)
  539 {
  540     HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
  541     return OS_REG_READ(ah, AR_QTXDP(q));
  542 }
  543 
  544 /*
  545  * Set the tx_dp for the specified queue
  546  */
  547 HAL_BOOL
  548 ar9300_set_tx_dp(struct ath_hal *ah, u_int q, u_int32_t txdp)
  549 {
  550     HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
  551     HALASSERT(AH9300(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);
  552     HALASSERT(txdp != 0);
  553 
  554     OS_REG_WRITE(ah, AR_QTXDP(q), txdp);
  555 
  556     return AH_TRUE;
  557 }
  558 
  559 /*
  560  * Transmit Enable is read-only now
  561  */
  562 HAL_BOOL
  563 ar9300_start_tx_dma(struct ath_hal *ah, u_int q)
  564 {
  565     return AH_TRUE;
  566 }
  567 
  568 /*
  569  * Return the number of pending frames or 0 if the specified
  570  * queue is stopped.
  571  */
  572 u_int32_t
  573 ar9300_num_tx_pending(struct ath_hal *ah, u_int q)
  574 {
  575     u_int32_t npend;
  576 
  577     HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
  578 
  579     npend = OS_REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
  580     if (npend == 0) {
  581         /*
  582          * Pending frame count (PFC) can momentarily go to zero
  583          * while TXE remains asserted.  In other words a PFC of
  584          * zero is not sufficient to say that the queue has stopped.
  585          */
  586         if (OS_REG_READ(ah, AR_Q_TXE) & (1 << q)) {
  587             npend = 1;              /* arbitrarily return 1 */
  588         }
  589     }
  590 #ifdef DEBUG
  591     if (npend && (AH9300(ah)->ah_txq[q].tqi_type == HAL_TX_QUEUE_CAB)) {
  592         if (OS_REG_READ(ah, AR_Q_RDYTIMESHDN) & (1 << q)) {
  593             HALDEBUG(ah, HAL_DEBUG_QUEUE, "RTSD on CAB queue\n");
  594             /* Clear the ready_time shutdown status bits */
  595             OS_REG_WRITE(ah, AR_Q_RDYTIMESHDN, 1 << q);
  596         }
  597     }
  598 #endif
  599     HALASSERT((npend == 0) ||
  600         (AH9300(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE));
  601 
  602     return npend;
  603 }
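
A hedged sketch of typical use: poll ar9300_num_tx_pending() until a queue drains, bounded by a timeout. The 100 us polling step and 4000 us budget are assumptions that mirror the constants used later in this file.

    /* Illustrative sketch, not part of this file. */
    static HAL_BOOL
    example_wait_tx_drain(struct ath_hal *ah, u_int q)
    {
        int wait;

        for (wait = 4000 / 100; wait > 0; wait--) {
            if (ar9300_num_tx_pending(ah, q) == 0)
                return AH_TRUE;     /* queue empty and TXE clear */
            OS_DELAY(100);
        }
        return AH_FALSE;            /* frames still pending */
    }
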
  604 
  605 /*
  606  * Stop transmit on the specified queue
  607  */
  608 HAL_BOOL
  609 ar9300_stop_tx_dma(struct ath_hal *ah, u_int q, u_int timeout)
  610 {
  611     struct ath_hal_9300 *ahp = AH9300(ah);
  612 
  613     /*
  614      * If we call abort txdma instead, no need to stop RX.
  615      * Otherwise, the RX logic might not be restarted properly.
  616      */
  617     ahp->ah_abort_txdma_norx = AH_FALSE;
  618 
  619     /*
  620      * Directly call abort.  It is better, hardware-wise, to stop all
  621      * queues at once than individual ones.
  622      */
  623     return ar9300_abort_tx_dma(ah);
  624 
  625 #if 0
  626 #define AH_TX_STOP_DMA_TIMEOUT 4000    /* usec */
  627 #define AH_TIME_QUANTUM        100     /* usec */
  628     u_int wait;
  629 
  630     HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
  631 
  632     HALASSERT(AH9300(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);
  633 
  634     if (timeout == 0) {
  635         timeout = AH_TX_STOP_DMA_TIMEOUT;
  636     }
  637 
  638     OS_REG_WRITE(ah, AR_Q_TXD, 1 << q);
  639 
  640     for (wait = timeout / AH_TIME_QUANTUM; wait != 0; wait--) {
  641         if (ar9300_num_tx_pending(ah, q) == 0) {
  642             break;
  643         }
  644         OS_DELAY(AH_TIME_QUANTUM);        /* XXX get actual value */
  645     }
  646 
  647 #ifdef AH_DEBUG
  648     if (wait == 0) {
  649         HALDEBUG(ah, HAL_DEBUG_QUEUE,
  650             "%s: queue %u DMA did not stop in 100 msec\n", __func__, q);
  651         HALDEBUG(ah, HAL_DEBUG_QUEUE,
  652             "%s: QSTS 0x%x Q_TXE 0x%x Q_TXD 0x%x Q_CBR 0x%x\n",
  653             __func__,
  654             OS_REG_READ(ah, AR_QSTS(q)),
  655             OS_REG_READ(ah, AR_Q_TXE),
  656             OS_REG_READ(ah, AR_Q_TXD),
  657             OS_REG_READ(ah, AR_QCBRCFG(q)));
  658         HALDEBUG(ah, HAL_DEBUG_QUEUE,
  659             "%s: Q_MISC 0x%x Q_RDYTIMECFG 0x%x Q_RDYTIMESHDN 0x%x\n",
  660             __func__,
  661             OS_REG_READ(ah, AR_QMISC(q)),
  662             OS_REG_READ(ah, AR_QRDYTIMECFG(q)),
  663             OS_REG_READ(ah, AR_Q_RDYTIMESHDN));
  664     }
  665 #endif /* AH_DEBUG */
  666 
  667     /* 2413+ and up can kill packets at the PCU level */
  668     if (ar9300_num_tx_pending(ah, q)) {
  669         u_int32_t tsf_low, j;
  670 
  671         HALDEBUG(ah, HAL_DEBUG_QUEUE, "%s: Num of pending TX Frames %d on Q %d\n",
  672                  __func__, ar9300_num_tx_pending(ah, q), q);
  673 
  674         /* Kill last PCU Tx Frame */
  675         /* TODO - save off and restore current values of Q1/Q2? */
  676         for (j = 0; j < 2; j++) {
  677             tsf_low = OS_REG_READ(ah, AR_TSF_L32);
  678             OS_REG_WRITE(ah, AR_QUIET2, SM(10, AR_QUIET2_QUIET_DUR));
  679             OS_REG_WRITE(ah, AR_QUIET_PERIOD, 100);
  680             OS_REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsf_low >> 10);
  681             OS_REG_SET_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);
  682 
  683             if ((OS_REG_READ(ah, AR_TSF_L32) >> 10) == (tsf_low >> 10)) {
  684                 break;
  685             }
  686 
  687             HALDEBUG(ah, HAL_DEBUG_QUEUE,
  688                 "%s: TSF has moved while trying to set "
  689                 "quiet time TSF: 0x%08x\n",
  690                 __func__, tsf_low);
  691             /* TSF shouldn't count twice or reg access is taking forever */
  692             HALASSERT(j < 1);
  693         }
  694 
  695         OS_REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
  696 
  697         /* Allow the quiet mechanism to do its work */
  698         OS_DELAY(200);
  699         OS_REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);
  700 
  701         /* Verify all transmit is dead */
  702         wait = timeout / AH_TIME_QUANTUM;
  703         while (ar9300_num_tx_pending(ah, q)) {
  704             if ((--wait) == 0) {
  705                 HALDEBUG(ah, HAL_DEBUG_TX,
  706                     "%s: Failed to stop Tx DMA in %d msec "
  707                     "after killing last frame\n",
  708                     __func__, timeout / 1000);
  709                 break;
  710             }
  711             OS_DELAY(AH_TIME_QUANTUM);
  712         }
  713 
  714         OS_REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
  715     }
  716 
  717     OS_REG_WRITE(ah, AR_Q_TXD, 0);
  718     return (wait != 0);
  719 
  720 #undef AH_TX_STOP_DMA_TIMEOUT
  721 #undef AH_TIME_QUANTUM
  722 #endif
  723 }
  724 
  725 /*
  726  * Really Stop transmit on the specified queue
  727  */
  728 HAL_BOOL
  729 ar9300_stop_tx_dma_indv_que(struct ath_hal *ah, u_int q, u_int timeout)
  730 {
  731 #define AH_TX_STOP_DMA_TIMEOUT 4000    /* usec */
  732 #define AH_TIME_QUANTUM        100     /* usec */
  733     u_int wait;
  734 
  735     HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
  736 
  737     HALASSERT(AH9300(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);
  738 
  739     if (timeout == 0) {
  740         timeout = AH_TX_STOP_DMA_TIMEOUT;
  741     }
  742 
  743     OS_REG_WRITE(ah, AR_Q_TXD, 1 << q);
  744 
  745     for (wait = timeout / AH_TIME_QUANTUM; wait != 0; wait--) {
  746         if (ar9300_num_tx_pending(ah, q) == 0) {
  747             break;
  748         }
  749         OS_DELAY(AH_TIME_QUANTUM);        /* XXX get actual value */
  750     }
  751 
  752 #ifdef AH_DEBUG
  753     if (wait == 0) {
  754         HALDEBUG(ah, HAL_DEBUG_QUEUE,
  755             "%s: queue %u DMA did not stop in %u usec\n", __func__, q, timeout);
  756         HALDEBUG(ah, HAL_DEBUG_QUEUE,
  757             "%s: QSTS 0x%x Q_TXE 0x%x Q_TXD 0x%x Q_CBR 0x%x\n",
  758             __func__,
  759             OS_REG_READ(ah, AR_QSTS(q)),
  760             OS_REG_READ(ah, AR_Q_TXE),
  761             OS_REG_READ(ah, AR_Q_TXD),
  762             OS_REG_READ(ah, AR_QCBRCFG(q)));
  763         HALDEBUG(ah, HAL_DEBUG_QUEUE,
  764             "%s: Q_MISC 0x%x Q_RDYTIMECFG 0x%x Q_RDYTIMESHDN 0x%x\n",
  765             __func__,
  766             OS_REG_READ(ah, AR_QMISC(q)),
  767             OS_REG_READ(ah, AR_QRDYTIMECFG(q)),
  768             OS_REG_READ(ah, AR_Q_RDYTIMESHDN));
  769     }
  770 #endif /* AH_DEBUG */
  771 
  772     /* 2413+ and up can kill packets at the PCU level */
  773     if (ar9300_num_tx_pending(ah, q)) {
  774         u_int32_t tsf_low, j;
  775 
  776         HALDEBUG(ah, HAL_DEBUG_QUEUE, "%s: Num of pending TX Frames %d on Q %d\n",
  777                  __func__, ar9300_num_tx_pending(ah, q), q);
  778 
  779         /* Kill last PCU Tx Frame */
  780         /* TODO - save off and restore current values of Q1/Q2? */
  781         for (j = 0; j < 2; j++) {
  782             tsf_low = OS_REG_READ(ah, AR_TSF_L32);
  783             OS_REG_WRITE(ah, AR_QUIET2, SM(10, AR_QUIET2_QUIET_DUR));
  784             OS_REG_WRITE(ah, AR_QUIET_PERIOD, 100);
  785             OS_REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsf_low >> 10);
  786             OS_REG_SET_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);
  787 
  788             if ((OS_REG_READ(ah, AR_TSF_L32) >> 10) == (tsf_low >> 10)) {
  789                 break;
  790             }
  791 
  792             HALDEBUG(ah, HAL_DEBUG_QUEUE,
  793                 "%s: TSF has moved while trying to set "
  794                 "quiet time TSF: 0x%08x\n",
  795                 __func__, tsf_low);
  796             /* TSF shouldn't count twice or reg access is taking forever */
  797             HALASSERT(j < 1);
  798         }
  799 
  800         OS_REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
  801 
  802         /* Allow the quiet mechanism to do its work */
  803         OS_DELAY(200);
  804         OS_REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);
  805 
  806         /* Verify all transmit is dead */
  807         wait = timeout / AH_TIME_QUANTUM;
  808         while (ar9300_num_tx_pending(ah, q)) {
  809             if ((--wait) == 0) {
  810                 HALDEBUG(ah, HAL_DEBUG_TX,
  811                     "%s: Failed to stop Tx DMA in %d msec "
  812                     "after killing last frame\n",
  813                     __func__, timeout / 1000);
  814                 break;
  815             }
  816             OS_DELAY(AH_TIME_QUANTUM);
  817         }
  818 
  819         OS_REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
  820     }
  821 
  822     OS_REG_WRITE(ah, AR_Q_TXD, 0);
  823     return (wait != 0);
  824 
  825 #undef AH_TX_STOP_DMA_TIMEOUT
  826 #undef AH_TIME_QUANTUM
  827 }
  828 
  829 /*
  830  * Abort transmit on all queues
  831  */
  832 #define AR9300_ABORT_LOOPS     1000
  833 #define AR9300_ABORT_WAIT      5
  834 #define NEXT_TBTT_NOW       10
  835 HAL_BOOL
  836 ar9300_abort_tx_dma(struct ath_hal *ah)
  837 {
  838     struct ath_hal_9300 *ahp = AH9300(ah);
  839     int i, q;
  840     u_int32_t nexttbtt, nextdba, tsf_tbtt, tbtt, dba;
  841     HAL_BOOL stopped;
  842     HAL_BOOL status = AH_TRUE;
  843 
  844     if (ahp->ah_abort_txdma_norx) {
  845         /*
  846          * First of all, make sure RX has been stopped
  847          */
  848         if (ar9300_get_power_mode(ah) != HAL_PM_FULL_SLEEP) {
  849             /* Need to stop RX DMA before reset otherwise chip might hang */
  850             stopped = ar9300_set_rx_abort(ah, AH_TRUE); /* abort and disable PCU */
  851             ar9300_set_rx_filter(ah, 0);
  852             stopped &= ar9300_stop_dma_receive(ah, 0); /* stop and disable RX DMA */
  853             if (!stopped) {
  854                 /*
  855                  * During the transition from full sleep to reset,
  856                  * recv DMA regs are not available to be read
  857                  */
  858                 HALDEBUG(ah, HAL_DEBUG_UNMASKABLE,
  859                     "%s[%d]: ar9300_stop_dma_receive failed\n", __func__, __LINE__);
  860                 //We still continue to stop TX dma
  861                 //return AH_FALSE;
  862             }
  863         } else {
  864             HALDEBUG(ah, HAL_DEBUG_UNMASKABLE,
  865                 "%s[%d]: Chip is already in full sleep\n", __func__, __LINE__);
  866         }
  867     }
  868 
  869     /*
  870      * set txd on all queues
  871      */
  872     OS_REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M);
  873 
  874     /*
  875      * set tx abort bits (also disable rx)
  876      */
  877     OS_REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
  878     /* Add a new recipe from the K31 code */
  879     OS_REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH | AR_DIAG_RX_DIS |
  880                                    AR_DIAG_RX_ABORT | AR_DIAG_FORCE_RX_CLEAR);
  881      /* beacon Q flush */
  882     nexttbtt = OS_REG_READ(ah, AR_NEXT_TBTT_TIMER);
  883     nextdba = OS_REG_READ(ah, AR_NEXT_DMA_BEACON_ALERT);
  884     //printk("%s[%d]:dba: %d, nt: %d \n", __func__, __LINE__, nextdba, nexttbtt);
  885     tsf_tbtt =  OS_REG_READ(ah, AR_TSF_L32);
  886     tbtt = tsf_tbtt + NEXT_TBTT_NOW;
  887     dba = tsf_tbtt;
  888     OS_REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, dba);
  889     OS_REG_WRITE(ah, AR_NEXT_TBTT_TIMER, tbtt);
  890     OS_REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);
  891 
  892     /*
  893      * Let TXE (all queues) clear before waiting for any pending frames.
  894      * This is needed before starting the RF_BUS GRANT sequence;
  895      * otherwise it causes a kernel panic.
  896      */
  897     for(i = 0; i < AR9300_ABORT_LOOPS; i++) {
  898         if(OS_REG_READ(ah, AR_Q_TXE) == 0) {
  899             break;
  900         }
  901         OS_DELAY(AR9300_ABORT_WAIT);
  902     }
  903     if (i == AR9300_ABORT_LOOPS) {
  904         HALDEBUG(ah, HAL_DEBUG_TX, "%s[%d] reached max wait on TXE\n",
  905                  __func__, __LINE__);
  906     }
  907 
  908     /*
  909      * Wait on all tx queues.
  910      * Ideally this would be checked last, gaining an extra 50 usec on
  911      * average, but it is checked first because we don't currently have
  912      * the previous channel information needed to revert the RF changes.
  913      */
  914     for (q = AR_NUM_QCU - 1; q >= 0; q--) {
  915         for (i = 0; i < AR9300_ABORT_LOOPS; i++) {
  916             if (!(ar9300_num_tx_pending(ah, q))) {
  917                 break;
  918             }
  919             OS_DELAY(AR9300_ABORT_WAIT);
  920         }
  921         if (i == AR9300_ABORT_LOOPS) {
  922             status = AH_FALSE;
  923             HALDEBUG(ah, HAL_DEBUG_UNMASKABLE,
  924                     "ABORT LOOP finished for Q: %d, num_pending: %d\n",
  925                     q, ar9300_num_tx_pending(ah, q));
  926             goto exit;
  927         }
  928     }
  929 
  930     /* Restore the beacon alert registers to their saved values */
  931     OS_REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, nextdba);
  932     OS_REG_WRITE(ah, AR_NEXT_TBTT_TIMER, nexttbtt);
  933 
  934 exit:
  935     /*
  936      * clear tx abort bits
  937      */
  938     OS_REG_CLR_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
  939     /* Added a new recipe from the K31 code */
  940     OS_REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH | AR_DIAG_RX_DIS |
  941                                    AR_DIAG_RX_ABORT | AR_DIAG_FORCE_RX_CLEAR);
  942     OS_REG_CLR_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);
  943 
  944     /*
  945      * clear txd
  946      */
  947     OS_REG_WRITE(ah, AR_Q_TXD, 0);
  948 
  949     ahp->ah_abort_txdma_norx = AH_TRUE;
  950 
  951     return status;
  952 }
  953 
  954 /*
  955  * Determine which tx queues need interrupt servicing.
  956  */
  957 void
  958 ar9300_get_tx_intr_queue(struct ath_hal *ah, u_int32_t *txqs)
  959 {
  960     HALDEBUG(AH_NULL, HAL_DEBUG_UNMASKABLE,
  961                  "ar9300_get_tx_intr_queue: Should not be called\n");
  962 #if 0
  963     struct ath_hal_9300 *ahp = AH9300(ah);
  964     *txqs &= ahp->ah_intr_txqs;
  965     ahp->ah_intr_txqs &= ~(*txqs);
  966 #endif
  967 }
  968 
  969 void
  970 ar9300_reset_tx_status_ring(struct ath_hal *ah)
  971 {
  972     struct ath_hal_9300 *ahp = AH9300(ah);
  973 
  974     ahp->ts_tail = 0;
  975 
  976     /* Zero out the status descriptors */
  977     OS_MEMZERO((void *)ahp->ts_ring, ahp->ts_size * sizeof(struct ar9300_txs));
  978     HALDEBUG(ah, HAL_DEBUG_QUEUE,
  979         "%s: TS Start 0x%x End 0x%x Virt %p, Size %d\n", __func__,
  980         ahp->ts_paddr_start, ahp->ts_paddr_end, ahp->ts_ring, ahp->ts_size);
  981 
  982     OS_REG_WRITE(ah, AR_Q_STATUS_RING_START, ahp->ts_paddr_start);
  983     OS_REG_WRITE(ah, AR_Q_STATUS_RING_END, ahp->ts_paddr_end);
  984 }
  985 
  986 void
  987 ar9300_setup_tx_status_ring(struct ath_hal *ah, void *ts_start,
  988     u_int32_t ts_paddr_start, u_int16_t size)
  989 {
  990     struct ath_hal_9300 *ahp = AH9300(ah);
  991 
  992     ahp->ts_paddr_start = ts_paddr_start;
  993     ahp->ts_paddr_end = ts_paddr_start + (size * sizeof(struct ar9300_txs));
  994     ahp->ts_size = size;
  995     ahp->ts_ring = (struct ar9300_txs *)ts_start;
  996 
  997     ar9300_reset_tx_status_ring(ah);
  998 }
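
A hedged sketch of the expected caller: the driver allocates a DMA-coherent array of struct ar9300_txs entries and hands the HAL both the virtual and physical addresses. The allocation helper named below is hypothetical; only ar9300_setup_tx_status_ring() comes from this file.

    /* Illustrative sketch.  "example_dma_alloc" stands in for whatever
     * bus-DMA allocation routine the driver actually uses. */
    #define EXAMPLE_TS_RING_SIZE 512

    static HAL_BOOL
    example_attach_tx_status_ring(struct ath_hal *ah)
    {
        void *ring_va;
        u_int32_t ring_pa;

        if (!example_dma_alloc(EXAMPLE_TS_RING_SIZE *
            sizeof(struct ar9300_txs), &ring_va, &ring_pa))
            return AH_FALSE;

        /* Records the addresses in the HAL state and programs
         * AR_Q_STATUS_RING_START/END via ar9300_reset_tx_status_ring(). */
        ar9300_setup_tx_status_ring(ah, ring_va, ring_pa,
            EXAMPLE_TS_RING_SIZE);
        return AH_TRUE;
    }
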

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.