/*-
 * SPDX-License-Identifier: ISC
 *
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * Copyright (c) 2002-2008 Atheros Communications, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * $FreeBSD$
 */
#include "opt_ah.h"

#include "ah.h"
#include "ah_internal.h"
#include "ah_desc.h"

#include "ar5212/ar5212.h"
#include "ar5212/ar5212reg.h"
#include "ar5212/ar5212desc.h"
#include "ar5212/ar5212phy.h"
#ifdef AH_SUPPORT_5311
#include "ar5212/ar5311reg.h"
#endif

#ifdef AH_NEED_DESC_SWAP
static void ar5212SwapTxDesc(struct ath_desc *ds);
#endif

/*
 * Update Tx FIFO trigger level.
 *
 * Set bIncTrigLevel to TRUE to increase the trigger level.
 * Set bIncTrigLevel to FALSE to decrease the trigger level.
 *
 * Returns TRUE if the trigger level was updated
 */
HAL_BOOL
ar5212UpdateTxTrigLevel(struct ath_hal *ah, HAL_BOOL bIncTrigLevel)
{
    struct ath_hal_5212 *ahp = AH5212(ah);
    uint32_t txcfg, curLevel, newLevel;
    HAL_INT omask;

    if (ahp->ah_txTrigLev >= ahp->ah_maxTxTrigLev)
        return AH_FALSE;

    /*
     * Disable interrupts while futzing with the fifo level.
     */
    omask = ath_hal_setInterrupts(ah, ahp->ah_maskReg &~ HAL_INT_GLOBAL);

    txcfg = OS_REG_READ(ah, AR_TXCFG);
    curLevel = MS(txcfg, AR_FTRIG);
    newLevel = curLevel;
    if (bIncTrigLevel) {                /* increase the trigger level */
        if (curLevel < ahp->ah_maxTxTrigLev)
            newLevel++;
    } else if (curLevel > MIN_TX_FIFO_THRESHOLD)
        newLevel--;
    if (newLevel != curLevel)
        /* Update the trigger level */
        OS_REG_WRITE(ah, AR_TXCFG,
            (txcfg &~ AR_FTRIG) | SM(newLevel, AR_FTRIG));

    ahp->ah_txTrigLev = newLevel;

    /* re-enable chip interrupts */
    ath_hal_setInterrupts(ah, omask);

    return (newLevel != curLevel);
}

/*
 * Set the properties of the tx queue with the parameters
 * from qInfo.
 */
HAL_BOOL
ar5212SetTxQueueProps(struct ath_hal *ah, int q, const HAL_TXQ_INFO *qInfo)
{
    struct ath_hal_5212 *ahp = AH5212(ah);
    HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;

    if (q >= pCap->halTotalQueues) {
        HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
            __func__, q);
        return AH_FALSE;
    }
    return ath_hal_setTxQProps(ah, &ahp->ah_txq[q], qInfo);
}

/*
 * Return the properties for the specified tx queue.
 */
HAL_BOOL
ar5212GetTxQueueProps(struct ath_hal *ah, int q, HAL_TXQ_INFO *qInfo)
{
    struct ath_hal_5212 *ahp = AH5212(ah);
    HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;

    if (q >= pCap->halTotalQueues) {
        HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
            __func__, q);
        return AH_FALSE;
    }
    return ath_hal_getTxQProps(ah, qInfo, &ahp->ah_txq[q]);
}

/*
 * Allocate and initialize a tx DCU/QCU combination.
 */
int
ar5212SetupTxQueue(struct ath_hal *ah, HAL_TX_QUEUE type,
    const HAL_TXQ_INFO *qInfo)
{
    struct ath_hal_5212 *ahp = AH5212(ah);
    HAL_TX_QUEUE_INFO *qi;
    HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
    int q, defqflags;

    /* by default enable OK+ERR+DESC+URN interrupts */
    defqflags = HAL_TXQ_TXOKINT_ENABLE
        | HAL_TXQ_TXERRINT_ENABLE
        | HAL_TXQ_TXDESCINT_ENABLE
        | HAL_TXQ_TXURNINT_ENABLE;
    /* XXX move queue assignment to driver */
    switch (type) {
    case HAL_TX_QUEUE_BEACON:
        q = pCap->halTotalQueues - 1;   /* highest priority */
        defqflags |= HAL_TXQ_DBA_GATED
            | HAL_TXQ_CBR_DIS_QEMPTY
            | HAL_TXQ_ARB_LOCKOUT_GLOBAL
            | HAL_TXQ_BACKOFF_DISABLE;
        break;
    case HAL_TX_QUEUE_CAB:
        q = pCap->halTotalQueues - 2;   /* next highest priority */
        defqflags |= HAL_TXQ_DBA_GATED
            | HAL_TXQ_CBR_DIS_QEMPTY
            | HAL_TXQ_CBR_DIS_BEMPTY
            | HAL_TXQ_ARB_LOCKOUT_GLOBAL
            | HAL_TXQ_BACKOFF_DISABLE;
        break;
    case HAL_TX_QUEUE_UAPSD:
        q = pCap->halTotalQueues - 3;   /* next highest priority */
        if (ahp->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE) {
            HALDEBUG(ah, HAL_DEBUG_ANY,
                "%s: no available UAPSD tx queue\n", __func__);
            return -1;
        }
        break;
    case HAL_TX_QUEUE_DATA:
        for (q = 0; q < pCap->halTotalQueues; q++)
            if (ahp->ah_txq[q].tqi_type == HAL_TX_QUEUE_INACTIVE)
                break;
        if (q == pCap->halTotalQueues) {
            HALDEBUG(ah, HAL_DEBUG_ANY,
                "%s: no available tx queue\n", __func__);
            return -1;
        }
        break;
    default:
        HALDEBUG(ah, HAL_DEBUG_ANY,
            "%s: bad tx queue type %u\n", __func__, type);
        return -1;
    }

    HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: queue %u\n", __func__, q);

    qi = &ahp->ah_txq[q];
    if (qi->tqi_type != HAL_TX_QUEUE_INACTIVE) {
        HALDEBUG(ah, HAL_DEBUG_ANY, "%s: tx queue %u already active\n",
            __func__, q);
        return -1;
    }
    OS_MEMZERO(qi, sizeof(HAL_TX_QUEUE_INFO));
    qi->tqi_type = type;
    if (qInfo == AH_NULL) {
        qi->tqi_qflags = defqflags;
        qi->tqi_aifs = INIT_AIFS;
        qi->tqi_cwmin = HAL_TXQ_USEDEFAULT;     /* NB: do at reset */
        qi->tqi_cwmax = INIT_CWMAX;
        qi->tqi_shretry = INIT_SH_RETRY;
        qi->tqi_lgretry = INIT_LG_RETRY;
        qi->tqi_physCompBuf = 0;
    } else {
        qi->tqi_physCompBuf = qInfo->tqi_compBuf;
        (void) ar5212SetTxQueueProps(ah, q, qInfo);
    }
    /* NB: must be followed by ar5212ResetTxQueue */
    return q;
}

/*
 * Update the h/w interrupt registers to reflect a tx q's configuration.
 */
static void
setTxQInterrupts(struct ath_hal *ah, HAL_TX_QUEUE_INFO *qi)
{
    struct ath_hal_5212 *ahp = AH5212(ah);

    HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
        "%s: tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n", __func__,
        ahp->ah_txOkInterruptMask, ahp->ah_txErrInterruptMask,
        ahp->ah_txDescInterruptMask, ahp->ah_txEolInterruptMask,
        ahp->ah_txUrnInterruptMask);

    OS_REG_WRITE(ah, AR_IMR_S0,
        SM(ahp->ah_txOkInterruptMask, AR_IMR_S0_QCU_TXOK)
        | SM(ahp->ah_txDescInterruptMask, AR_IMR_S0_QCU_TXDESC)
    );
    OS_REG_WRITE(ah, AR_IMR_S1,
        SM(ahp->ah_txErrInterruptMask, AR_IMR_S1_QCU_TXERR)
        | SM(ahp->ah_txEolInterruptMask, AR_IMR_S1_QCU_TXEOL)
    );
    OS_REG_RMW_FIELD(ah, AR_IMR_S2,
        AR_IMR_S2_QCU_TXURN, ahp->ah_txUrnInterruptMask);
}

/*
 * Free a tx DCU/QCU combination.
 */
HAL_BOOL
ar5212ReleaseTxQueue(struct ath_hal *ah, u_int q)
{
    struct ath_hal_5212 *ahp = AH5212(ah);
    HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
    HAL_TX_QUEUE_INFO *qi;

    if (q >= pCap->halTotalQueues) {
        HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
            __func__, q);
        return AH_FALSE;
    }
    qi = &ahp->ah_txq[q];
    if (qi->tqi_type == HAL_TX_QUEUE_INACTIVE) {
        HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: inactive queue %u\n",
            __func__, q);
        return AH_FALSE;
    }

    HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: release queue %u\n", __func__, q);

    qi->tqi_type = HAL_TX_QUEUE_INACTIVE;
    ahp->ah_txOkInterruptMask &= ~(1 << q);
    ahp->ah_txErrInterruptMask &= ~(1 << q);
    ahp->ah_txDescInterruptMask &= ~(1 << q);
    ahp->ah_txEolInterruptMask &= ~(1 << q);
    ahp->ah_txUrnInterruptMask &= ~(1 << q);
    setTxQInterrupts(ah, qi);

    return AH_TRUE;
}

/*
 * Set the retry, aifs, cwmin/max, readyTime regs for specified queue
 * Assumes:
 *  phwChannel has been set to point to the current channel
 */
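/* NB: 1 TU = 1024 microseconds */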
#define TU_TO_USEC(_tu)         ((_tu) << 10)
HAL_BOOL
ar5212ResetTxQueue(struct ath_hal *ah, u_int q)
{
    struct ath_hal_5212 *ahp = AH5212(ah);
    HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
    const struct ieee80211_channel *chan = AH_PRIVATE(ah)->ah_curchan;
    HAL_TX_QUEUE_INFO *qi;
    uint32_t cwMin, chanCwMin, qmisc, dmisc;

    if (q >= pCap->halTotalQueues) {
        HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
            __func__, q);
        return AH_FALSE;
    }
    qi = &ahp->ah_txq[q];
    if (qi->tqi_type == HAL_TX_QUEUE_INACTIVE) {
        HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: inactive queue %u\n",
            __func__, q);
        return AH_TRUE;         /* XXX??? */
    }

    HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: reset queue %u\n", __func__, q);

    if (qi->tqi_cwmin == HAL_TXQ_USEDEFAULT) {
        /*
         * Select cwmin according to channel type.
         * NB: chan can be NULL during attach
         */
        if (chan && IEEE80211_IS_CHAN_B(chan))
            chanCwMin = INIT_CWMIN_11B;
        else
            chanCwMin = INIT_CWMIN;
        /* make sure that the CWmin is of the form (2^n - 1) */
        for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1)
            ;
    } else
        cwMin = qi->tqi_cwmin;

    /* set cwMin/Max and AIFS values */
    OS_REG_WRITE(ah, AR_DLCL_IFS(q),
        SM(cwMin, AR_D_LCL_IFS_CWMIN)
        | SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX)
        | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

    /* Set retry limit values */
    OS_REG_WRITE(ah, AR_DRETRY_LIMIT(q),
        SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH)
        | SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG)
        | SM(qi->tqi_lgretry, AR_D_RETRY_LIMIT_FR_LG)
        | SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH)
    );

    /* NB: always enable early termination on the QCU */
    qmisc = AR_Q_MISC_DCU_EARLY_TERM_REQ
        | SM(AR_Q_MISC_FSP_ASAP, AR_Q_MISC_FSP);

    /* NB: always enable DCU to wait for next fragment from QCU */
    dmisc = AR_D_MISC_FRAG_WAIT_EN;

#ifdef AH_SUPPORT_5311
    if (AH_PRIVATE(ah)->ah_macVersion < AR_SREV_VERSION_OAHU) {
        /* Configure DCU to use the global sequence count */
        dmisc |= AR5311_D_MISC_SEQ_NUM_CONTROL;
    }
#endif
    /* multiqueue support */
    if (qi->tqi_cbrPeriod) {
        OS_REG_WRITE(ah, AR_QCBRCFG(q),
            SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_CBR_INTERVAL)
            | SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_CBR_OVF_THRESH));
        qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_CBR;
        if (qi->tqi_cbrOverflowLimit)
            qmisc |= AR_Q_MISC_CBR_EXP_CNTR_LIMIT;
    }
    if (qi->tqi_readyTime) {
        OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
            SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_INT)
            | AR_Q_RDYTIMECFG_ENA);
    }

    OS_REG_WRITE(ah, AR_DCHNTIME(q),
        SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR)
        | (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

    if (qi->tqi_readyTime &&
        (qi->tqi_qflags & HAL_TXQ_RDYTIME_EXP_POLICY_ENABLE))
        qmisc |= AR_Q_MISC_RDYTIME_EXP_POLICY;
    if (qi->tqi_qflags & HAL_TXQ_DBA_GATED)
        qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_DBA_GATED;
    if (MS(qmisc, AR_Q_MISC_FSP) != AR_Q_MISC_FSP_ASAP) {
        /*
         * These are meaningful only when not scheduled asap.
         */
        if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_BEMPTY)
            qmisc |= AR_Q_MISC_CBR_INCR_DIS0;
        else
            qmisc &= ~AR_Q_MISC_CBR_INCR_DIS0;
        if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_QEMPTY)
            qmisc |= AR_Q_MISC_CBR_INCR_DIS1;
        else
            qmisc &= ~AR_Q_MISC_CBR_INCR_DIS1;
    }

    if (qi->tqi_qflags & HAL_TXQ_BACKOFF_DISABLE)
        dmisc |= AR_D_MISC_POST_FR_BKOFF_DIS;
    if (qi->tqi_qflags & HAL_TXQ_FRAG_BURST_BACKOFF_ENABLE)
        dmisc |= AR_D_MISC_FRAG_BKOFF_EN;
    if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_GLOBAL)
        dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
            AR_D_MISC_ARB_LOCKOUT_CNTRL);
    else if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_INTRA)
        dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_INTRA_FR,
            AR_D_MISC_ARB_LOCKOUT_CNTRL);
    if (qi->tqi_qflags & HAL_TXQ_IGNORE_VIRTCOL)
        dmisc |= SM(AR_D_MISC_VIR_COL_HANDLING_IGNORE,
            AR_D_MISC_VIR_COL_HANDLING);
    if (qi->tqi_qflags & HAL_TXQ_SEQNUM_INC_DIS)
        dmisc |= AR_D_MISC_SEQ_NUM_INCR_DIS;

    /*
     * Fill in type-dependent bits.  Most of this can be
     * removed by specifying the queue parameters in the
     * driver; it's here for backwards compatibility.
     */
    switch (qi->tqi_type) {
    case HAL_TX_QUEUE_BEACON:           /* beacon frames */
        qmisc |= AR_Q_MISC_FSP_DBA_GATED
            | AR_Q_MISC_BEACON_USE
            | AR_Q_MISC_CBR_INCR_DIS1;

        dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
            AR_D_MISC_ARB_LOCKOUT_CNTRL)
            | AR_D_MISC_BEACON_USE
            | AR_D_MISC_POST_FR_BKOFF_DIS;
        break;
    case HAL_TX_QUEUE_CAB:              /* CAB frames */
        /*
         * No longer enable AR_Q_MISC_RDYTIME_EXP_POLICY: there is an
         * issue with the CAB queue not properly refreshing the Tx
         * descriptor if the TXE clear setting is used.
         */
        qmisc |= AR_Q_MISC_FSP_DBA_GATED
            | AR_Q_MISC_CBR_INCR_DIS1
            | AR_Q_MISC_CBR_INCR_DIS0;

        if (qi->tqi_readyTime) {
            HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
                "%s: using tqi_readyTime\n", __func__);
            OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
                SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_INT) |
                AR_Q_RDYTIMECFG_ENA);
        } else {
            int value;
            /*
             * NB: don't set default ready time if driver
             * has explicitly specified something.  This is
             * here solely for backwards compatibility.
             */
            /*
             * XXX for now, hard-code a CAB interval of 70%
             * XXX of the total beacon interval.
             */

            value = (ahp->ah_beaconInterval * 70 / 100)
                - (ah->ah_config.ah_sw_beacon_response_time -
                   ah->ah_config.ah_dma_beacon_response_time)
                - ah->ah_config.ah_additional_swba_backoff;
            /*
             * XXX Ensure it isn't too low - nothing lower
             * XXX than 10 TU
             */
            if (value < 10)
                value = 10;
            HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
                "%s: defaulting to rdytime = %d uS\n",
                __func__, value);
            OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
                SM(TU_TO_USEC(value), AR_Q_RDYTIMECFG_INT) |
                AR_Q_RDYTIMECFG_ENA);
        }
        dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
            AR_D_MISC_ARB_LOCKOUT_CNTRL);
        break;
    default:                    /* NB: silence compiler */
        break;
    }

    OS_REG_WRITE(ah, AR_QMISC(q), qmisc);
    OS_REG_WRITE(ah, AR_DMISC(q), dmisc);

    /* Setup compression scratchpad buffer */
    /*
     * XXX: calling this asynchronously to queue operation can
     * cause unexpected behavior!!!
     */
    if (qi->tqi_physCompBuf) {
        HALASSERT(qi->tqi_type == HAL_TX_QUEUE_DATA ||
            qi->tqi_type == HAL_TX_QUEUE_UAPSD);
        OS_REG_WRITE(ah, AR_Q_CBBS, (80 + 2*q));
        OS_REG_WRITE(ah, AR_Q_CBBA, qi->tqi_physCompBuf);
        OS_REG_WRITE(ah, AR_Q_CBC, HAL_COMP_BUF_MAX_SIZE/1024);
        OS_REG_WRITE(ah, AR_Q0_MISC + 4*q,
            OS_REG_READ(ah, AR_Q0_MISC + 4*q)
            | AR_Q_MISC_QCU_COMP_EN);
    }

    /*
     * Always update the secondary interrupt mask registers - this
     * could be a new queue getting enabled in a running system or
     * hw getting re-initialized during a reset!
     *
     * Since we don't differentiate between tx interrupts corresponding
     * to individual queues - secondary tx mask regs are always unmasked;
     * tx interrupts are enabled/disabled for all queues collectively
     * using the primary mask reg
     */
    if (qi->tqi_qflags & HAL_TXQ_TXOKINT_ENABLE)
        ahp->ah_txOkInterruptMask |= 1 << q;
    else
        ahp->ah_txOkInterruptMask &= ~(1 << q);
    if (qi->tqi_qflags & HAL_TXQ_TXERRINT_ENABLE)
        ahp->ah_txErrInterruptMask |= 1 << q;
    else
        ahp->ah_txErrInterruptMask &= ~(1 << q);
    if (qi->tqi_qflags & HAL_TXQ_TXDESCINT_ENABLE)
        ahp->ah_txDescInterruptMask |= 1 << q;
    else
        ahp->ah_txDescInterruptMask &= ~(1 << q);
    if (qi->tqi_qflags & HAL_TXQ_TXEOLINT_ENABLE)
        ahp->ah_txEolInterruptMask |= 1 << q;
    else
        ahp->ah_txEolInterruptMask &= ~(1 << q);
    if (qi->tqi_qflags & HAL_TXQ_TXURNINT_ENABLE)
        ahp->ah_txUrnInterruptMask |= 1 << q;
    else
        ahp->ah_txUrnInterruptMask &= ~(1 << q);
    setTxQInterrupts(ah, qi);

    return AH_TRUE;
}
#undef TU_TO_USEC

/*
 * Get the TXDP for the specified queue
 */
uint32_t
ar5212GetTxDP(struct ath_hal *ah, u_int q)
{
    HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
    return OS_REG_READ(ah, AR_QTXDP(q));
}

/*
 * Set the TxDP for the specified queue
 */
HAL_BOOL
ar5212SetTxDP(struct ath_hal *ah, u_int q, uint32_t txdp)
{
    HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
    HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);

    /*
     * Make sure that TXE is deasserted before setting the TXDP.  If TXE
     * is still asserted, setting TXDP will have no effect.
     */
    HALASSERT((OS_REG_READ(ah, AR_Q_TXE) & (1 << q)) == 0);

    OS_REG_WRITE(ah, AR_QTXDP(q), txdp);

    return AH_TRUE;
}

/*
 * Set Transmit Enable bits for the specified queue
 */
HAL_BOOL
ar5212StartTxDma(struct ath_hal *ah, u_int q)
{
    HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);

    HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);

    HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: queue %u\n", __func__, q);

    /* Check to be sure we're not enabling a q that has its TXD bit set. */
    HALASSERT((OS_REG_READ(ah, AR_Q_TXD) & (1 << q)) == 0);

    OS_REG_WRITE(ah, AR_Q_TXE, 1 << q);
    return AH_TRUE;
}

/*
 * Return the number of pending frames or 0 if the specified
 * queue is stopped.
 */
uint32_t
ar5212NumTxPending(struct ath_hal *ah, u_int q)
{
    uint32_t npend;

    HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
    HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);

    npend = OS_REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
    if (npend == 0) {
        /*
         * Pending frame count (PFC) can momentarily go to zero
         * while TXE remains asserted.  In other words a PFC of
         * zero is not sufficient to say that the queue has stopped.
         */
        if (OS_REG_READ(ah, AR_Q_TXE) & (1 << q))
            npend = 1;          /* arbitrarily return 1 */
    }
    return npend;
}

/*
 * Stop transmit on the specified queue
 */
HAL_BOOL
ar5212StopTxDma(struct ath_hal *ah, u_int q)
{
    u_int i;
    u_int wait;

    HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);

    HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);

    OS_REG_WRITE(ah, AR_Q_TXD, 1 << q);
    for (i = 1000; i != 0; i--) {
        if (ar5212NumTxPending(ah, q) == 0)
            break;
        OS_DELAY(100);          /* XXX get actual value */
    }
#ifdef AH_DEBUG
    if (i == 0) {
        HALDEBUG(ah, HAL_DEBUG_ANY,
            "%s: queue %u DMA did not stop in 100 msec\n", __func__, q);
        HALDEBUG(ah, HAL_DEBUG_ANY,
            "%s: QSTS 0x%x Q_TXE 0x%x Q_TXD 0x%x Q_CBR 0x%x\n", __func__,
            OS_REG_READ(ah, AR_QSTS(q)), OS_REG_READ(ah, AR_Q_TXE),
            OS_REG_READ(ah, AR_Q_TXD), OS_REG_READ(ah, AR_QCBRCFG(q)));
        HALDEBUG(ah, HAL_DEBUG_ANY,
            "%s: Q_MISC 0x%x Q_RDYTIMECFG 0x%x Q_RDYTIMESHDN 0x%x\n",
            __func__, OS_REG_READ(ah, AR_QMISC(q)),
            OS_REG_READ(ah, AR_QRDYTIMECFG(q)),
            OS_REG_READ(ah, AR_Q_RDYTIMESHDN));
    }
#endif /* AH_DEBUG */

    /* 2413+ and up can kill packets at the PCU level */
    if (ar5212NumTxPending(ah, q) &&
        (IS_2413(ah) || IS_5413(ah) || IS_2425(ah) || IS_2417(ah))) {
        uint32_t tsfLow, j;

        HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
            "%s: Num of pending TX Frames %d on Q %d\n",
            __func__, ar5212NumTxPending(ah, q), q);

        /* Kill last PCU Tx Frame */
        /* TODO - save off and restore current values of Q1/Q2? */
        for (j = 0; j < 2; j++) {
            tsfLow = OS_REG_READ(ah, AR_TSF_L32);
            OS_REG_WRITE(ah, AR_QUIET2, SM(100, AR_QUIET2_QUIET_PER) |
                SM(10, AR_QUIET2_QUIET_DUR));
            OS_REG_WRITE(ah, AR_QUIET1, AR_QUIET1_QUIET_ENABLE |
                SM(tsfLow >> 10, AR_QUIET1_NEXT_QUIET));
            if ((OS_REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10)) {
                break;
            }
            HALDEBUG(ah, HAL_DEBUG_ANY,
                "%s: TSF moved while trying to set quiet time "
                "TSF: 0x%08x\n", __func__, tsfLow);
            /* TSF shouldn't count twice or reg access is taking forever */
            HALASSERT(j < 1);
        }

        OS_REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);

        /* Allow the quiet mechanism to do its work */
        OS_DELAY(200);
        OS_REG_CLR_BIT(ah, AR_QUIET1, AR_QUIET1_QUIET_ENABLE);

        /* Give at least 1 millisec more to wait */
        wait = 100;

        /* Verify all transmit is dead */
        while (ar5212NumTxPending(ah, q)) {
            if ((--wait) == 0) {
                HALDEBUG(ah, HAL_DEBUG_ANY,
                    "%s: Failed to stop Tx DMA in %d msec after killing last frame\n",
                    __func__, wait);
                break;
            }
            OS_DELAY(10);
        }

        OS_REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);
    }

    OS_REG_WRITE(ah, AR_Q_TXD, 0);
    return (i != 0);
}

/*
 * Descriptor Access Functions
 */

#define VALID_PKT_TYPES \
    ((1<<HAL_PKT_TYPE_NORMAL)|(1<<HAL_PKT_TYPE_ATIM)|\
     (1<<HAL_PKT_TYPE_PSPOLL)|(1<<HAL_PKT_TYPE_PROBE_RESP)|\
     (1<<HAL_PKT_TYPE_BEACON))
#define isValidPktType(_t)      ((1<<(_t)) & VALID_PKT_TYPES)
#define VALID_TX_RATES \
    ((1<<0x0b)|(1<<0x0f)|(1<<0x0a)|(1<<0x0e)|(1<<0x09)|(1<<0x0d)|\
     (1<<0x08)|(1<<0x0c)|(1<<0x1b)|(1<<0x1a)|(1<<0x1e)|(1<<0x19)|\
     (1<<0x1d)|(1<<0x18)|(1<<0x1c))
#define isValidTxRate(_r)       ((1<<(_r)) & VALID_TX_RATES)

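/*
 * Set up the static per-frame transmit parameters in the first
 * descriptor of a frame: frame length, tx power, series 0 rate and
 * try count, key index, antenna mode, and optional RTS/CTS and
 * compression settings.
 */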
HAL_BOOL
ar5212SetupTxDesc(struct ath_hal *ah, struct ath_desc *ds,
    u_int pktLen,
    u_int hdrLen,
    HAL_PKT_TYPE type,
    u_int txPower,
    u_int txRate0, u_int txTries0,
    u_int keyIx,
    u_int antMode,
    u_int flags,
    u_int rtsctsRate,
    u_int rtsctsDuration,
    u_int compicvLen,
    u_int compivLen,
    u_int comp)
{
#define RTSCTS  (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)
    struct ar5212_desc *ads = AR5212DESC(ds);
    struct ath_hal_5212 *ahp = AH5212(ah);

    (void) hdrLen;

    HALASSERT(txTries0 != 0);
    HALASSERT(isValidPktType(type));
    HALASSERT(isValidTxRate(txRate0));
    HALASSERT((flags & RTSCTS) != RTSCTS);
    /* XXX validate antMode */

    txPower = (txPower + ahp->ah_txPowerIndexOffset);
    if (txPower > 63)
        txPower = 63;

    ads->ds_ctl0 = (pktLen & AR_FrameLen)
        | (txPower << AR_XmitPower_S)
        | (flags & HAL_TXDESC_VEOL ? AR_VEOL : 0)
        | (flags & HAL_TXDESC_CLRDMASK ? AR_ClearDestMask : 0)
        | SM(antMode, AR_AntModeXmit)
        | (flags & HAL_TXDESC_INTREQ ? AR_TxInterReq : 0)
        ;
    ads->ds_ctl1 = (type << AR_FrmType_S)
        | (flags & HAL_TXDESC_NOACK ? AR_NoAck : 0)
        | (comp << AR_CompProc_S)
        | (compicvLen << AR_CompICVLen_S)
        | (compivLen << AR_CompIVLen_S)
        ;
    ads->ds_ctl2 = SM(txTries0, AR_XmitDataTries0)
        | (flags & HAL_TXDESC_DURENA ? AR_DurUpdateEna : 0)
        ;
    ads->ds_ctl3 = (txRate0 << AR_XmitRate0_S)
        ;
    if (keyIx != HAL_TXKEYIX_INVALID) {
        /* XXX validate key index */
        ads->ds_ctl1 |= SM(keyIx, AR_DestIdx);
        ads->ds_ctl0 |= AR_DestIdxValid;
    }
    if (flags & RTSCTS) {
        if (!isValidTxRate(rtsctsRate)) {
            HALDEBUG(ah, HAL_DEBUG_ANY,
                "%s: invalid rts/cts rate 0x%x\n",
                __func__, rtsctsRate);
            return AH_FALSE;
        }
        /* XXX validate rtsctsDuration */
        ads->ds_ctl0 |= (flags & HAL_TXDESC_CTSENA ? AR_CTSEnable : 0)
            | (flags & HAL_TXDESC_RTSENA ? AR_RTSCTSEnable : 0)
            ;
        ads->ds_ctl2 |= SM(rtsctsDuration, AR_RTSCTSDuration);
        ads->ds_ctl3 |= (rtsctsRate << AR_RTSCTSRate_S);
    }
    return AH_TRUE;
#undef RTSCTS
}

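/*
 * Set up the optional multi-rate retry series (rates/tries 1-3) in a
 * descriptor previously initialized by ar5212SetupTxDesc.
 */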
HAL_BOOL
ar5212SetupXTxDesc(struct ath_hal *ah, struct ath_desc *ds,
    u_int txRate1, u_int txTries1,
    u_int txRate2, u_int txTries2,
    u_int txRate3, u_int txTries3)
{
    struct ar5212_desc *ads = AR5212DESC(ds);

    if (txTries1) {
        HALASSERT(isValidTxRate(txRate1));
        ads->ds_ctl2 |= SM(txTries1, AR_XmitDataTries1)
            | AR_DurUpdateEna
            ;
        ads->ds_ctl3 |= (txRate1 << AR_XmitRate1_S);
    }
    if (txTries2) {
        HALASSERT(isValidTxRate(txRate2));
        ads->ds_ctl2 |= SM(txTries2, AR_XmitDataTries2)
            | AR_DurUpdateEna
            ;
        ads->ds_ctl3 |= (txRate2 << AR_XmitRate2_S);
    }
    if (txTries3) {
        HALASSERT(isValidTxRate(txRate3));
        ads->ds_ctl2 |= SM(txTries3, AR_XmitDataTries3)
            | AR_DurUpdateEna
            ;
        ads->ds_ctl3 |= (txRate3 << AR_XmitRate3_S);
    }
    return AH_TRUE;
}

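/*
 * Request a tx interrupt when the frame for this descriptor completes.
 */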
void
ar5212IntrReqTxDesc(struct ath_hal *ah, struct ath_desc *ds)
{
    struct ar5212_desc *ads = AR5212DESC(ds);

#ifdef AH_NEED_DESC_SWAP
    ads->ds_ctl0 |= __bswap32(AR_TxInterReq);
#else
    ads->ds_ctl0 |= AR_TxInterReq;
#endif
}

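/*
 * Fill in the DMA fields (buffer address and segment length) of a tx
 * descriptor; descriptors after the first derive their control words
 * from the frame's first descriptor (ds0).
 */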
HAL_BOOL
ar5212FillTxDesc(struct ath_hal *ah, struct ath_desc *ds,
    HAL_DMA_ADDR *bufAddrList, uint32_t *segLenList, u_int qcuId,
    u_int descId, HAL_BOOL firstSeg, HAL_BOOL lastSeg,
    const struct ath_desc *ds0)
{
    struct ar5212_desc *ads = AR5212DESC(ds);
    uint32_t segLen = segLenList[0];

    HALASSERT((segLen &~ AR_BufLen) == 0);

    ds->ds_data = bufAddrList[0];

    if (firstSeg) {
        /*
         * First descriptor, don't clobber xmit control data
         * setup by ar5212SetupTxDesc.
         */
        ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_More);
    } else if (lastSeg) {               /* !firstSeg && lastSeg */
        /*
         * Last descriptor in a multi-descriptor frame,
         * copy the multi-rate transmit parameters from
         * the first frame for processing on completion.
         */
        ads->ds_ctl1 = segLen;
#ifdef AH_NEED_DESC_SWAP
        ads->ds_ctl0 = __bswap32(AR5212DESC_CONST(ds0)->ds_ctl0)
            & AR_TxInterReq;
        ads->ds_ctl2 = __bswap32(AR5212DESC_CONST(ds0)->ds_ctl2);
        ads->ds_ctl3 = __bswap32(AR5212DESC_CONST(ds0)->ds_ctl3);
#else
        ads->ds_ctl0 = AR5212DESC_CONST(ds0)->ds_ctl0 & AR_TxInterReq;
        ads->ds_ctl2 = AR5212DESC_CONST(ds0)->ds_ctl2;
        ads->ds_ctl3 = AR5212DESC_CONST(ds0)->ds_ctl3;
#endif
    } else {                            /* !firstSeg && !lastSeg */
        /*
         * Intermediate descriptor in a multi-descriptor frame.
         */
#ifdef AH_NEED_DESC_SWAP
        ads->ds_ctl0 = __bswap32(AR5212DESC_CONST(ds0)->ds_ctl0)
            & AR_TxInterReq;
#else
        ads->ds_ctl0 = AR5212DESC_CONST(ds0)->ds_ctl0 & AR_TxInterReq;
#endif
        ads->ds_ctl1 = segLen | AR_More;
        ads->ds_ctl2 = 0;
        ads->ds_ctl3 = 0;
    }
    ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
    return AH_TRUE;
}

#ifdef AH_NEED_DESC_SWAP
/* Swap transmit descriptor */
static __inline void
ar5212SwapTxDesc(struct ath_desc *ds)
{
    ds->ds_data = __bswap32(ds->ds_data);
    ds->ds_ctl0 = __bswap32(ds->ds_ctl0);
    ds->ds_ctl1 = __bswap32(ds->ds_ctl1);
    ds->ds_hw[0] = __bswap32(ds->ds_hw[0]);
    ds->ds_hw[1] = __bswap32(ds->ds_hw[1]);
    ds->ds_hw[2] = __bswap32(ds->ds_hw[2]);
    ds->ds_hw[3] = __bswap32(ds->ds_hw[3]);
}
#endif

/*
 * Processing of HW TX descriptor.
 */
HAL_STATUS
ar5212ProcTxDesc(struct ath_hal *ah,
    struct ath_desc *ds, struct ath_tx_status *ts)
{
    struct ar5212_desc *ads = AR5212DESC(ds);

#ifdef AH_NEED_DESC_SWAP
    if ((ads->ds_txstatus1 & __bswap32(AR_Done)) == 0)
        return HAL_EINPROGRESS;

    ar5212SwapTxDesc(ds);
#else
    if ((ads->ds_txstatus1 & AR_Done) == 0)
        return HAL_EINPROGRESS;
#endif

    /* Update software copies of the HW status */
    ts->ts_seqnum = MS(ads->ds_txstatus1, AR_SeqNum);
    ts->ts_tstamp = MS(ads->ds_txstatus0, AR_SendTimestamp);
    ts->ts_status = 0;
    if ((ads->ds_txstatus0 & AR_FrmXmitOK) == 0) {
        if (ads->ds_txstatus0 & AR_ExcessiveRetries)
            ts->ts_status |= HAL_TXERR_XRETRY;
        if (ads->ds_txstatus0 & AR_Filtered)
            ts->ts_status |= HAL_TXERR_FILT;
        if (ads->ds_txstatus0 & AR_FIFOUnderrun)
            ts->ts_status |= HAL_TXERR_FIFO;
    }
    /*
     * Extract the transmit rate used and mark the rate as
     * ``alternate'' if it wasn't the series 0 rate.
     */
    ts->ts_finaltsi = MS(ads->ds_txstatus1, AR_FinalTSIndex);
    switch (ts->ts_finaltsi) {
    case 0:
        ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate0);
        break;
    case 1:
        ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate1);
        break;
    case 2:
        ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate2);
        break;
    case 3:
        ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate3);
        break;
    }
    ts->ts_rssi = MS(ads->ds_txstatus1, AR_AckSigStrength);
    ts->ts_shortretry = MS(ads->ds_txstatus0, AR_RTSFailCnt);
    ts->ts_longretry = MS(ads->ds_txstatus0, AR_DataFailCnt);
    /*
     * The retry count has the number of un-acked tries for the
     * final series used.  When doing multi-rate retry we must
     * fixup the retry count by adding in the try counts for
     * each series that was fully-processed.  Beware that this
     * takes values from the try counts in the final descriptor.
     * These are not required by the hardware.  We assume they
     * are placed there by the driver as otherwise we have no
     * access and the driver can't do the calculation because it
     * doesn't know the descriptor format.
     */
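    /* NB: cases deliberately fall through to accumulate earlier series' tries */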
    switch (ts->ts_finaltsi) {
    case 3: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries2);
    case 2: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries1);
    case 1: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries0);
    }
    ts->ts_virtcol = MS(ads->ds_txstatus0, AR_VirtCollCnt);
    ts->ts_antenna = (ads->ds_txstatus1 & AR_XmitAtenna ? 2 : 1);

    return HAL_OK;
}

/*
 * Determine which tx queues need interrupt servicing.
 */
void
ar5212GetTxIntrQueue(struct ath_hal *ah, uint32_t *txqs)
{
    struct ath_hal_5212 *ahp = AH5212(ah);
    *txqs &= ahp->ah_intrTxqs;
    ahp->ah_intrTxqs &= ~(*txqs);
}

/*
 * Retrieve the rate table from the given TX completion descriptor
 */
HAL_BOOL
ar5212GetTxCompletionRates(struct ath_hal *ah, const struct ath_desc *ds0,
    int *rates, int *tries)
{
    const struct ar5212_desc *ads = AR5212DESC_CONST(ds0);

    rates[0] = MS(ads->ds_ctl3, AR_XmitRate0);
    rates[1] = MS(ads->ds_ctl3, AR_XmitRate1);
    rates[2] = MS(ads->ds_ctl3, AR_XmitRate2);
    rates[3] = MS(ads->ds_ctl3, AR_XmitRate3);

    tries[0] = MS(ads->ds_ctl2, AR_XmitDataTries0);
    tries[1] = MS(ads->ds_ctl2, AR_XmitDataTries1);
    tries[2] = MS(ads->ds_ctl2, AR_XmitDataTries2);
    tries[3] = MS(ads->ds_ctl2, AR_XmitDataTries3);

    return AH_TRUE;
}

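/*
 * Set the link (next descriptor) pointer in a tx descriptor.
 */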
void
ar5212SetTxDescLink(struct ath_hal *ah, void *ds, uint32_t link)
{
    struct ar5212_desc *ads = AR5212DESC(ds);

    ads->ds_link = link;
}

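/*
 * Return the link (next descriptor) pointer from a tx descriptor.
 */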
void
ar5212GetTxDescLink(struct ath_hal *ah, void *ds, uint32_t *link)
{
    struct ar5212_desc *ads = AR5212DESC(ds);

    *link = ads->ds_link;
}

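/*
 * Return a pointer to the link field of a tx descriptor.
 */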
void
ar5212GetTxDescLinkPtr(struct ath_hal *ah, void *ds, uint32_t **linkptr)
{
    struct ar5212_desc *ads = AR5212DESC(ds);

    *linkptr = &ads->ds_link;
}