1 /*-
2 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13 * redistribution must be conditioned upon including a substantially
14 * similar Disclaimer requirement for further binary redistribution.
15 *
16 * NO WARRANTY
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGES.
28 */
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32
33 /*
34 * Driver for the Atheros Wireless LAN controller.
35 *
36 * This software is derived from work of Atsushi Onoe; his contribution
37 * is greatly appreciated.
38 */
39
40 #include "opt_inet.h"
41 #include "opt_ath.h"
42 #include "opt_wlan.h"
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/sysctl.h>
47 #include <sys/mbuf.h>
48 #include <sys/malloc.h>
49 #include <sys/lock.h>
50 #include <sys/mutex.h>
51 #include <sys/kernel.h>
52 #include <sys/socket.h>
53 #include <sys/sockio.h>
54 #include <sys/errno.h>
55 #include <sys/callout.h>
56 #include <sys/bus.h>
57 #include <sys/endian.h>
58 #include <sys/kthread.h>
59 #include <sys/taskqueue.h>
60 #include <sys/priv.h>
61
62 #include <machine/bus.h>
63
64 #include <net/if.h>
65 #include <net/if_dl.h>
66 #include <net/if_media.h>
67 #include <net/if_types.h>
68 #include <net/if_arp.h>
69 #include <net/ethernet.h>
70 #include <net/if_llc.h>
71
72 #include <net80211/ieee80211_var.h>
73 #include <net80211/ieee80211_regdomain.h>
74 #ifdef IEEE80211_SUPPORT_SUPERG
75 #include <net80211/ieee80211_superg.h>
76 #endif
77 #ifdef IEEE80211_SUPPORT_TDMA
78 #include <net80211/ieee80211_tdma.h>
79 #endif
80
81 #include <net/bpf.h>
82
83 #ifdef INET
84 #include <netinet/in.h>
85 #include <netinet/if_ether.h>
86 #endif
87
88 #include <dev/ath/if_athvar.h>
89 #include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */
90 #include <dev/ath/ath_hal/ah_diagcodes.h>
91
92 #include <dev/ath/if_ath_debug.h>
93
94 #ifdef ATH_TX99_DIAG
95 #include <dev/ath/ath_tx99/ath_tx99.h>
96 #endif
97
98 #include <dev/ath/if_ath_misc.h>
99 #include <dev/ath/if_ath_tx.h>
100 #include <dev/ath/if_ath_tx_ht.h>
101
102 /*
103 * Whether to use the 11n rate scenario functions or not
104 */
105 static inline int
106 ath_tx_is_11n(struct ath_softc *sc)
107 {
108 return (sc->sc_ah->ah_magic == 0x20065416);
109 }
110
111 void
112 ath_txfrag_cleanup(struct ath_softc *sc,
113 ath_bufhead *frags, struct ieee80211_node *ni)
114 {
115 struct ath_buf *bf, *next;
116
117 ATH_TXBUF_LOCK_ASSERT(sc);
118
119 STAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
120 /* NB: bf assumed clean */
121 STAILQ_REMOVE_HEAD(frags, bf_list);
122 STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
123 ieee80211_node_decref(ni);
124 }
125 }
126
127 /*
128 * Setup xmit of a fragmented frame. Allocate a buffer
129 * for each frag and bump the node reference count to
130 * reflect the held reference to be setup by ath_tx_start.
131 */
132 int
133 ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
134 struct mbuf *m0, struct ieee80211_node *ni)
135 {
136 struct mbuf *m;
137 struct ath_buf *bf;
138
139 ATH_TXBUF_LOCK(sc);
140 for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
141 bf = _ath_getbuf_locked(sc);
142 if (bf == NULL) { /* out of buffers, cleanup */
143 ath_txfrag_cleanup(sc, frags, ni);
144 break;
145 }
146 ieee80211_node_incref(ni);
147 STAILQ_INSERT_TAIL(frags, bf, bf_list);
148 }
149 ATH_TXBUF_UNLOCK(sc);
150
151 return !STAILQ_EMPTY(frags);
152 }
153
154 /*
155 * Reclaim mbuf resources. For fragmented frames we
156 * need to claim each frag chained with m_nextpkt.
157 */
158 void
159 ath_freetx(struct mbuf *m)
160 {
161 struct mbuf *next;
162
163 do {
164 next = m->m_nextpkt;
165 m->m_nextpkt = NULL;
166 m_freem(m);
167 } while ((m = next) != NULL);
168 }
169
/*
 * Map an mbuf chain for DMA and record the segment list in the
 * ath_buf.  If the chain needs more than ATH_TXDESC segments it is
 * linearized with m_collapse() and re-loaded.  On success bf->bf_m,
 * bf->bf_segs and bf->bf_nseg are valid and the DMA map is synced
 * for pre-write; on failure the mbuf chain is freed and an errno
 * is returned.
 */
static int
ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		/* Force the linearize path below by faking an over-count. */
		bf->bf_nseg = ATH_TXDESC+1;
	} else if (error != 0) {
		sc->sc_stats.ast_tx_busdma++;
		ath_freetx(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (bf->bf_nseg > ATH_TXDESC) {		/* too many desc's, linearize */
		sc->sc_stats.ast_tx_linear++;
		m = m_collapse(m0, M_NOWAIT, ATH_TXDESC);
		if (m == NULL) {
			ath_freetx(m0);
			sc->sc_stats.ast_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		/* Re-load the collapsed chain; it should now fit. */
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.ast_tx_busdma++;
			ath_freetx(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= ATH_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.ast_tx_nodata++;
		ath_freetx(m0);
		return EIO;
	}
	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	/* NB: m0 may have been replaced by the collapsed chain above */
	bf->bf_m = m0;

	return 0;
}
227
228 static void
229 ath_tx_chaindesclist(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf)
230 {
231 struct ath_hal *ah = sc->sc_ah;
232 struct ath_desc *ds, *ds0;
233 int i;
234
235 /*
236 * Fillin the remainder of the descriptor info.
237 */
238 ds0 = ds = bf->bf_desc;
239 for (i = 0; i < bf->bf_nseg; i++, ds++) {
240 ds->ds_data = bf->bf_segs[i].ds_addr;
241 if (i == bf->bf_nseg - 1)
242 ds->ds_link = 0;
243 else
244 ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1);
245 ath_hal_filltxdesc(ah, ds
246 , bf->bf_segs[i].ds_len /* segment length */
247 , i == 0 /* first segment */
248 , i == bf->bf_nseg - 1 /* last segment */
249 , ds0 /* first descriptor */
250 );
251 DPRINTF(sc, ATH_DEBUG_XMIT,
252 "%s: %d: %08x %08x %08x %08x %08x %08x\n",
253 __func__, i, ds->ds_link, ds->ds_data,
254 ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]);
255 }
256
257 }
258
/*
 * Hand a fully formed frame off for transmission: chain the
 * hardware descriptors, then either queue it directly on a h/w
 * queue (kicking the DMA engine) or, for the software mcast queue,
 * append it to the s/w list to be pushed out at the next SWBA.
 */
static void
ath_tx_handoff(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;

	/* Fill in the details in the descriptor list */
	ath_tx_chaindesclist(sc, txq, bf);

	/*
	 * Insert the frame on the outbound list and pass it on
	 * to the hardware.  Multicast frames buffered for power
	 * save stations and transmit from the CAB queue are stored
	 * on a s/w only queue and loaded on to the CAB queue in
	 * the SWBA handler since frames only go out on DTIM and
	 * to avoid possible races.
	 */
	ATH_TXQ_LOCK(txq);
	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	     ("busy status 0x%x", bf->bf_flags));
	if (txq->axq_qnum != ATH_TXQ_SWQ) {
#ifdef IEEE80211_SUPPORT_TDMA
		int qbusy;

		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		/* Sample queue-enabled state once; used by both paths below */
		qbusy = ath_hal_txqenabled(ah, txq->axq_qnum);
		if (txq->axq_link == NULL) {
			/*
			 * Be careful writing the address to TXDP.  If
			 * the tx q is enabled then this write will be
			 * ignored.  Normally this is not an issue but
			 * when tdma is in use and the q is beacon gated
			 * this race can occur.  If the q is busy then
			 * defer the work to later--either when another
			 * packet comes along or when we prepare a beacon
			 * frame at SWBA.
			 */
			if (!qbusy) {
				ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
				txq->axq_flags &= ~ATH_TXQ_PUTPENDING;
				DPRINTF(sc, ATH_DEBUG_XMIT,
				    "%s: TXDP[%u] = %p (%p) depth %d\n",
				    __func__, txq->axq_qnum,
				    (caddr_t)bf->bf_daddr, bf->bf_desc,
				    txq->axq_depth);
			} else {
				/* Remember that TXDP still needs writing. */
				txq->axq_flags |= ATH_TXQ_PUTPENDING;
				DPRINTF(sc, ATH_DEBUG_TDMA | ATH_DEBUG_XMIT,
				    "%s: Q%u busy, defer enable\n", __func__,
				    txq->axq_qnum);
			}
		} else {
			/* Append to the in-flight chain via the last ds_link. */
			*txq->axq_link = bf->bf_daddr;
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
			    txq->axq_qnum, txq->axq_link,
			    (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth);
			if ((txq->axq_flags & ATH_TXQ_PUTPENDING) && !qbusy) {
				/*
				 * The q was busy when we previously tried
				 * to write the address of the first buffer
				 * in the chain.  Since it's not busy now
				 * handle this chore.  We are certain the
				 * buffer at the front is the right one since
				 * axq_link is NULL only when the buffer list
				 * is/was empty.
				 */
				ath_hal_puttxbuf(ah, txq->axq_qnum,
					STAILQ_FIRST(&txq->axq_q)->bf_daddr);
				txq->axq_flags &= ~ATH_TXQ_PUTPENDING;
				DPRINTF(sc, ATH_DEBUG_TDMA | ATH_DEBUG_XMIT,
				    "%s: Q%u restarted\n", __func__,
				    txq->axq_qnum);
			}
		}
#else
		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		if (txq->axq_link == NULL) {
			/* Queue empty: point TXDP at this buffer directly. */
			ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: TXDP[%u] = %p (%p) depth %d\n",
			    __func__, txq->axq_qnum,
			    (caddr_t)bf->bf_daddr, bf->bf_desc,
			    txq->axq_depth);
		} else {
			/* Otherwise chain onto the previous frame's last desc. */
			*txq->axq_link = bf->bf_daddr;
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
			    txq->axq_qnum, txq->axq_link,
			    (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth);
		}
#endif /* IEEE80211_SUPPORT_TDMA */
		/* Remember the link field of this frame's last descriptor. */
		txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link;
		ath_hal_txstart(ah, txq->axq_qnum);
	} else {
		/* Software (mcast) queue: drained from the SWBA handler. */
		if (txq->axq_link != NULL) {
			struct ath_buf *last = ATH_TXQ_LAST(txq);
			struct ieee80211_frame *wh;

			/* mark previous frame */
			wh = mtod(last->bf_m, struct ieee80211_frame *);
			wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
			/* Re-sync: we just modified the mapped frame header. */
			bus_dmamap_sync(sc->sc_dmat, last->bf_dmamap,
			    BUS_DMASYNC_PREWRITE);

			/* link descriptor */
			*txq->axq_link = bf->bf_daddr;
		}
		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link;
	}
	ATH_TXQ_UNLOCK(txq);
}
371
372 static int
373 ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,
374 struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen, int *keyix)
375 {
376 if (iswep) {
377 const struct ieee80211_cipher *cip;
378 struct ieee80211_key *k;
379
380 /*
381 * Construct the 802.11 header+trailer for an encrypted
382 * frame. The only reason this can fail is because of an
383 * unknown or unsupported cipher/key type.
384 */
385 k = ieee80211_crypto_encap(ni, m0);
386 if (k == NULL) {
387 /*
388 * This can happen when the key is yanked after the
389 * frame was queued. Just discard the frame; the
390 * 802.11 layer counts failures and provides
391 * debugging/diagnostics.
392 */
393 return 0;
394 }
395 /*
396 * Adjust the packet + header lengths for the crypto
397 * additions and calculate the h/w key index. When
398 * a s/w mic is done the frame will have had any mic
399 * added to it prior to entry so m0->m_pkthdr.len will
400 * account for it. Otherwise we need to add it to the
401 * packet length.
402 */
403 cip = k->wk_cipher;
404 (*hdrlen) += cip->ic_header;
405 (*pktlen) += cip->ic_header + cip->ic_trailer;
406 /* NB: frags always have any TKIP MIC done in s/w */
407 if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)
408 (*pktlen) += cip->ic_miclen;
409 (*keyix) = k->wk_keyix;
410 } else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
411 /*
412 * Use station key cache slot, if assigned.
413 */
414 (*keyix) = ni->ni_ucastkey.wk_keyix;
415 if ((*keyix) == IEEE80211_KEYIX_NONE)
416 (*keyix) = HAL_TXKEYIX_INVALID;
417 } else
418 (*keyix) = HAL_TXKEYIX_INVALID;
419
420 return 1;
421 }
422
423 static uint8_t
424 ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt,
425 int rix, int cix, int shortPreamble)
426 {
427 uint8_t ctsrate;
428
429 /*
430 * CTS transmit rate is derived from the transmit rate
431 * by looking in the h/w rate table. We must also factor
432 * in whether or not a short preamble is to be used.
433 */
434 /* NB: cix is set above where RTS/CTS is enabled */
435 KASSERT(cix != 0xff, ("cix not setup"));
436 ctsrate = rt->info[cix].rateCode;
437
438 /* XXX this should only matter for legacy rates */
439 if (shortPreamble)
440 ctsrate |= rt->info[cix].shortPreamble;
441
442 return ctsrate;
443 }
444
445
446 /*
447 * Calculate the RTS/CTS duration for legacy frames.
448 */
449 static int
450 ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix,
451 int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt,
452 int flags)
453 {
454 int ctsduration = 0;
455
456 /* This mustn't be called for HT modes */
457 if (rt->info[cix].phy == IEEE80211_T_HT) {
458 printf("%s: HT rate where it shouldn't be (0x%x)\n",
459 __func__, rt->info[cix].rateCode);
460 return -1;
461 }
462
463 /*
464 * Compute the transmit duration based on the frame
465 * size and the size of an ACK frame. We call into the
466 * HAL to do the computation since it depends on the
467 * characteristics of the actual PHY being used.
468 *
469 * NB: CTS is assumed the same size as an ACK so we can
470 * use the precalculated ACK durations.
471 */
472 if (shortPreamble) {
473 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */
474 ctsduration += rt->info[cix].spAckDuration;
475 ctsduration += ath_hal_computetxtime(ah,
476 rt, pktlen, rix, AH_TRUE);
477 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */
478 ctsduration += rt->info[rix].spAckDuration;
479 } else {
480 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */
481 ctsduration += rt->info[cix].lpAckDuration;
482 ctsduration += ath_hal_computetxtime(ah,
483 rt, pktlen, rix, AH_FALSE);
484 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */
485 ctsduration += rt->info[rix].lpAckDuration;
486 }
487
488 return ctsduration;
489 }
490
/*
 * Prepare and hand off a frame for transmission on behalf of the
 * given node: classify the frame, pick rates and flags, compute
 * duration and RTS/CTS parameters, build the TX descriptor and
 * queue it via ath_tx_handoff().  On error the mbuf chain is freed
 * and an errno is returned; on success ownership of bf/m0 passes
 * to the TX path.  The caller's node reference is recorded in
 * bf->bf_node.
 */
int
ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf,
    struct mbuf *m0)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams;
	int error, iswep, ismcast, isfrag, ismrr;
	int keyix, hdrlen, pktlen, try0;
	u_int8_t rix, txrate, ctsrate;
	u_int8_t cix = 0xff;		/* NB: silence compiler */
	struct ath_desc *ds;
	struct ath_txq *txq;
	struct ieee80211_frame *wh;
	u_int subtype, flags, ctsduration;
	HAL_PKT_TYPE atype;
	const HAL_RATE_TABLE *rt;
	HAL_BOOL shortPreamble;
	struct ath_node *an;
	u_int pri;
	/* 11n rate scenario state: rate[] holds rix values, try[] counts */
	uint8_t try[4], rate[4];

	bzero(try, sizeof(try));
	bzero(rate, sizeof(rate));

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	isfrag = m0->m_flags & M_FRAG;
	hdrlen = ieee80211_anyhdrsize(wh);
	/*
	 * Packet length must not include any
	 * pad bytes; deduct them here.
	 */
	pktlen = m0->m_pkthdr.len - (hdrlen & 3);

	/* Handle encryption twiddling if needed */
	/* NB: may adjust hdrlen/pktlen and set keyix */
	if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen, &pktlen, &keyix)) {
		ath_freetx(m0);
		return EIO;
	}

	/* packet header may have moved, reset our local pointer */
	wh = mtod(m0, struct ieee80211_frame *);

	pktlen += IEEE80211_CRC_LEN;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = ath_tx_dmasetup(sc, bf, m0);
	if (error != 0)
		return error;
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	wh = mtod(m0, struct ieee80211_frame *);

	/* setup descriptors */
	ds = bf->bf_desc;
	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));

	/*
	 * NB: the 802.11 layer marks whether or not we should
	 * use short preamble based on the current mode and
	 * negotiated parameters.
	 */
	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
	    (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
		shortPreamble = AH_TRUE;
		sc->sc_stats.ast_tx_shortpre++;
	} else {
		shortPreamble = AH_FALSE;
	}

	an = ATH_NODE(ni);
	flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
	ismrr = 0;				/* default no multi-rate retry*/
	pri = M_WME_GETAC(m0);			/* honor classification */
	/* XXX use txparams instead of fixed values */
	/*
	 * Calculate Atheros packet type from IEEE80211 packet header,
	 * setup for rate calculations, and select h/w transmit queue.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
			atype = HAL_PKT_TYPE_BEACON;
		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			atype = HAL_PKT_TYPE_PROBE_RESP;
		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
			atype = HAL_PKT_TYPE_ATIM;
		else
			atype = HAL_PKT_TYPE_NORMAL;	/* XXX */
		/* Management frames go at the fixed management rate. */
		rix = an->an_mgmtrix;
		txrate = rt->info[rix].rateCode;
		if (shortPreamble)
			txrate |= rt->info[rix].shortPreamble;
		try0 = ATH_TXMGTTRY;
		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
		break;
	case IEEE80211_FC0_TYPE_CTL:
		atype = HAL_PKT_TYPE_PSPOLL;	/* stop setting of duration */
		rix = an->an_mgmtrix;
		txrate = rt->info[rix].rateCode;
		if (shortPreamble)
			txrate |= rt->info[rix].shortPreamble;
		try0 = ATH_TXMGTTRY;
		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
		break;
	case IEEE80211_FC0_TYPE_DATA:
		atype = HAL_PKT_TYPE_NORMAL;	/* default */
		/*
		 * Data frames: multicast frames go out at a fixed rate,
		 * EAPOL frames use the mgmt frame rate; otherwise consult
		 * the rate control module for the rate to use.
		 */
		if (ismcast) {
			rix = an->an_mcastrix;
			txrate = rt->info[rix].rateCode;
			if (shortPreamble)
				txrate |= rt->info[rix].shortPreamble;
			try0 = 1;
		} else if (m0->m_flags & M_EAPOL) {
			/* XXX? maybe always use long preamble? */
			rix = an->an_mgmtrix;
			txrate = rt->info[rix].rateCode;
			if (shortPreamble)
				txrate |= rt->info[rix].shortPreamble;
			try0 = ATH_TXMAXTRY;	/* XXX?too many? */
		} else {
			ath_rate_findrate(sc, an, shortPreamble, pktlen,
				&rix, &try0, &txrate);
			sc->sc_txrix = rix;		/* for LED blinking */
			sc->sc_lastdatarix = rix;	/* for fast frames */
			/* A capped try count means rate control wants MRR. */
			if (try0 != ATH_TXMAXTRY)
				ismrr = 1;
		}
		if (cap->cap_wmeParams[pri].wmep_noackPolicy)
			flags |= HAL_TXDESC_NOACK;
		break;
	default:
		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		/* XXX statistic */
		ath_freetx(m0);
		return EIO;
	}
	txq = sc->sc_ac2q[pri];

	/*
	 * When servicing one or more stations in power-save mode
	 * (or) if there is some mcast data waiting on the mcast
	 * queue (to prevent out of order delivery) multicast
	 * frames must be buffered until after the beacon.
	 */
	if (ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth))
		txq = &avp->av_mcastq;

	/*
	 * Calculate miscellaneous flags.
	 */
	if (ismcast) {
		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
	} else if (pktlen > vap->iv_rtsthreshold &&
	    (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
		cix = rt->info[rix].controlRate;
		sc->sc_stats.ast_tx_rts++;
	}
	if (flags & HAL_TXDESC_NOACK)		/* NB: avoid double counting */
		sc->sc_stats.ast_tx_noack++;
#ifdef IEEE80211_SUPPORT_TDMA
	if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
		DPRINTF(sc, ATH_DEBUG_TDMA,
		    "%s: discard frame, ACK required w/ TDMA\n", __func__);
		sc->sc_stats.ast_tdma_ack++;
		ath_freetx(m0);
		return EIO;
	}
#endif

	/*
	 * If 802.11g protection is enabled, determine whether
	 * to use RTS/CTS or just CTS.  Note that this is only
	 * done for OFDM unicast frames.
	 */
	if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
	    rt->info[rix].phy == IEEE80211_T_OFDM &&
	    (flags & HAL_TXDESC_NOACK) == 0) {
		/* XXX fragments must use CCK rates w/ protection */
		if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
			flags |= HAL_TXDESC_RTSENA;
		else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
			flags |= HAL_TXDESC_CTSENA;
		/*
		 * NOTE(review): both branches below currently assign the
		 * same protection rate; the comment explains why the
		 * frag case does not use a higher CCK rate (yet).
		 */
		if (isfrag) {
			/*
			 * For frags it would be desirable to use the
			 * highest CCK rate for RTS/CTS.  But stations
			 * farther away may detect it at a lower CCK rate
			 * so use the configured protection rate instead
			 * (for now).
			 */
			cix = rt->info[sc->sc_protrix].controlRate;
		} else
			cix = rt->info[sc->sc_protrix].controlRate;
		sc->sc_stats.ast_tx_protect++;
	}

#if 0
	/*
	 * If 11n protection is enabled and it's a HT frame,
	 * enable RTS.
	 *
	 * XXX ic_htprotmode or ic_curhtprotmode?
	 * XXX should it_htprotmode only matter if ic_curhtprotmode
	 * XXX indicates it's not a HT pure environment?
	 */
	if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) &&
	    rt->info[rix].phy == IEEE80211_T_HT &&
	    (flags & HAL_TXDESC_NOACK) == 0) {
		cix = rt->info[sc->sc_protrix].controlRate;
		flags |= HAL_TXDESC_RTSENA;
		sc->sc_stats.ast_tx_htprotect++;
	}
#endif

	/*
	 * Calculate duration.  This logically belongs in the 802.11
	 * layer but it lacks sufficient information to calculate it.
	 */
	if ((flags & HAL_TXDESC_NOACK) == 0 &&
	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
		u_int16_t dur;
		if (shortPreamble)
			dur = rt->info[rix].spAckDuration;
		else
			dur = rt->info[rix].lpAckDuration;
		if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
			dur += dur;		/* additional SIFS+ACK */
			KASSERT(m0->m_nextpkt != NULL, ("no fragment"));
			/*
			 * Include the size of next fragment so NAV is
			 * updated properly.  The last fragment uses only
			 * the ACK duration
			 */
			dur += ath_hal_computetxtime(ah, rt,
					m0->m_nextpkt->m_pkthdr.len,
					rix, shortPreamble);
		}
		if (isfrag) {
			/*
			 * Force hardware to use computed duration for next
			 * fragment by disabling multi-rate retry which updates
			 * duration based on the multi-rate duration table.
			 */
			ismrr = 0;
			try0 = ATH_TXMGTTRY;	/* XXX? */
		}
		*(u_int16_t *)wh->i_dur = htole16(dur);
	}

	/*
	 * Calculate RTS/CTS rate and duration if needed.
	 */
	ctsduration = 0;
	if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) {
		ctsrate = ath_tx_get_rtscts_rate(ah, rt, rix, cix, shortPreamble);

		/* The 11n chipsets do ctsduration calculations for you */
		if (! ath_tx_is_11n(sc))
			ctsduration = ath_tx_calc_ctsduration(ah, rix, cix, shortPreamble,
			    pktlen, rt, flags);
		/*
		 * Must disable multi-rate retry when using RTS/CTS.
		 */
		ismrr = 0;
		try0 = ATH_TXMGTTRY;		/* XXX */
	} else
		ctsrate = 0;

	/*
	 * At this point we are committed to sending the frame
	 * and we don't need to look at m_nextpkt; clear it in
	 * case this frame is part of frag chain.
	 */
	m0->m_nextpkt = NULL;

	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
		ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len,
		    sc->sc_hwmap[rix].ieeerate, -1);

	if (ieee80211_radiotap_active_vap(vap)) {
		/* Fill in the radiotap TX header for bpf taps. */
		u_int64_t tsf = ath_hal_gettsf64(ah);

		sc->sc_tx_th.wt_tsf = htole64(tsf);
		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		if (isfrag)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}

	/*
	 * Determine if a tx interrupt should be generated for
	 * this descriptor.  We take a tx interrupt to reap
	 * descriptors when the h/w hits an EOL condition or
	 * when the descriptor is specifically marked to generate
	 * an interrupt.  We periodically mark descriptors in this
	 * way to insure timely replenishing of the supply needed
	 * for sending frames.  Defering interrupts reduces system
	 * load and potentially allows more concurrent work to be
	 * done but if done to aggressively can cause senders to
	 * backup.
	 *
	 * NB: use >= to deal with sc_txintrperiod changing
	 *     dynamically through sysctl.
	 */
	if (flags & HAL_TXDESC_INTREQ) {
		txq->axq_intrcnt = 0;
	} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
		flags |= HAL_TXDESC_INTREQ;
		txq->axq_intrcnt = 0;
	}

	if (ath_tx_is_11n(sc)) {
		/* Seed series 0 of the 11n rate scenario with rix/try0. */
		rate[0] = rix;
		try[0] = try0;
	}

	/*
	 * Formulate first tx descriptor with tx controls.
	 */
	/* XXX check return value? */
	/* XXX is this ok to call for 11n descriptors? */
	/* XXX or should it go through the first, next, last 11n calls? */
	ath_hal_setuptxdesc(ah, ds
		, pktlen		/* packet length */
		, hdrlen		/* header length */
		, atype			/* Atheros packet type */
		, ni->ni_txpower	/* txpower */
		, txrate, try0		/* series 0 rate/tries */
		, keyix			/* key cache index */
		, sc->sc_txantenna	/* antenna mode */
		, flags			/* flags */
		, ctsrate		/* rts/cts rate */
		, ctsduration		/* rts/cts duration */
	);
	bf->bf_txflags = flags;
	/*
	 * Setup the multi-rate retry state only when we're
	 * going to use it.  This assumes ath_hal_setuptxdesc
	 * initializes the descriptors (so we don't have to)
	 * when the hardware supports multi-rate retry and
	 * we don't use it.
	 */
	if (ismrr) {
		if (ath_tx_is_11n(sc))
			ath_rate_getxtxrates(sc, an, rix, rate, try);
		else
			ath_rate_setupxtxdesc(sc, an, ds, shortPreamble, rix);
	}

	if (ath_tx_is_11n(sc)) {
		/* Program the full 11n rate scenario into the descriptor. */
		ath_buf_set_rate(sc, ni, bf, pktlen, flags, ctsrate, (atype == HAL_PKT_TYPE_PSPOLL), rate, try);
	}

	ath_tx_handoff(sc, txq, bf);
	return 0;
}
871
/*
 * Transmit a frame using caller-supplied (bpf/injection) transmit
 * parameters instead of deriving them from the frame contents.
 * Rates, tries, power and RTS/CTS behavior all come from 'params'.
 * On error the mbuf chain is freed and an errno is returned; on
 * success ownership of bf/m0 passes to the TX path.
 */
static int
ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
	struct ath_buf *bf, struct mbuf *m0,
	const struct ieee80211_bpf_params *params)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211vap *vap = ni->ni_vap;
	int error, ismcast, ismrr;
	int keyix, hdrlen, pktlen, try0, txantenna;
	u_int8_t rix, cix, txrate, ctsrate, rate1, rate2, rate3;
	struct ieee80211_frame *wh;
	u_int flags, ctsduration;
	HAL_PKT_TYPE atype;
	const HAL_RATE_TABLE *rt;
	struct ath_desc *ds;
	u_int pri;
	/* 11n rate scenario state: rate[] holds rix values, try[] counts */
	uint8_t try[4], rate[4];

	bzero(try, sizeof(try));
	bzero(rate, sizeof(rate));

	wh = mtod(m0, struct ieee80211_frame *);
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	hdrlen = ieee80211_anyhdrsize(wh);
	/*
	 * Packet length must not include any
	 * pad bytes; deduct them here.
	 */
	/* XXX honor IEEE80211_BPF_DATAPAD */
	pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;

	/* Handle encryption twiddling if needed */
	/* NB: injection never uses s/w-mic frag handling (isfrag=0) */
	if (! ath_tx_tag_crypto(sc, ni, m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0, &hdrlen, &pktlen, &keyix)) {
		ath_freetx(m0);
		return EIO;
	}
	/* packet header may have moved, reset our local pointer */
	wh = mtod(m0, struct ieee80211_frame *);

	error = ath_tx_dmasetup(sc, bf, m0);
	if (error != 0)
		return error;
	m0 = bf->bf_m;				/* NB: may have changed */
	wh = mtod(m0, struct ieee80211_frame *);
	bf->bf_node = ni;			/* NB: held reference */

	flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
	flags |= HAL_TXDESC_INTREQ;		/* force interrupt */
	if (params->ibp_flags & IEEE80211_BPF_RTS)
		flags |= HAL_TXDESC_RTSENA;
	else if (params->ibp_flags & IEEE80211_BPF_CTS)
		flags |= HAL_TXDESC_CTSENA;
	/* XXX leave ismcast to injector? */
	if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)
		flags |= HAL_TXDESC_NOACK;

	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
	/* Series 0 rate comes straight from the caller's parameters. */
	rix = ath_tx_findrix(sc, params->ibp_rate0);
	txrate = rt->info[rix].rateCode;
	if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
		txrate |= rt->info[rix].shortPreamble;
	sc->sc_txrix = rix;
	try0 = params->ibp_try0;
	/* Any non-zero series-1 try count enables multi-rate retry. */
	ismrr = (params->ibp_try1 != 0);
	txantenna = params->ibp_pri >> 2;
	if (txantenna == 0)			/* XXX? */
		txantenna = sc->sc_txantenna;

	ctsduration = 0;
	if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) {
		cix = ath_tx_findrix(sc, params->ibp_ctsrate);
		ctsrate = ath_tx_get_rtscts_rate(ah, rt, rix, cix, params->ibp_flags & IEEE80211_BPF_SHORTPRE);
		/* The 11n chipsets do ctsduration calculations for you */
		if (! ath_tx_is_11n(sc))
			ctsduration = ath_tx_calc_ctsduration(ah, rix, cix,
			    params->ibp_flags & IEEE80211_BPF_SHORTPRE, pktlen,
			    rt, flags);
		/*
		 * Must disable multi-rate retry when using RTS/CTS.
		 */
		ismrr = 0;			/* XXX */
	} else
		ctsrate = 0;

	pri = params->ibp_pri & 3;
	/*
	 * NB: we mark all packets as type PSPOLL so the h/w won't
	 * set the sequence number, duration, etc.
	 */
	atype = HAL_PKT_TYPE_PSPOLL;

	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
		ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
		    sc->sc_hwmap[rix].ieeerate, -1);

	if (ieee80211_radiotap_active_vap(vap)) {
		/* Fill in the radiotap TX header for bpf taps. */
		u_int64_t tsf = ath_hal_gettsf64(ah);

		sc->sc_tx_th.wt_tsf = htole64(tsf);
		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		if (m0->m_flags & M_FRAG)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}

	/*
	 * Formulate first tx descriptor with tx controls.
	 */
	ds = bf->bf_desc;
	/* XXX check return value? */
	ath_hal_setuptxdesc(ah, ds
		, pktlen		/* packet length */
		, hdrlen		/* header length */
		, atype			/* Atheros packet type */
		, params->ibp_power	/* txpower */
		, txrate, try0		/* series 0 rate/tries */
		, keyix			/* key cache index */
		, txantenna		/* antenna mode */
		, flags			/* flags */
		, ctsrate		/* rts/cts rate */
		, ctsduration		/* rts/cts duration */
	);
	bf->bf_txflags = flags;

	if (ath_tx_is_11n(sc)) {
		/* Build the 11n rate scenario from the caller's series. */
		rate[0] = ath_tx_findrix(sc, params->ibp_rate0);
		try[0] = params->ibp_try0;

		if (ismrr) {
			/* Remember, rate[] is actually an array of rix's -adrian */
			rate[0] = ath_tx_findrix(sc, params->ibp_rate0);
			rate[1] = ath_tx_findrix(sc, params->ibp_rate1);
			rate[2] = ath_tx_findrix(sc, params->ibp_rate2);
			rate[3] = ath_tx_findrix(sc, params->ibp_rate3);

			try[0] = params->ibp_try0;
			try[1] = params->ibp_try1;
			try[2] = params->ibp_try2;
			try[3] = params->ibp_try3;
		}
	} else {
		if (ismrr) {
			/* Legacy path: program series 1-3 into the descriptor. */
			rix = ath_tx_findrix(sc, params->ibp_rate1);
			rate1 = rt->info[rix].rateCode;
			if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
				rate1 |= rt->info[rix].shortPreamble;
			if (params->ibp_try2) {
				rix = ath_tx_findrix(sc, params->ibp_rate2);
				rate2 = rt->info[rix].rateCode;
				if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
					rate2 |= rt->info[rix].shortPreamble;
			} else
				rate2 = 0;
			if (params->ibp_try3) {
				rix = ath_tx_findrix(sc, params->ibp_rate3);
				rate3 = rt->info[rix].rateCode;
				if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
					rate3 |= rt->info[rix].shortPreamble;
			} else
				rate3 = 0;
			ath_hal_setupxtxdesc(ah, ds
				, rate1, params->ibp_try1	/* series 1 */
				, rate2, params->ibp_try2	/* series 2 */
				, rate3, params->ibp_try3	/* series 3 */
			);
		}
	}

	if (ath_tx_is_11n(sc)) {
		/*
		 * notice that rix doesn't include any of the "magic" flags txrate
		 * does for communicating "other stuff" to the HAL.
		 */
		ath_buf_set_rate(sc, ni, bf, pktlen, flags, ctsrate, (atype == HAL_PKT_TYPE_PSPOLL), rate, try);
	}

	/* NB: no buffered multicast in power save support */
	ath_tx_handoff(sc, sc->sc_ac2q[pri], bf);
	return 0;
}
1061
1062 int
1063 ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
1064 const struct ieee80211_bpf_params *params)
1065 {
1066 struct ieee80211com *ic = ni->ni_ic;
1067 struct ifnet *ifp = ic->ic_ifp;
1068 struct ath_softc *sc = ifp->if_softc;
1069 struct ath_buf *bf;
1070 int error;
1071
1072 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
1073 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, %s", __func__,
1074 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ?
1075 "!running" : "invalid");
1076 m_freem(m);
1077 error = ENETDOWN;
1078 goto bad;
1079 }
1080 /*
1081 * Grab a TX buffer and associated resources.
1082 */
1083 bf = ath_getbuf(sc);
1084 if (bf == NULL) {
1085 sc->sc_stats.ast_tx_nobuf++;
1086 m_freem(m);
1087 error = ENOBUFS;
1088 goto bad;
1089 }
1090
1091 if (params == NULL) {
1092 /*
1093 * Legacy path; interpret frame contents to decide
1094 * precisely how to send the frame.
1095 */
1096 if (ath_tx_start(sc, ni, bf, m)) {
1097 error = EIO; /* XXX */
1098 goto bad2;
1099 }
1100 } else {
1101 /*
1102 * Caller supplied explicit parameters to use in
1103 * sending the frame.
1104 */
1105 if (ath_tx_raw_start(sc, ni, bf, m, params)) {
1106 error = EIO; /* XXX */
1107 goto bad2;
1108 }
1109 }
1110 sc->sc_wd_timer = 5;
1111 ifp->if_opackets++;
1112 sc->sc_stats.ast_tx_raw++;
1113
1114 return 0;
1115 bad2:
1116 ATH_TXBUF_LOCK(sc);
1117 STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
1118 ATH_TXBUF_UNLOCK(sc);
1119 bad:
1120 ifp->if_oerrors++;
1121 sc->sc_stats.ast_tx_raw_fail++;
1122 ieee80211_free_node(ni);
1123 return error;
1124 }
/* Cache object: 62eb3f43179946c069f092ab63a88e0e */