FreeBSD/Linux Kernel Cross Reference
sys/dev/ath/if_ath_rx.c


    1 /*-
    2  * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer,
   10  *    without modification.
   11  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
   12  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
   13  *    redistribution must be conditioned upon including a substantially
   14  *    similar Disclaimer requirement for further binary redistribution.
   15  *
   16  * NO WARRANTY
   17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   18  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   19  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
   20  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
   21  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
   22  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
   25  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   27  * THE POSSIBILITY OF SUCH DAMAGES.
   28  */
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD: releng/11.2/sys/dev/ath/if_ath_rx.c 331722 2018-03-29 02:50:57Z eadler $");
   32 
   33 /*
   34  * Driver for the Atheros Wireless LAN controller.
   35  *
   36  * This software is derived from work of Atsushi Onoe; his contribution
   37  * is greatly appreciated.
   38  */
   39 
   40 #include "opt_inet.h"
   41 #include "opt_ath.h"
   42 /*
   43  * This is needed for register operations which are performed
    44  * by the driver - e.g., calls to ath_hal_gettsf32().
    45  *
    46  * It's also required for any AH_DEBUG checks in here, e.g. the
   47  * module dependencies.
   48  */
   49 #include "opt_ah.h"
   50 #include "opt_wlan.h"
   51 
   52 #include <sys/param.h>
   53 #include <sys/systm.h>
   54 #include <sys/sysctl.h>
   55 #include <sys/mbuf.h>
   56 #include <sys/malloc.h>
   57 #include <sys/lock.h>
   58 #include <sys/mutex.h>
   59 #include <sys/kernel.h>
   60 #include <sys/socket.h>
   61 #include <sys/sockio.h>
   62 #include <sys/errno.h>
   63 #include <sys/callout.h>
   64 #include <sys/bus.h>
   65 #include <sys/endian.h>
   66 #include <sys/kthread.h>
   67 #include <sys/taskqueue.h>
   68 #include <sys/priv.h>
   69 #include <sys/module.h>
   70 #include <sys/ktr.h>
   71 #include <sys/smp.h>    /* for mp_ncpus */
   72 
   73 #include <machine/bus.h>
   74 
   75 #include <net/if.h>
   76 #include <net/if_var.h>
   77 #include <net/if_dl.h>
   78 #include <net/if_media.h>
   79 #include <net/if_types.h>
   80 #include <net/if_arp.h>
   81 #include <net/ethernet.h>
   82 #include <net/if_llc.h>
   83 
   84 #include <net80211/ieee80211_var.h>
   85 #include <net80211/ieee80211_regdomain.h>
   86 #ifdef IEEE80211_SUPPORT_SUPERG
   87 #include <net80211/ieee80211_superg.h>
   88 #endif
   89 #ifdef IEEE80211_SUPPORT_TDMA
   90 #include <net80211/ieee80211_tdma.h>
   91 #endif
   92 
   93 #include <net/bpf.h>
   94 
   95 #ifdef INET
   96 #include <netinet/in.h>
   97 #include <netinet/if_ether.h>
   98 #endif
   99 
  100 #include <dev/ath/if_athvar.h>
  101 #include <dev/ath/ath_hal/ah_devid.h>           /* XXX for softled */
  102 #include <dev/ath/ath_hal/ah_diagcodes.h>
  103 
  104 #include <dev/ath/if_ath_debug.h>
  105 #include <dev/ath/if_ath_misc.h>
  106 #include <dev/ath/if_ath_tsf.h>
  107 #include <dev/ath/if_ath_tx.h>
  108 #include <dev/ath/if_ath_sysctl.h>
  109 #include <dev/ath/if_ath_led.h>
  110 #include <dev/ath/if_ath_keycache.h>
  111 #include <dev/ath/if_ath_rx.h>
  112 #include <dev/ath/if_ath_beacon.h>
  113 #include <dev/ath/if_athdfs.h>
  114 #include <dev/ath/if_ath_descdma.h>
  115 
  116 #ifdef ATH_TX99_DIAG
  117 #include <dev/ath/ath_tx99/ath_tx99.h>
  118 #endif
  119 
  120 #ifdef  ATH_DEBUG_ALQ
  121 #include <dev/ath/if_ath_alq.h>
  122 #endif
  123 
  124 #include <dev/ath/if_ath_lna_div.h>
  125 
  126 /*
  127  * Calculate the receive filter according to the
  128  * operating mode and state:
  129  *
  130  * o always accept unicast, broadcast, and multicast traffic
  131  * o accept PHY error frames when hardware doesn't have MIB support
  132  *   to count and we need them for ANI (sta mode only until recently)
  133  *   and we are not scanning (ANI is disabled)
   134  *   NB: older HALs add rx filter bits out of sight and we need to
  135  *       blindly preserve them
  136  * o probe request frames are accepted only when operating in
  137  *   hostap, adhoc, mesh, or monitor modes
  138  * o enable promiscuous mode
  139  *   - when in monitor mode
  140  *   - if interface marked PROMISC (assumes bridge setting is filtered)
  141  * o accept beacons:
  142  *   - when operating in station mode for collecting rssi data when
  143  *     the station is otherwise quiet, or
  144  *   - when operating in adhoc mode so the 802.11 layer creates
  145  *     node table entries for peers,
  146  *   - when scanning
  147  *   - when doing s/w beacon miss (e.g. for ap+sta)
  148  *   - when operating in ap mode in 11g to detect overlapping bss that
  149  *     require protection
  150  *   - when operating in mesh mode to detect neighbors
  151  * o accept control frames:
  152  *   - when in monitor mode
  153  * XXX HT protection for 11n
  154  */
  155 u_int32_t
  156 ath_calcrxfilter(struct ath_softc *sc)
  157 {
  158         struct ieee80211com *ic = &sc->sc_ic;
  159         u_int32_t rfilt;
  160 
  161         rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
  162         if (!sc->sc_needmib && !sc->sc_scanning)
  163                 rfilt |= HAL_RX_FILTER_PHYERR;
  164         if (ic->ic_opmode != IEEE80211_M_STA)
  165                 rfilt |= HAL_RX_FILTER_PROBEREQ;
  166         /* XXX ic->ic_monvaps != 0? */
  167         if (ic->ic_opmode == IEEE80211_M_MONITOR || ic->ic_promisc > 0)
  168                 rfilt |= HAL_RX_FILTER_PROM;
  169 
  170         /*
  171          * Only listen to all beacons if we're scanning.
  172          *
  173          * Otherwise we only really need to hear beacons from
  174          * our own BSSID.
  175          *
  176          * IBSS? software beacon miss? Just receive all beacons.
  177          * We need to hear beacons/probe requests from everyone so
  178          * we can merge ibss.
  179          */
  180         if (ic->ic_opmode == IEEE80211_M_IBSS || sc->sc_swbmiss) {
  181                 rfilt |= HAL_RX_FILTER_BEACON;
  182         } else if (ic->ic_opmode == IEEE80211_M_STA) {
  183                 if (sc->sc_do_mybeacon && ! sc->sc_scanning) {
  184                         rfilt |= HAL_RX_FILTER_MYBEACON;
  185                 } else { /* scanning, non-mybeacon chips */
  186                         rfilt |= HAL_RX_FILTER_BEACON;
  187                 }
  188         }
  189 
  190         /*
  191          * NB: We don't recalculate the rx filter when
  192          * ic_protmode changes; otherwise we could do
  193          * this only when ic_protmode != NONE.
  194          */
  195         if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
  196             IEEE80211_IS_CHAN_ANYG(ic->ic_curchan))
  197                 rfilt |= HAL_RX_FILTER_BEACON;
  198 
  199         /*
  200          * Enable hardware PS-POLL RX only for hostap mode;
  201          * STA mode sends PS-POLL frames but never
  202          * receives them.
  203          */
  204         if (ath_hal_getcapability(sc->sc_ah, HAL_CAP_PSPOLL,
  205             0, NULL) == HAL_OK &&
  206             ic->ic_opmode == IEEE80211_M_HOSTAP)
  207                 rfilt |= HAL_RX_FILTER_PSPOLL;
  208 
  209         if (sc->sc_nmeshvaps) {
  210                 rfilt |= HAL_RX_FILTER_BEACON;
  211                 if (sc->sc_hasbmatch)
  212                         rfilt |= HAL_RX_FILTER_BSSID;
  213                 else
  214                         rfilt |= HAL_RX_FILTER_PROM;
  215         }
  216         if (ic->ic_opmode == IEEE80211_M_MONITOR)
  217                 rfilt |= HAL_RX_FILTER_CONTROL;
  218 
  219         /*
  220          * Enable RX of compressed BAR frames only when doing
  221          * 802.11n. Required for A-MPDU.
  222          */
  223         if (IEEE80211_IS_CHAN_HT(ic->ic_curchan))
  224                 rfilt |= HAL_RX_FILTER_COMPBAR;
  225 
  226         /*
  227          * Enable radar PHY errors if requested by the
  228          * DFS module.
  229          */
  230         if (sc->sc_dodfs)
  231                 rfilt |= HAL_RX_FILTER_PHYRADAR;
  232 
  233         /*
  234          * Enable spectral PHY errors if requested by the
  235          * spectral module.
  236          */
  237         if (sc->sc_dospectral)
  238                 rfilt |= HAL_RX_FILTER_PHYRADAR;
  239 
  240         DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s\n",
  241             __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode]);
  242         return rfilt;
  243 }
  244 
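/*
 * Editor's note: a minimal usage sketch (illustration only, not part of the
 * upstream file; the function name is hypothetical) showing how the filter
 * word computed above is typically pushed to the hardware, much as
 * ath_mode_init() in if_ath.c does.
 */
#if 0	/* illustration only */
static void
example_apply_rxfilter(struct ath_softc *sc)
{
	u_int32_t rfilt;

	rfilt = ath_calcrxfilter(sc);		/* compute filter for current state */
	ath_hal_setrxfilter(sc->sc_ah, rfilt);	/* program the MAC RX filter */
}
#endif
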
  245 static int
  246 ath_legacy_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
  247 {
  248         struct ath_hal *ah = sc->sc_ah;
  249         int error;
  250         struct mbuf *m;
  251         struct ath_desc *ds;
  252 
  253         /* XXX TODO: ATH_RX_LOCK_ASSERT(sc); */
  254 
  255         m = bf->bf_m;
  256         if (m == NULL) {
  257                 /*
  258                  * NB: by assigning a page to the rx dma buffer we
  259                  * implicitly satisfy the Atheros requirement that
  260                  * this buffer be cache-line-aligned and sized to be
   261                  * a multiple of the cache line size.  Not doing this
  262                  * causes weird stuff to happen (for the 5210 at least).
  263                  */
  264                 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
  265                 if (m == NULL) {
  266                         DPRINTF(sc, ATH_DEBUG_ANY,
  267                                 "%s: no mbuf/cluster\n", __func__);
  268                         sc->sc_stats.ast_rx_nombuf++;
  269                         return ENOMEM;
  270                 }
  271                 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
  272 
  273                 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
  274                                              bf->bf_dmamap, m,
  275                                              bf->bf_segs, &bf->bf_nseg,
  276                                              BUS_DMA_NOWAIT);
  277                 if (error != 0) {
  278                         DPRINTF(sc, ATH_DEBUG_ANY,
  279                             "%s: bus_dmamap_load_mbuf_sg failed; error %d\n",
  280                             __func__, error);
  281                         sc->sc_stats.ast_rx_busdma++;
  282                         m_freem(m);
  283                         return error;
  284                 }
  285                 KASSERT(bf->bf_nseg == 1,
  286                         ("multi-segment packet; nseg %u", bf->bf_nseg));
  287                 bf->bf_m = m;
  288         }
  289         bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD);
  290 
  291         /*
  292          * Setup descriptors.  For receive we always terminate
  293          * the descriptor list with a self-linked entry so we'll
  294          * not get overrun under high load (as can happen with a
  295          * 5212 when ANI processing enables PHY error frames).
  296          *
   297          * To ensure the last descriptor is self-linked we create
  298          * each descriptor as self-linked and add it to the end.  As
  299          * each additional descriptor is added the previous self-linked
  300          * entry is ``fixed'' naturally.  This should be safe even
  301          * if DMA is happening.  When processing RX interrupts we
  302          * never remove/process the last, self-linked, entry on the
   303          * descriptor list.  This ensures the hardware always has
  304          * someplace to write a new frame.
  305          */
  306         /*
  307          * 11N: we can no longer afford to self link the last descriptor.
  308          * MAC acknowledges BA status as long as it copies frames to host
  309          * buffer (or rx fifo). This can incorrectly acknowledge packets
  310          * to a sender if last desc is self-linked.
  311          */
  312         ds = bf->bf_desc;
  313         if (sc->sc_rxslink)
  314                 ds->ds_link = bf->bf_daddr;     /* link to self */
  315         else
  316                 ds->ds_link = 0;                /* terminate the list */
  317         ds->ds_data = bf->bf_segs[0].ds_addr;
  318         ath_hal_setuprxdesc(ah, ds
  319                 , m->m_len              /* buffer size */
  320                 , 0
  321         );
  322 
  323         if (sc->sc_rxlink != NULL)
  324                 *sc->sc_rxlink = bf->bf_daddr;
  325         sc->sc_rxlink = &ds->ds_link;
  326         return 0;
  327 }
  328 
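/*
 * Editor's note: an illustration (not part of the upstream file) of the
 * self-linked RX chain maintained above.  sc_rxlink always points at the
 * ds_link field of the current tail descriptor, so appending a buffer is
 * just a store of its bf_daddr through sc_rxlink:
 *
 *   before append:  ... -> D[n-1] -> D[n] -> D[n]           (tail links to self)
 *   after append:   ... -> D[n-1] -> D[n] -> D[n+1] -> D[n+1]
 *
 * The previously self-linked tail is thereby ``fixed'' to point at the new
 * entry, which becomes the new tail (self-linked, or 0-terminated when
 * sc_rxslink is clear on 11n parts).
 */
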
  329 /*
  330  * Intercept management frames to collect beacon rssi data
  331  * and to do ibss merges.
  332  */
  333 void
  334 ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
  335         int subtype, const struct ieee80211_rx_stats *rxs, int rssi, int nf)
  336 {
  337         struct ieee80211vap *vap = ni->ni_vap;
  338         struct ath_softc *sc = vap->iv_ic->ic_softc;
  339         uint64_t tsf_beacon_old, tsf_beacon;
  340         uint64_t nexttbtt;
  341         int64_t tsf_delta;
  342         int32_t tsf_delta_bmiss;
  343         int32_t tsf_remainder;
  344         uint64_t tsf_beacon_target;
  345         int tsf_intval;
  346 
  347         tsf_beacon_old = ((uint64_t) le32dec(ni->ni_tstamp.data + 4)) << 32;
  348         tsf_beacon_old |= le32dec(ni->ni_tstamp.data);
  349 
  350 #define TU_TO_TSF(_tu)  (((u_int64_t)(_tu)) << 10)
  351         tsf_intval = 1;
  352         if (ni->ni_intval > 0) {
  353                 tsf_intval = TU_TO_TSF(ni->ni_intval);
  354         }
  355 #undef  TU_TO_TSF
  356 
  357         /*
  358          * Call up first so subsequent work can use information
  359          * potentially stored in the node (e.g. for ibss merge).
  360          */
  361         ATH_VAP(vap)->av_recv_mgmt(ni, m, subtype, rxs, rssi, nf);
  362         switch (subtype) {
  363         case IEEE80211_FC0_SUBTYPE_BEACON:
  364 
  365                 /*
  366                  * Only do the following processing if it's for
  367                  * the current BSS.
  368                  *
  369                  * In scan and IBSS mode we receive all beacons,
  370                  * which means we need to filter out stuff
  371                  * that isn't for us or we'll end up constantly
  372                  * trying to sync / merge to BSSes that aren't
  373                  * actually us.
  374                  */
  375                 if (IEEE80211_ADDR_EQ(ni->ni_bssid, vap->iv_bss->ni_bssid)) {
  376                         /* update rssi statistics for use by the hal */
  377                         /* XXX unlocked check against vap->iv_bss? */
  378                         ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi);
  379 
  380 
  381                         tsf_beacon = ((uint64_t) le32dec(ni->ni_tstamp.data + 4)) << 32;
  382                         tsf_beacon |= le32dec(ni->ni_tstamp.data);
  383 
  384                         nexttbtt = ath_hal_getnexttbtt(sc->sc_ah);
  385 
  386                         /*
  387                          * Let's calculate the delta and remainder, so we can see
  388                          * if the beacon timer from the AP is varying by more than
  389                          * a few TU.  (Which would be a huge, huge problem.)
  390                          */
  391                         tsf_delta = (long long) tsf_beacon - (long long) tsf_beacon_old;
  392 
  393                         tsf_delta_bmiss = tsf_delta / tsf_intval;
  394 
  395                         /*
  396                          * If our delta is greater than half the beacon interval,
  397                          * let's round the bmiss value up to the next beacon
   398          * interval.  I.e., we're running really, really early
  399                          * on the next beacon.
  400                          */
  401                         if (tsf_delta % tsf_intval > (tsf_intval / 2))
  402                                 tsf_delta_bmiss ++;
  403 
  404                         tsf_beacon_target = tsf_beacon_old +
  405                             (((unsigned long long) tsf_delta_bmiss) * (long long) tsf_intval);
  406 
  407                         /*
  408                          * The remainder using '%' is between 0 .. intval-1.
  409                          * If we're actually running too fast, then the remainder
  410                          * will be some large number just under intval-1.
  411                          * So we need to look at whether we're running
  412                          * before or after the target beacon interval
  413                          * and if we are, modify how we do the remainder
  414                          * calculation.
  415                          */
  416                         if (tsf_beacon < tsf_beacon_target) {
  417                                 tsf_remainder =
  418                                     -(tsf_intval - ((tsf_beacon - tsf_beacon_old) % tsf_intval));
  419                         } else {
  420                                 tsf_remainder = (tsf_beacon - tsf_beacon_old) % tsf_intval;
  421                         }
  422 
  423                         DPRINTF(sc, ATH_DEBUG_BEACON, "%s: old_tsf=%llu, new_tsf=%llu, target_tsf=%llu, delta=%lld, bmiss=%d, remainder=%d\n",
  424                             __func__,
  425                             (unsigned long long) tsf_beacon_old,
  426                             (unsigned long long) tsf_beacon,
  427                             (unsigned long long) tsf_beacon_target,
  428                             (long long) tsf_delta,
  429                             tsf_delta_bmiss,
  430                             tsf_remainder);
  431 
  432                         DPRINTF(sc, ATH_DEBUG_BEACON, "%s: tsf=%llu, nexttbtt=%llu, delta=%d\n",
  433                             __func__,
  434                             (unsigned long long) tsf_beacon,
  435                             (unsigned long long) nexttbtt,
  436                             (int32_t) tsf_beacon - (int32_t) nexttbtt + tsf_intval);
  437 
  438                         /* We only do syncbeacon on STA VAPs; not on IBSS */
  439                         if (vap->iv_opmode == IEEE80211_M_STA &&
  440                             sc->sc_syncbeacon &&
  441                             ni == vap->iv_bss &&
  442                             (vap->iv_state == IEEE80211_S_RUN || vap->iv_state == IEEE80211_S_SLEEP)) {
  443                                 DPRINTF(sc, ATH_DEBUG_BEACON,
  444                                     "%s: syncbeacon=1; syncing\n",
  445                                     __func__);
  446                                 /*
  447                                  * Resync beacon timers using the tsf of the beacon
  448                                  * frame we just received.
  449                                  */
  450                                 ath_beacon_config(sc, vap);
  451                                 sc->sc_syncbeacon = 0;
  452                         }
  453                 }
  454 
  455                 /* fall thru... */
  456         case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
  457                 if (vap->iv_opmode == IEEE80211_M_IBSS &&
  458                     vap->iv_state == IEEE80211_S_RUN &&
  459                     ieee80211_ibss_merge_check(ni)) {
  460                         uint32_t rstamp = sc->sc_lastrs->rs_tstamp;
  461                         uint64_t tsf = ath_extend_tsf(sc, rstamp,
  462                                 ath_hal_gettsf64(sc->sc_ah));
  463                         /*
  464                          * Handle ibss merge as needed; check the tsf on the
  465                          * frame before attempting the merge.  The 802.11 spec
   466          * says the station should change its bssid to match
  467                          * the oldest station with the same ssid, where oldest
  468                          * is determined by the tsf.  Note that hardware
  469                          * reconfiguration happens through callback to
  470                          * ath_newstate as the state machine will go from
  471                          * RUN -> RUN when this happens.
  472                          */
  473                         if (le64toh(ni->ni_tstamp.tsf) >= tsf) {
  474                                 DPRINTF(sc, ATH_DEBUG_STATE,
  475                                     "ibss merge, rstamp %u tsf %ju "
  476                                     "tstamp %ju\n", rstamp, (uintmax_t)tsf,
  477                                     (uintmax_t)ni->ni_tstamp.tsf);
  478                                 (void) ieee80211_ibss_merge(ni);
  479                         }
  480                 }
  481                 break;
  482         }
  483 }
  484 
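/*
 * Editor's note: a worked example (illustration only) of the beacon TSF
 * arithmetic above, assuming a beacon interval of 100 TU:
 *
 *   tsf_intval        = 100 << 10            = 102400 us
 *   tsf_beacon_old    = 1000000 us, tsf_beacon = 1205000 us
 *   tsf_delta         = 205000 us
 *   tsf_delta_bmiss   = 205000 / 102400      = 2 beacon intervals
 *   (205000 % 102400  = 200, not > 51200, so no round-up)
 *   tsf_beacon_target = 1000000 + 2 * 102400 = 1204800 us
 *   tsf_remainder     = 205000 % 102400      = 200 us
 *
 * i.e. the beacon arrived 200 us after its expected slot, two intervals
 * after the previously recorded beacon.
 */
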
  485 #ifdef  ATH_ENABLE_RADIOTAP_VENDOR_EXT
  486 static void
  487 ath_rx_tap_vendor(struct ath_softc *sc, struct mbuf *m,
  488     const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf)
  489 {
  490 
  491         /* Fill in the extension bitmap */
  492         sc->sc_rx_th.wr_ext_bitmap = htole32(1 << ATH_RADIOTAP_VENDOR_HEADER);
  493 
  494         /* Fill in the vendor header */
  495         sc->sc_rx_th.wr_vh.vh_oui[0] = 0x7f;
  496         sc->sc_rx_th.wr_vh.vh_oui[1] = 0x03;
  497         sc->sc_rx_th.wr_vh.vh_oui[2] = 0x00;
  498 
  499         /* XXX what should this be? */
  500         sc->sc_rx_th.wr_vh.vh_sub_ns = 0;
  501         sc->sc_rx_th.wr_vh.vh_skip_len =
  502             htole16(sizeof(struct ath_radiotap_vendor_hdr));
  503 
  504         /* General version info */
  505         sc->sc_rx_th.wr_v.vh_version = 1;
  506 
  507         sc->sc_rx_th.wr_v.vh_rx_chainmask = sc->sc_rxchainmask;
  508 
  509         /* rssi */
  510         sc->sc_rx_th.wr_v.rssi_ctl[0] = rs->rs_rssi_ctl[0];
  511         sc->sc_rx_th.wr_v.rssi_ctl[1] = rs->rs_rssi_ctl[1];
  512         sc->sc_rx_th.wr_v.rssi_ctl[2] = rs->rs_rssi_ctl[2];
  513         sc->sc_rx_th.wr_v.rssi_ext[0] = rs->rs_rssi_ext[0];
  514         sc->sc_rx_th.wr_v.rssi_ext[1] = rs->rs_rssi_ext[1];
  515         sc->sc_rx_th.wr_v.rssi_ext[2] = rs->rs_rssi_ext[2];
  516 
  517         /* evm */
  518         sc->sc_rx_th.wr_v.evm[0] = rs->rs_evm0;
  519         sc->sc_rx_th.wr_v.evm[1] = rs->rs_evm1;
  520         sc->sc_rx_th.wr_v.evm[2] = rs->rs_evm2;
  521         /* These are only populated from the AR9300 or later */
  522         sc->sc_rx_th.wr_v.evm[3] = rs->rs_evm3;
  523         sc->sc_rx_th.wr_v.evm[4] = rs->rs_evm4;
  524 
  525         /* direction */
  526         sc->sc_rx_th.wr_v.vh_flags = ATH_VENDOR_PKT_RX;
  527 
  528         /* RX rate */
  529         sc->sc_rx_th.wr_v.vh_rx_hwrate = rs->rs_rate;
  530 
  531         /* RX flags */
  532         sc->sc_rx_th.wr_v.vh_rs_flags = rs->rs_flags;
  533 
  534         if (rs->rs_isaggr)
  535                 sc->sc_rx_th.wr_v.vh_flags |= ATH_VENDOR_PKT_ISAGGR;
  536         if (rs->rs_moreaggr)
  537                 sc->sc_rx_th.wr_v.vh_flags |= ATH_VENDOR_PKT_MOREAGGR;
  538 
  539         /* phyerr info */
  540         if (rs->rs_status & HAL_RXERR_PHY) {
  541                 sc->sc_rx_th.wr_v.vh_phyerr_code = rs->rs_phyerr;
  542                 sc->sc_rx_th.wr_v.vh_flags |= ATH_VENDOR_PKT_RXPHYERR;
  543         } else {
  544                 sc->sc_rx_th.wr_v.vh_phyerr_code = 0xff;
  545         }
  546         sc->sc_rx_th.wr_v.vh_rs_status = rs->rs_status;
  547         sc->sc_rx_th.wr_v.vh_rssi = rs->rs_rssi;
  548 }
  549 #endif  /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */
  550 
  551 static void
  552 ath_rx_tap(struct ath_softc *sc, struct mbuf *m,
  553         const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf)
  554 {
  555 #define CHAN_HT20       htole32(IEEE80211_CHAN_HT20)
  556 #define CHAN_HT40U      htole32(IEEE80211_CHAN_HT40U)
  557 #define CHAN_HT40D      htole32(IEEE80211_CHAN_HT40D)
  558 #define CHAN_HT         (CHAN_HT20|CHAN_HT40U|CHAN_HT40D)
  559         const HAL_RATE_TABLE *rt;
  560         uint8_t rix;
  561 
  562         rt = sc->sc_currates;
  563         KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
  564         rix = rt->rateCodeToIndex[rs->rs_rate];
  565         sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate;
  566         sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags;
  567 #ifdef AH_SUPPORT_AR5416
  568         sc->sc_rx_th.wr_chan_flags &= ~CHAN_HT;
  569         if (rs->rs_status & HAL_RXERR_PHY) {
  570                 /*
  571                  * PHY error - make sure the channel flags
  572                  * reflect the actual channel configuration,
  573                  * not the received frame.
  574                  */
  575                 if (IEEE80211_IS_CHAN_HT40U(sc->sc_curchan))
  576                         sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U;
  577                 else if (IEEE80211_IS_CHAN_HT40D(sc->sc_curchan))
  578                         sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D;
  579                 else if (IEEE80211_IS_CHAN_HT20(sc->sc_curchan))
  580                         sc->sc_rx_th.wr_chan_flags |= CHAN_HT20;
  581         } else if (sc->sc_rx_th.wr_rate & IEEE80211_RATE_MCS) { /* HT rate */
  582                 struct ieee80211com *ic = &sc->sc_ic;
  583 
  584                 if ((rs->rs_flags & HAL_RX_2040) == 0)
  585                         sc->sc_rx_th.wr_chan_flags |= CHAN_HT20;
  586                 else if (IEEE80211_IS_CHAN_HT40U(ic->ic_curchan))
  587                         sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U;
  588                 else
  589                         sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D;
  590                 if ((rs->rs_flags & HAL_RX_GI) == 0)
  591                         sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI;
  592         }
  593 
  594 #endif
  595         sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(sc, rs->rs_tstamp, tsf));
  596         if (rs->rs_status & HAL_RXERR_CRC)
  597                 sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS;
  598         /* XXX propagate other error flags from descriptor */
  599         sc->sc_rx_th.wr_antnoise = nf;
  600         sc->sc_rx_th.wr_antsignal = nf + rs->rs_rssi;
  601         sc->sc_rx_th.wr_antenna = rs->rs_antenna;
  602 #undef CHAN_HT
  603 #undef CHAN_HT20
  604 #undef CHAN_HT40U
  605 #undef CHAN_HT40D
  606 }
  607 
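/*
 * Editor's note: a minimal sketch (illustration only; the function name is
 * hypothetical) of one common way to rebuild a full 64-bit receive timestamp
 * from a 32-bit hardware snapshot - the sort of extension ath_extend_tsf()
 * performs when rs_tstamp carries the low 32 bits of the TSF (an assumption
 * here; some parts report fewer timestamp bits).
 */
#if 0	/* illustration only */
static uint64_t
example_extend_tsf32(uint32_t rstamp, uint64_t tsf64)
{
	/*
	 * If the low 32 bits of the TSF wrapped between the frame being
	 * timestamped and the TSF register read, step back one period.
	 */
	if ((tsf64 & 0xffffffffULL) < rstamp)
		tsf64 -= 0x100000000ULL;
	return ((tsf64 & ~0xffffffffULL) | rstamp);
}
#endif
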
  608 static void
  609 ath_handle_micerror(struct ieee80211com *ic,
  610         struct ieee80211_frame *wh, int keyix)
  611 {
  612         struct ieee80211_node *ni;
  613 
  614         /* XXX recheck MIC to deal w/ chips that lie */
  615         /* XXX discard MIC errors on !data frames */
  616         ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
  617         if (ni != NULL) {
  618                 ieee80211_notify_michael_failure(ni->ni_vap, wh, keyix);
  619                 ieee80211_free_node(ni);
  620         }
  621 }
  622 
  623 /*
  624  * Process a single packet.
  625  *
  626  * The mbuf must already be synced, unmapped and removed from bf->bf_m
  627  * by this stage.
  628  *
  629  * The mbuf must be consumed by this routine - either passed up the
  630  * net80211 stack, put on the holding queue, or freed.
  631  */
  632 int
  633 ath_rx_pkt(struct ath_softc *sc, struct ath_rx_status *rs, HAL_STATUS status,
  634     uint64_t tsf, int nf, HAL_RX_QUEUE qtype, struct ath_buf *bf,
  635     struct mbuf *m)
  636 {
  637         uint64_t rstamp;
  638         int len, type;
  639         struct ieee80211com *ic = &sc->sc_ic;
  640         struct ieee80211_node *ni;
  641         int is_good = 0;
  642         struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
  643 
  644         /*
  645          * Calculate the correct 64 bit TSF given
  646          * the TSF64 register value and rs_tstamp.
  647          */
  648         rstamp = ath_extend_tsf(sc, rs->rs_tstamp, tsf);
  649 
  650         /* These aren't specifically errors */
  651 #ifdef  AH_SUPPORT_AR5416
  652         if (rs->rs_flags & HAL_RX_GI)
  653                 sc->sc_stats.ast_rx_halfgi++;
  654         if (rs->rs_flags & HAL_RX_2040)
  655                 sc->sc_stats.ast_rx_2040++;
  656         if (rs->rs_flags & HAL_RX_DELIM_CRC_PRE)
  657                 sc->sc_stats.ast_rx_pre_crc_err++;
  658         if (rs->rs_flags & HAL_RX_DELIM_CRC_POST)
  659                 sc->sc_stats.ast_rx_post_crc_err++;
  660         if (rs->rs_flags & HAL_RX_DECRYPT_BUSY)
  661                 sc->sc_stats.ast_rx_decrypt_busy_err++;
  662         if (rs->rs_flags & HAL_RX_HI_RX_CHAIN)
  663                 sc->sc_stats.ast_rx_hi_rx_chain++;
  664         if (rs->rs_flags & HAL_RX_STBC)
  665                 sc->sc_stats.ast_rx_stbc++;
  666 #endif /* AH_SUPPORT_AR5416 */
  667 
  668         if (rs->rs_status != 0) {
  669                 if (rs->rs_status & HAL_RXERR_CRC)
  670                         sc->sc_stats.ast_rx_crcerr++;
  671                 if (rs->rs_status & HAL_RXERR_FIFO)
  672                         sc->sc_stats.ast_rx_fifoerr++;
  673                 if (rs->rs_status & HAL_RXERR_PHY) {
  674                         sc->sc_stats.ast_rx_phyerr++;
  675                         /* Process DFS radar events */
  676                         if ((rs->rs_phyerr == HAL_PHYERR_RADAR) ||
  677                             (rs->rs_phyerr == HAL_PHYERR_FALSE_RADAR_EXT)) {
  678                                 /* Now pass it to the radar processing code */
  679                                 ath_dfs_process_phy_err(sc, m, rstamp, rs);
  680                         }
  681 
  682                         /* Be suitably paranoid about receiving phy errors out of the stats array bounds */
  683                         if (rs->rs_phyerr < 64)
  684                                 sc->sc_stats.ast_rx_phy[rs->rs_phyerr]++;
  685                         goto rx_error;  /* NB: don't count in ierrors */
  686                 }
  687                 if (rs->rs_status & HAL_RXERR_DECRYPT) {
  688                         /*
  689                          * Decrypt error.  If the error occurred
  690                          * because there was no hardware key, then
  691                          * let the frame through so the upper layers
  692                          * can process it.  This is necessary for 5210
  693                          * parts which have no way to setup a ``clear''
  694                          * key cache entry.
  695                          *
  696                          * XXX do key cache faulting
  697                          */
  698                         if (rs->rs_keyix == HAL_RXKEYIX_INVALID)
  699                                 goto rx_accept;
  700                         sc->sc_stats.ast_rx_badcrypt++;
  701                 }
  702                 /*
   703                  * Similar to the above - if the failure was a keymiss
  704                  * just punt it up to the upper layers for now.
  705                  */
  706                 if (rs->rs_status & HAL_RXERR_KEYMISS) {
  707                         sc->sc_stats.ast_rx_keymiss++;
  708                         goto rx_accept;
  709                 }
  710                 if (rs->rs_status & HAL_RXERR_MIC) {
  711                         sc->sc_stats.ast_rx_badmic++;
  712                         /*
  713                          * Do minimal work required to hand off
  714                          * the 802.11 header for notification.
  715                          */
  716                         /* XXX frag's and qos frames */
  717                         len = rs->rs_datalen;
  718                         if (len >= sizeof (struct ieee80211_frame)) {
  719                                 ath_handle_micerror(ic,
  720                                     mtod(m, struct ieee80211_frame *),
  721                                     sc->sc_splitmic ?
  722                                         rs->rs_keyix-32 : rs->rs_keyix);
  723                         }
  724                 }
  725                 counter_u64_add(ic->ic_ierrors, 1);
  726 rx_error:
  727                 /*
  728                  * Cleanup any pending partial frame.
  729                  */
  730                 if (re->m_rxpending != NULL) {
  731                         m_freem(re->m_rxpending);
  732                         re->m_rxpending = NULL;
  733                 }
  734                 /*
  735                  * When a tap is present pass error frames
  736                  * that have been requested.  By default we
  737                  * pass decrypt+mic errors but others may be
  738                  * interesting (e.g. crc).
  739                  */
  740                 if (ieee80211_radiotap_active(ic) &&
  741                     (rs->rs_status & sc->sc_monpass)) {
  742                         /* NB: bpf needs the mbuf length setup */
  743                         len = rs->rs_datalen;
  744                         m->m_pkthdr.len = m->m_len = len;
  745                         ath_rx_tap(sc, m, rs, rstamp, nf);
  746 #ifdef  ATH_ENABLE_RADIOTAP_VENDOR_EXT
  747                         ath_rx_tap_vendor(sc, m, rs, rstamp, nf);
  748 #endif  /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */
  749                         ieee80211_radiotap_rx_all(ic, m);
  750                 }
   751                 /* XXX pass MIC errors up for s/w recalculation */
  752                 m_freem(m); m = NULL;
  753                 goto rx_next;
  754         }
  755 rx_accept:
  756         len = rs->rs_datalen;
  757         m->m_len = len;
  758 
  759         if (rs->rs_more) {
  760                 /*
  761                  * Frame spans multiple descriptors; save
  762                  * it for the next completed descriptor, it
  763                  * will be used to construct a jumbogram.
  764                  */
  765                 if (re->m_rxpending != NULL) {
  766                         /* NB: max frame size is currently 2 clusters */
  767                         sc->sc_stats.ast_rx_toobig++;
  768                         m_freem(re->m_rxpending);
  769                 }
  770                 m->m_pkthdr.len = len;
  771                 re->m_rxpending = m;
  772                 m = NULL;
  773                 goto rx_next;
  774         } else if (re->m_rxpending != NULL) {
  775                 /*
  776                  * This is the second part of a jumbogram,
  777                  * chain it to the first mbuf, adjust the
  778                  * frame length, and clear the rxpending state.
  779                  */
  780                 re->m_rxpending->m_next = m;
  781                 re->m_rxpending->m_pkthdr.len += len;
  782                 m = re->m_rxpending;
  783                 re->m_rxpending = NULL;
  784         } else {
  785                 /*
  786                  * Normal single-descriptor receive; setup packet length.
  787                  */
  788                 m->m_pkthdr.len = len;
  789         }
  790 
  791         /*
  792          * Validate rs->rs_antenna.
  793          *
  794          * Some users w/ AR9285 NICs have reported crashes
  795          * here because rs_antenna field is bogusly large.
  796          * Let's enforce the maximum antenna limit of 8
  797          * (and it shouldn't be hard coded, but that's a
  798          * separate problem) and if there's an issue, print
  799          * out an error and adjust rs_antenna to something
  800          * sensible.
  801          *
  802          * This code should be removed once the actual
  803          * root cause of the issue has been identified.
  804          * For example, it may be that the rs_antenna
  805          * field is only valid for the last frame of
  806          * an aggregate and it just happens that it is
  807          * "mostly" right. (This is a general statement -
  808          * the majority of the statistics are only valid
   809          * for the last frame in an aggregate.)
  810          */
  811         if (rs->rs_antenna > 7) {
  812                 device_printf(sc->sc_dev, "%s: rs_antenna > 7 (%d)\n",
  813                     __func__, rs->rs_antenna);
  814 #ifdef  ATH_DEBUG
  815                 ath_printrxbuf(sc, bf, 0, status == HAL_OK);
  816 #endif /* ATH_DEBUG */
  817                 rs->rs_antenna = 0;     /* XXX better than nothing */
  818         }
  819 
  820         /*
  821          * If this is an AR9285/AR9485, then the receive and LNA
  822          * configuration is stored in RSSI[2] / EXTRSSI[2].
  823          * We can extract this out to build a much better
  824          * receive antenna profile.
  825          *
  826          * Yes, this just blurts over the above RX antenna field
  827          * for now.  It's fine, the AR9285 doesn't really use
  828          * that.
  829          *
  830          * Later on we should store away the fine grained LNA
  831          * information and keep separate counters just for
  832          * that.  It'll help when debugging the AR9285/AR9485
  833          * combined diversity code.
  834          */
  835         if (sc->sc_rx_lnamixer) {
  836                 rs->rs_antenna = 0;
  837 
  838                 /* Bits 0:1 - the LNA configuration used */
  839                 rs->rs_antenna |=
  840                     ((rs->rs_rssi_ctl[2] & HAL_RX_LNA_CFG_USED)
  841                       >> HAL_RX_LNA_CFG_USED_S);
  842 
  843                 /* Bit 2 - the external RX antenna switch */
  844                 if (rs->rs_rssi_ctl[2] & HAL_RX_LNA_EXTCFG)
  845                         rs->rs_antenna |= 0x4;
  846         }
  847 
  848         sc->sc_stats.ast_ant_rx[rs->rs_antenna]++;
  849 
  850         /*
  851          * Populate the rx status block.  When there are bpf
  852          * listeners we do the additional work to provide
  853          * complete status.  Otherwise we fill in only the
  854          * material required by ieee80211_input.  Note that
  855          * noise setting is filled in above.
  856          */
  857         if (ieee80211_radiotap_active(ic)) {
  858                 ath_rx_tap(sc, m, rs, rstamp, nf);
  859 #ifdef  ATH_ENABLE_RADIOTAP_VENDOR_EXT
  860                 ath_rx_tap_vendor(sc, m, rs, rstamp, nf);
  861 #endif  /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */
  862         }
  863 
  864         /*
  865          * From this point on we assume the frame is at least
  866          * as large as ieee80211_frame_min; verify that.
  867          */
  868         if (len < IEEE80211_MIN_LEN) {
  869                 if (!ieee80211_radiotap_active(ic)) {
  870                         DPRINTF(sc, ATH_DEBUG_RECV,
  871                             "%s: short packet %d\n", __func__, len);
  872                         sc->sc_stats.ast_rx_tooshort++;
  873                 } else {
  874                         /* NB: in particular this captures ack's */
  875                         ieee80211_radiotap_rx_all(ic, m);
  876                 }
  877                 m_freem(m); m = NULL;
  878                 goto rx_next;
  879         }
  880 
  881         if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) {
  882                 const HAL_RATE_TABLE *rt = sc->sc_currates;
  883                 uint8_t rix = rt->rateCodeToIndex[rs->rs_rate];
  884 
  885                 ieee80211_dump_pkt(ic, mtod(m, caddr_t), len,
  886                     sc->sc_hwmap[rix].ieeerate, rs->rs_rssi);
  887         }
  888 
  889         m_adj(m, -IEEE80211_CRC_LEN);
  890 
  891         /*
  892          * Locate the node for sender, track state, and then
  893          * pass the (referenced) node up to the 802.11 layer
  894          * for its use.
  895          */
  896         ni = ieee80211_find_rxnode_withkey(ic,
  897                 mtod(m, const struct ieee80211_frame_min *),
  898                 rs->rs_keyix == HAL_RXKEYIX_INVALID ?
  899                         IEEE80211_KEYIX_NONE : rs->rs_keyix);
  900         sc->sc_lastrs = rs;
  901 
  902 #ifdef  AH_SUPPORT_AR5416
  903         if (rs->rs_isaggr)
  904                 sc->sc_stats.ast_rx_agg++;
  905 #endif /* AH_SUPPORT_AR5416 */
  906 
  907         if (ni != NULL) {
  908                 /*
  909                  * Only punt packets for ampdu reorder processing for
  910                  * 11n nodes; net80211 enforces that M_AMPDU is only
  911                  * set for 11n nodes.
  912                  */
  913                 if (ni->ni_flags & IEEE80211_NODE_HT)
  914                         m->m_flags |= M_AMPDU;
  915 
  916                 /*
  917                  * Sending station is known, dispatch directly.
  918                  */
  919                 type = ieee80211_input(ni, m, rs->rs_rssi, nf);
  920                 ieee80211_free_node(ni);
  921                 m = NULL;
  922                 /*
  923                  * Arrange to update the last rx timestamp only for
  924                  * frames from our ap when operating in station mode.
  925                  * This assumes the rx key is always setup when
  926                  * associated.
  927                  */
  928                 if (ic->ic_opmode == IEEE80211_M_STA &&
  929                     rs->rs_keyix != HAL_RXKEYIX_INVALID)
  930                         is_good = 1;
  931         } else {
  932                 type = ieee80211_input_all(ic, m, rs->rs_rssi, nf);
  933                 m = NULL;
  934         }
  935 
  936         /*
  937          * At this point we have passed the frame up the stack; thus
  938          * the mbuf is no longer ours.
  939          */
  940 
  941         /*
  942          * Track rx rssi and do any rx antenna management.
  943          */
  944         ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi);
  945         if (sc->sc_diversity) {
  946                 /*
  947                  * When using fast diversity, change the default rx
  948                  * antenna if diversity chooses the other antenna 3
  949                  * times in a row.
  950                  */
  951                 if (sc->sc_defant != rs->rs_antenna) {
  952                         if (++sc->sc_rxotherant >= 3)
  953                                 ath_setdefantenna(sc, rs->rs_antenna);
  954                 } else
  955                         sc->sc_rxotherant = 0;
  956         }
  957 
  958         /* Handle slow diversity if enabled */
  959         if (sc->sc_dolnadiv) {
  960                 ath_lna_rx_comb_scan(sc, rs, ticks, hz);
  961         }
  962 
  963         if (sc->sc_softled) {
  964                 /*
  965                  * Blink for any data frame.  Otherwise do a
  966                  * heartbeat-style blink when idle.  The latter
  967                  * is mainly for station mode where we depend on
  968                  * periodic beacon frames to trigger the poll event.
  969                  */
  970                 if (type == IEEE80211_FC0_TYPE_DATA) {
  971                         const HAL_RATE_TABLE *rt = sc->sc_currates;
  972                         ath_led_event(sc,
  973                             rt->rateCodeToIndex[rs->rs_rate]);
  974                 } else if (ticks - sc->sc_ledevent >= sc->sc_ledidle)
  975                         ath_led_event(sc, 0);
   976         }
  977 rx_next:
  978         /*
  979          * Debugging - complain if we didn't NULL the mbuf pointer
  980          * here.
  981          */
  982         if (m != NULL) {
  983                 device_printf(sc->sc_dev,
  984                     "%s: mbuf %p should've been freed!\n",
  985                     __func__,
  986                     m);
  987         }
  988         return (is_good);
  989 }
  990 
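/*
 * Editor's note: a minimal sketch (illustration only) of the contract a
 * caller must honour before handing a completed buffer to ath_rx_pkt();
 * the legacy RX loop in ath_rx_proc() below does effectively this.
 */
#if 0	/* illustration only */
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
	m = bf->bf_m;
	bf->bf_m = NULL;	/* ath_rx_pkt() now owns the mbuf */
	if (ath_rx_pkt(sc, rs, status, tsf, nf, HAL_RX_QUEUE_HP, bf, m))
		ngood++;
#endif
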
  991 #define ATH_RX_MAX              128
  992 
  993 /*
  994  * XXX TODO: break out the "get buffers" from "call ath_rx_pkt()" like
  995  * the EDMA code does.
  996  *
  997  * XXX TODO: then, do all of the RX list management stuff inside
  998  * ATH_RX_LOCK() so we don't end up potentially racing.  The EDMA
  999  * code is doing it right.
 1000  */
 1001 static void
 1002 ath_rx_proc(struct ath_softc *sc, int resched)
 1003 {
 1004 #define PA2DESC(_sc, _pa) \
 1005         ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
 1006                 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
 1007         struct ath_buf *bf;
 1008         struct ath_hal *ah = sc->sc_ah;
 1009 #ifdef IEEE80211_SUPPORT_SUPERG
 1010         struct ieee80211com *ic = &sc->sc_ic;
 1011 #endif
 1012         struct ath_desc *ds;
 1013         struct ath_rx_status *rs;
 1014         struct mbuf *m;
 1015         int ngood;
 1016         HAL_STATUS status;
 1017         int16_t nf;
 1018         u_int64_t tsf;
 1019         int npkts = 0;
 1020         int kickpcu = 0;
 1021         int ret;
 1022 
 1023         /* XXX we must not hold the ATH_LOCK here */
 1024         ATH_UNLOCK_ASSERT(sc);
 1025         ATH_PCU_UNLOCK_ASSERT(sc);
 1026 
 1027         ATH_PCU_LOCK(sc);
 1028         sc->sc_rxproc_cnt++;
 1029         kickpcu = sc->sc_kickpcu;
 1030         ATH_PCU_UNLOCK(sc);
 1031 
 1032         ATH_LOCK(sc);
 1033         ath_power_set_power_state(sc, HAL_PM_AWAKE);
 1034         ATH_UNLOCK(sc);
 1035 
 1036         DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: called\n", __func__);
 1037         ngood = 0;
 1038         nf = ath_hal_getchannoise(ah, sc->sc_curchan);
 1039         sc->sc_stats.ast_rx_noise = nf;
 1040         tsf = ath_hal_gettsf64(ah);
 1041         do {
 1042                 /*
 1043                  * Don't process too many packets at a time; give the
 1044                  * TX thread time to also run - otherwise the TX
 1045                  * latency can jump by quite a bit, causing throughput
  1046                  * degradation.
 1047                  */
 1048                 if (!kickpcu && npkts >= ATH_RX_MAX)
 1049                         break;
 1050 
 1051                 bf = TAILQ_FIRST(&sc->sc_rxbuf);
 1052                 if (sc->sc_rxslink && bf == NULL) {     /* NB: shouldn't happen */
 1053                         device_printf(sc->sc_dev, "%s: no buffer!\n", __func__);
 1054                         break;
 1055                 } else if (bf == NULL) {
 1056                         /*
 1057                          * End of List:
 1058                          * this can happen for non-self-linked RX chains
 1059                          */
 1060                         sc->sc_stats.ast_rx_hitqueueend++;
 1061                         break;
 1062                 }
 1063                 m = bf->bf_m;
 1064                 if (m == NULL) {                /* NB: shouldn't happen */
 1065                         /*
 1066                          * If mbuf allocation failed previously there
 1067                          * will be no mbuf; try again to re-populate it.
 1068                          */
 1069                         /* XXX make debug msg */
 1070                         device_printf(sc->sc_dev, "%s: no mbuf!\n", __func__);
 1071                         TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);
 1072                         goto rx_proc_next;
 1073                 }
 1074                 ds = bf->bf_desc;
 1075                 if (ds->ds_link == bf->bf_daddr) {
 1076                         /* NB: never process the self-linked entry at the end */
 1077                         sc->sc_stats.ast_rx_hitqueueend++;
 1078                         break;
 1079                 }
 1080                 /* XXX sync descriptor memory */
 1081                 /*
 1082                  * Must provide the virtual address of the current
 1083                  * descriptor, the physical address, and the virtual
 1084                  * address of the next descriptor in the h/w chain.
 1085                  * This allows the HAL to look ahead to see if the
 1086                  * hardware is done with a descriptor by checking the
 1087                  * done bit in the following descriptor and the address
 1088                  * of the current descriptor the DMA engine is working
 1089                  * on.  All this is necessary because of our use of
 1090                  * a self-linked list to avoid rx overruns.
 1091                  */
 1092                 rs = &bf->bf_status.ds_rxstat;
 1093                 status = ath_hal_rxprocdesc(ah, ds,
 1094                                 bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
 1095 #ifdef ATH_DEBUG
 1096                 if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
 1097                         ath_printrxbuf(sc, bf, 0, status == HAL_OK);
 1098 #endif
 1099 
 1100 #ifdef  ATH_DEBUG_ALQ
 1101                 if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS))
 1102                     if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS,
 1103                     sc->sc_rx_statuslen, (char *) ds);
 1104 #endif  /* ATH_DEBUG_ALQ */
 1105 
 1106                 if (status == HAL_EINPROGRESS)
 1107                         break;
 1108 
 1109                 TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);
 1110                 npkts++;
 1111 
 1112                 /*
 1113                  * Process a single frame.
 1114                  */
 1115                 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTREAD);
 1116                 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
 1117                 bf->bf_m = NULL;
 1118                 if (ath_rx_pkt(sc, rs, status, tsf, nf, HAL_RX_QUEUE_HP, bf, m))
 1119                         ngood++;
 1120 rx_proc_next:
 1121                 /*
 1122                  * If there's a holding buffer, insert that onto
  1123                  * the RX list; the hardware is definitely no longer
  1124                  * pointing to it.
 1125                  */
 1126                 ret = 0;
 1127                 if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf != NULL) {
 1128                         TAILQ_INSERT_TAIL(&sc->sc_rxbuf,
 1129                             sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf,
 1130                             bf_list);
 1131                         ret = ath_rxbuf_init(sc,
 1132                             sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf);
 1133                 }
 1134                 /*
 1135                  * Next, throw our buffer into the holding entry.  The hardware
 1136                  * may use the descriptor to read the link pointer before
 1137                  * DMAing the next descriptor in to write out a packet.
 1138                  */
 1139                 sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf = bf;
 1140         } while (ret == 0);
 1141 
 1142         /* rx signal state monitoring */
 1143         ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);
 1144         if (ngood)
 1145                 sc->sc_lastrx = tsf;
 1146 
 1147         ATH_KTR(sc, ATH_KTR_RXPROC, 2, "ath_rx_proc: npkts=%d, ngood=%d", npkts, ngood);
 1148         /* Queue DFS tasklet if needed */
 1149         if (resched && ath_dfs_tasklet_needed(sc, sc->sc_curchan))
 1150                 taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);
 1151 
 1152         /*
  1153          * Now that all the RX frames that needed handling have
  1154          * been handled, kick the PCU if there's
 1155          * been an RXEOL condition.
 1156          */
 1157         if (resched && kickpcu) {
 1158                 ATH_PCU_LOCK(sc);
 1159                 ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_rx_proc: kickpcu");
 1160                 device_printf(sc->sc_dev, "%s: kickpcu; handled %d packets\n",
 1161                     __func__, npkts);
 1162 
 1163                 /*
 1164                  * Go through the process of fully tearing down
 1165                  * the RX buffers and reinitialising them.
 1166                  *
 1167                  * There's a hardware bug that causes the RX FIFO
 1168                  * to get confused under certain conditions and
 1169                  * constantly write over the same frame, leading
 1170                  * the RX driver code here to get heavily confused.
 1171                  */
 1172                 /*
 1173                  * XXX Has RX DMA stopped enough here to just call
 1174                  *     ath_startrecv()?
 1175                  * XXX Do we need to use the holding buffer to restart
 1176                  *     RX DMA by appending entries to the final
 1177                  *     descriptor?  Quite likely.
 1178                  */
 1179 #if 1
 1180                 ath_startrecv(sc);
 1181 #else
 1182                 /*
 1183                  * Disabled for now - it'd be nice to be able to do
 1184                  * this in order to limit the amount of CPU time spent
 1185                  * reinitialising the RX side (and thus minimise RX
 1186                  * drops) however there's a hardware issue that
 1187                  * causes things to get too far out of whack.
 1188                  */
 1189                 /*
 1190                  * XXX can we hold the PCU lock here?
 1191                  * Are there any net80211 buffer calls involved?
 1192                  */
 1193                 bf = TAILQ_FIRST(&sc->sc_rxbuf);
 1194                 ath_hal_putrxbuf(ah, bf->bf_daddr, HAL_RX_QUEUE_HP);
 1195                 ath_hal_rxena(ah);              /* enable recv descriptors */
 1196                 ath_mode_init(sc);              /* set filters, etc. */
 1197                 ath_hal_startpcurecv(ah);       /* re-enable PCU/DMA engine */
 1198 #endif
 1199 
 1200                 ath_hal_intrset(ah, sc->sc_imask);
 1201                 sc->sc_kickpcu = 0;
 1202                 ATH_PCU_UNLOCK(sc);
 1203         }
 1204 
 1205 #ifdef IEEE80211_SUPPORT_SUPERG
 1206         if (resched)
 1207                 ieee80211_ff_age_all(ic, 100);
 1208 #endif
 1209 
 1210         /*
 1211          * Put the hardware to sleep again if we're done with it.
 1212          */
 1213         ATH_LOCK(sc);
 1214         ath_power_restore_power_state(sc);
 1215         ATH_UNLOCK(sc);
 1216 
 1217         /*
 1218          * If we hit the maximum number of frames in this round,
 1219          * reschedule for another immediate pass.  This gives
 1220          * the TX and TX completion routines time to run, which
 1221          * will reduce latency.
 1222          */
 1223         if (npkts >= ATH_RX_MAX)
 1224                 sc->sc_rx.recv_sched(sc, resched);
 1225 
 1226         ATH_PCU_LOCK(sc);
 1227         sc->sc_rxproc_cnt--;
 1228         ATH_PCU_UNLOCK(sc);
 1229 }
 1230 #undef  PA2DESC
 1231 #undef  ATH_RX_MAX
 1232 
 1233 /*
 1234  * Only run the RX proc if it's not already running.
 1235  * Since RX processing may also be run as part of the reset/flush
 1236  * path, this tasklet must not clash with an already-running pass.
 1237  */
 1238 static void
 1239 ath_legacy_rx_tasklet(void *arg, int npending)
 1240 {
 1241         struct ath_softc *sc = arg;
 1242 
 1243         ATH_KTR(sc, ATH_KTR_RXPROC, 1, "ath_rx_proc: pending=%d", npending);
 1244         DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending);
 1245         ATH_PCU_LOCK(sc);
 1246         if (sc->sc_inreset_cnt > 0) {
 1247                 device_printf(sc->sc_dev,
 1248                     "%s: sc_inreset_cnt > 0; skipping\n", __func__);
 1249                 ATH_PCU_UNLOCK(sc);
 1250                 return;
 1251         }
 1252         ATH_PCU_UNLOCK(sc);
 1253 
 1254         ath_rx_proc(sc, 1);
 1255 }
 1256 
 1257 static void
 1258 ath_legacy_flushrecv(struct ath_softc *sc)
 1259 {
 1260 
 1261         ath_rx_proc(sc, 0);
 1262 }
 1263 
 1264 static void
 1265 ath_legacy_flush_rxpending(struct ath_softc *sc)
 1266 {
 1267 
 1268         /* XXX ATH_RX_LOCK_ASSERT(sc); */
 1269 
 1270         if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending != NULL) {
 1271                 m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending);
 1272                 sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL;
 1273         }
 1274         if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending != NULL) {
 1275                 m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending);
 1276                 sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL;
 1277         }
 1278 }
 1279 
 1280 static int
 1281 ath_legacy_flush_rxholdbf(struct ath_softc *sc)
 1282 {
 1283         struct ath_buf *bf;
 1284 
 1285         /* XXX ATH_RX_LOCK_ASSERT(sc); */
 1286         /*
 1287          * If there are RX holding buffers, free them here and return
 1288          * them to the list.
 1289          *
 1290          * XXX should just verify that bf->bf_m is NULL, as it must
 1291          * be at this point!
 1292          */
 1293         bf = sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf;
 1294         if (bf != NULL) {
 1295                 if (bf->bf_m != NULL)
 1296                         m_freem(bf->bf_m);
 1297                 bf->bf_m = NULL;
 1298                 TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
 1299                 (void) ath_rxbuf_init(sc, bf);
 1300         }
 1301         sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf = NULL;
 1302 
 1303         bf = sc->sc_rxedma[HAL_RX_QUEUE_LP].m_holdbf;
 1304         if (bf != NULL) {
 1305                 if (bf->bf_m != NULL)
 1306                         m_freem(bf->bf_m);
 1307                 bf->bf_m = NULL;
 1308                 TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
 1309                 (void) ath_rxbuf_init(sc, bf);
 1310         }
 1311         sc->sc_rxedma[HAL_RX_QUEUE_LP].m_holdbf = NULL;
 1312 
 1313         return (0);
 1314 }
 1315 
 1316 /*
 1317  * Disable the receive h/w in preparation for a reset.
 1318  */
 1319 static void
 1320 ath_legacy_stoprecv(struct ath_softc *sc, int dodelay)
 1321 {
 1322 #define PA2DESC(_sc, _pa) \
 1323         ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
 1324                 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
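              /*
               * PA2DESC maps a descriptor's bus/physical address back to its
               * kernel-virtual struct ath_desc: the descriptors sit in one
               * contiguous DMA allocation, so an address's offset from the
               * physical base (dd_desc_paddr) is also its offset from the
               * virtual base (dd_desc).  For example, with hypothetical
               * values dd_desc_paddr = 0x1000000 and a completed descriptor
               * at _pa = 0x1000300, PA2DESC yields dd_desc + 0x300.
               */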
 1325         struct ath_hal *ah = sc->sc_ah;
 1326 
 1327         ATH_RX_LOCK(sc);
 1328 
 1329         ath_hal_stoppcurecv(ah);        /* disable PCU */
 1330         ath_hal_setrxfilter(ah, 0);     /* clear recv filter */
 1331         ath_hal_stopdmarecv(ah);        /* disable DMA engine */
 1332         /*
 1333          * TODO: see if this particular DELAY() is required; it may be
 1334          * masking some missing FIFO flush or DMA sync.
 1335          */
 1336 #if 0
 1337         if (dodelay)
 1338 #endif
 1339                 DELAY(3000);            /* 3ms is long enough for 1 frame */
 1340 #ifdef ATH_DEBUG
 1341         if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) {
 1342                 struct ath_buf *bf;
 1343                 u_int ix;
 1344 
 1345                 device_printf(sc->sc_dev,
 1346                     "%s: rx queue %p, link %p\n",
 1347                     __func__,
 1348                     (caddr_t)(uintptr_t) ath_hal_getrxbuf(ah, HAL_RX_QUEUE_HP),
 1349                     sc->sc_rxlink);
 1350                 ix = 0;
 1351                 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
 1352                         struct ath_desc *ds = bf->bf_desc;
 1353                         struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
 1354                         HAL_STATUS status = ath_hal_rxprocdesc(ah, ds,
 1355                                 bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
 1356                         if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL))
 1357                                 ath_printrxbuf(sc, bf, ix, status == HAL_OK);
 1358                         ix++;
 1359                 }
 1360         }
 1361 #endif
 1362 
 1363         (void) ath_legacy_flush_rxpending(sc);
 1364         (void) ath_legacy_flush_rxholdbf(sc);
 1365 
 1366         sc->sc_rxlink = NULL;           /* just in case */
 1367 
 1368         ATH_RX_UNLOCK(sc);
 1369 #undef PA2DESC
 1370 }
 1371 
 1372 /*
 1373  * XXX TODO: something was calling startrecv without calling
 1374  * stoprecv.  Let's figure out what/why.  It was showing up
 1375  * as an mbuf leak (rxpending) and an ath_buf leak (holdbf).
 1376  */
 1377 
 1378 /*
 1379  * Enable the receive h/w following a reset.
 1380  */
 1381 static int
 1382 ath_legacy_startrecv(struct ath_softc *sc)
 1383 {
 1384         struct ath_hal *ah = sc->sc_ah;
 1385         struct ath_buf *bf;
 1386 
 1387         ATH_RX_LOCK(sc);
 1388 
 1389         /*
 1390          * XXX should verify these are already all NULL!
 1391          */
 1392         sc->sc_rxlink = NULL;
 1393         (void) ath_legacy_flush_rxpending(sc);
 1394         (void) ath_legacy_flush_rxholdbf(sc);
 1395 
 1396         /*
 1397          * Re-chain all of the buffers in the RX buffer list.
 1398          */
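              /*
               * As far as can be inferred from this file, each
               * ath_rxbuf_init() call (re)maps the buffer's mbuf and links
               * its descriptor onto the chain rooted at sc_rxlink, which is
               * why sc_rxlink is cleared above before the walk begins.
               */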
 1399         TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
 1400                 int error = ath_rxbuf_init(sc, bf);
 1401                 if (error != 0) {
 1402                         DPRINTF(sc, ATH_DEBUG_RECV,
 1403                                 "%s: ath_rxbuf_init failed %d\n",
 1404                                 __func__, error);
                              /* Don't leak the RX lock on the error path. */
                              ATH_RX_UNLOCK(sc);
 1405                         return error;
 1406                 }
 1407         }
 1408 
 1409         bf = TAILQ_FIRST(&sc->sc_rxbuf);
 1410         ath_hal_putrxbuf(ah, bf->bf_daddr, HAL_RX_QUEUE_HP);
 1411         ath_hal_rxena(ah);              /* enable recv descriptors */
 1412         ath_mode_init(sc);              /* set filters, etc. */
 1413         ath_hal_startpcurecv(ah);       /* re-enable PCU/DMA engine */
 1414 
 1415         ATH_RX_UNLOCK(sc);
 1416         return 0;
 1417 }
 1418 
 1419 static int
 1420 ath_legacy_dma_rxsetup(struct ath_softc *sc)
 1421 {
 1422         int error;
 1423 
 1424         error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
 1425             "rx", sizeof(struct ath_desc), ath_rxbuf, 1);
 1426         if (error != 0)
 1427                 return (error);
 1428 
 1429         return (0);
 1430 }
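      /*
       * Reading the call above: the RX descriptors are carved from a single
       * DMA allocation of ath_rxbuf entries, each sizeof(struct ath_desc)
       * bytes, with one descriptor per buffer (the trailing 1).  The
       * parameter meanings are inferred from this call site; the actual
       * ath_descdma_setup() implementation lives elsewhere in the driver.
       */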
 1431 
 1432 static int
 1433 ath_legacy_dma_rxteardown(struct ath_softc *sc)
 1434 {
 1435 
 1436         if (sc->sc_rxdma.dd_desc_len != 0)
 1437                 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
 1438         return (0);
 1439 }
 1440 
 1441 static void
 1442 ath_legacy_recv_sched(struct ath_softc *sc, int dosched)
 1443 {
 1444 
 1445         taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
 1446 }
 1447 
 1448 static void
 1449 ath_legacy_recv_sched_queue(struct ath_softc *sc, HAL_RX_QUEUE q,
 1450     int dosched)
 1451 {
 1452 
 1453         taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
 1454 }
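      /*
       * Both legacy scheduling hooks above ignore their extra arguments
       * (dosched and the queue id) and simply enqueue the single RX task;
       * presumably the per-queue variant only matters for the EDMA RX path,
       * which installs its own methods.
       */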
 1455 
 1456 void
 1457 ath_recv_setup_legacy(struct ath_softc *sc)
 1458 {
 1459 
 1460         /* Sensible legacy defaults */
 1461         /*
 1462          * XXX this should be changed to properly support the
 1463          * exact RX descriptor size for each HAL.
 1464          */
 1465         sc->sc_rx_statuslen = sizeof(struct ath_desc);
 1466 
 1467         sc->sc_rx.recv_start = ath_legacy_startrecv;
 1468         sc->sc_rx.recv_stop = ath_legacy_stoprecv;
 1469         sc->sc_rx.recv_flush = ath_legacy_flushrecv;
 1470         sc->sc_rx.recv_tasklet = ath_legacy_rx_tasklet;
 1471         sc->sc_rx.recv_rxbuf_init = ath_legacy_rxbuf_init;
 1472 
 1473         sc->sc_rx.recv_setup = ath_legacy_dma_rxsetup;
 1474         sc->sc_rx.recv_teardown = ath_legacy_dma_rxteardown;
 1475         sc->sc_rx.recv_sched = ath_legacy_recv_sched;
 1476         sc->sc_rx.recv_sched_queue = ath_legacy_recv_sched_queue;
 1477 }
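      /*
       * Hedged sketch of how this method table is consumed: as far as can be
       * told from this file, callers elsewhere in the driver go through the
       * sc_rx hooks rather than calling ath_legacy_startrecv() and friends
       * directly, so the EDMA RX code can install its own implementations
       * without touching the callers.  The wrappers below are illustrative
       * only (the "_example" names are invented here); the real call sites
       * live elsewhere in the driver and may differ in detail.
       */
      static __inline int
      ath_startrecv_example(struct ath_softc *sc)
      {

              /* Dispatch through whichever RX implementation was installed. */
              return (sc->sc_rx.recv_start(sc));
      }

      static __inline void
      ath_stoprecv_example(struct ath_softc *sc, int dodelay)
      {

              sc->sc_rx.recv_stop(sc, dodelay);
      }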
